blob_id (string, length 40) | directory_id (string, length 40) | path (string, length 3–616) | content_id (string, length 40) | detected_licenses (list, length 0–112) | license_type (string, 2 classes) | repo_name (string, length 5–115) | snapshot_id (string, length 40) | revision_id (string, length 40) | branch_name (string, 777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 – 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 – 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 – 2023-09-06 01:08:06) | github_id (int64, 4.92k – 681M, nullable ⌀) | star_events_count (int64, 0 – 209k) | fork_events_count (int64, 0 – 110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable ⌀) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable ⌀) | gha_language (string, 149 classes) | src_encoding (string, 26 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 3 – 10.2M) | extension (string, 188 classes) | content (string, length 3 – 10.2M) | authors (list, length 1) | author_id (string, length 1–132)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
abbab8b1ae242f9577ad9cfd933cb3d9e11a23a7
|
4fcad69a9b2aec97fa29e0010d82f0f085cdc446
|
/tsampi/pypy/lib-python/hypothesis/searchstrategy/strategies.py
|
8fc6d3b5ec25c95b29d5376c79fb0a07fa0c58d4
|
[] |
no_license
|
tsampi/tsampi-0
|
b64d4457f58314343630b04232c6ecc74c7bfda1
|
5e0183e80718d5668b4b5b96631853942e344b64
|
refs/heads/master
| 2021-01-19T04:35:05.640785 | 2016-09-12T18:34:25 | 2016-09-12T18:34:25 | 49,612,767 | 1 | 3 | null | 2016-03-25T10:35:41 | 2016-01-14T01:02:18 |
Python
|
UTF-8
|
Python
| false | false | 10,227 |
py
|
# coding=utf-8
#
# This file is part of Hypothesis (https://github.com/DRMacIver/hypothesis)
#
# Most of this work is copyright (C) 2013-2015 David R. MacIver
# ([email protected]), but it contains contributions by others. See
# https://github.com/DRMacIver/hypothesis/blob/master/CONTRIBUTING.rst for a
# full list of people who may hold copyright, and consult the git log if you
# need to determine who owns an individual contribution.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
#
# END HEADER
from __future__ import division, print_function, absolute_import
import hypothesis.internal.conjecture.utils as cu
from hypothesis.errors import NoExamples, NoSuchExample, Unsatisfiable, \
UnsatisfiedAssumption
from hypothesis.control import assume, reject
from hypothesis.internal.compat import hrange
from hypothesis.internal.reflection import get_pretty_function_description
def one_of_strategies(xs):
"""Helper function for unioning multiple strategies."""
xs = tuple(xs)
if not xs:
raise ValueError('Cannot join an empty list of strategies')
from hypothesis.strategies import one_of
return one_of(xs)
class SearchStrategy(object):
"""A SearchStrategy is an object that knows how to explore data of a given
type.
Except where noted otherwise, methods on this class are not part of the
public API and their behaviour may change significantly between minor
version releases. They will generally be stable between patch releases.
With that in mind, here is how SearchStrategy works.
A search strategy is responsible for generating, simplifying and
serializing examples for saving.
In order to do this a strategy has three types (where type here is more
precise than just the class of the value. For example a tuple of ints
should be considered different from a tuple of strings):
1. The strategy parameter type
2. The strategy template type
3. The generated type
Of these, the first two should be considered to be private implementation
details of a strategy and the only valid thing to do with them is to pass them
back to the search strategy. Additionally, templates may be compared for
equality and hashed.
Templates must be of quite a restricted type. A template may be any of the
following:
1. Any instance of the types bool, float, int, str (unicode on 2.7)
2. None
3. Any tuple or namedtuple of valid template types
4. Any frozenset of valid template types
This may be relaxed a bit in future, but the requirement that templates are
hashable probably won't be.
This may all seem overly complicated but it's for a fairly good reason.
For more discussion of the motivation see
http://hypothesis.readthedocs.org/en/master/internals.html
Given these, data generation happens in three phases:
1. Draw a parameter value from a random number generator (defined by
draw_parameter)
2. Given a parameter value and a Random, draw a random template
3. Reify a template value, deterministically turning it into a value of
the desired type.
Data simplification proceeds on template values, taking a template and
providing a generator over some examples of similar but simpler templates.
"""
supports_find = True
is_empty = False
def example(self, random=None):
"""Provide an example of the sort of value that this strategy
generates. This is biased to be slightly simpler than is typical for
values from this strategy, for clarity purposes.
This method shouldn't be taken too seriously. It's here for interactive
exploration of the API, not for any sort of real testing.
This method is part of the public API.
"""
from hypothesis import find, settings
try:
return find(
self,
lambda x: True,
random=random,
settings=settings(
max_shrinks=0,
max_iterations=1000,
database=None
)
)
except (NoSuchExample, Unsatisfiable):
raise NoExamples(
u'Could not find any valid examples in 1000 tries'
)
def map(self, pack):
"""Returns a new strategy that generates values by generating a value
from this strategy and then calling pack() on the result, giving that.
This method is part of the public API.
"""
return MappedSearchStrategy(
pack=pack, strategy=self
)
def flatmap(self, expand):
"""Returns a new strategy that generates values by generating a value
from this strategy, say x, then generating a value from
strategy(expand(x))
This method is part of the public API.
"""
from hypothesis.searchstrategy.flatmapped import FlatMapStrategy
return FlatMapStrategy(
expand=expand, strategy=self
)
def filter(self, condition):
"""Returns a new strategy that generates values from this strategy
which satisfy the provided condition. Note that if the condition is too
hard to satisfy this might result in your tests failing with
Unsatisfiable.
This method is part of the public API.
"""
return FilteredStrategy(
condition=condition,
strategy=self,
)
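    # A hedged usage sketch (not part of this module): the three combinators
    # above are the public API most users touch. Assuming the top-level
    # hypothesis.strategies module:
    #
    #   from hypothesis import strategies as st
    #   evens = st.integers().filter(lambda x: x % 2 == 0)
    #   digits = st.integers(0, 9).map(str)
    #   sized = st.integers(0, 5).flatmap(
    #       lambda n: st.lists(st.integers(), min_size=n, max_size=n))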
def __or__(self, other):
"""Return a strategy which produces values by randomly drawing from one
of this strategy or the other strategy.
This method is part of the public API.
"""
if not isinstance(other, SearchStrategy):
raise ValueError('Cannot | a SearchStrategy with %r' % (other,))
if other.is_empty:
return self
return one_of_strategies((self, other))
def validate(self):
"""Through an exception if the strategy is not valid.
This can happen due to lazy construction
"""
pass
def do_draw(self, data):
raise NotImplementedError('%s.do_draw' % (type(self).__name__,))
def __init__(self):
pass
class OneOfStrategy(SearchStrategy):
"""Implements a union of strategies. Given a number of strategies this
generates values which could have come from any of them.
The conditional distribution draws uniformly at random from some non-empty
subset of these strategies and then draws from the conditional distribution
of that strategy.
"""
def __init__(self, strategies, bias=None):
SearchStrategy.__init__(self)
strategies = tuple(strategies)
self.element_strategies = list(strategies)
self.bias = bias
if bias is not None:
assert 0 < bias < 1
self.weights = [bias ** i for i in range(len(strategies))]
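            # Geometric weights: with bias b in (0, 1), strategy i survives the
            # rejection test in biased_i below with probability b**i, so
            # strategies listed earlier are drawn more often.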
def do_draw(self, data):
n = len(self.element_strategies)
if self.bias is None:
i = cu.integer_range(data, 0, n - 1)
else:
def biased_i(random):
while True:
i = random.randint(0, n - 1)
if random.random() <= self.weights[i]:
return i
i = cu.integer_range_with_distribution(
data, 0, n - 1, biased_i)
return data.draw(self.element_strategies[i])
def __repr__(self):
return ' | '.join(map(repr, self.element_strategies))
def validate(self):
for e in self.element_strategies:
e.validate()
class MappedSearchStrategy(SearchStrategy):
"""A strategy which is defined purely by conversion to and from another
strategy.
Its parameter and distribution come from that other strategy.
"""
def __init__(self, strategy, pack=None):
SearchStrategy.__init__(self)
self.mapped_strategy = strategy
if pack is not None:
self.pack = pack
self.is_empty = strategy.is_empty
def __repr__(self):
if not hasattr(self, '_cached_repr'):
self._cached_repr = '%r.map(%s)' % (
self.mapped_strategy, get_pretty_function_description(
self.pack)
)
return self._cached_repr
def validate(self):
self.mapped_strategy.validate()
def pack(self, x):
"""Take a value produced by the underlying mapped_strategy and turn it
into a value suitable for outputting from this strategy."""
raise NotImplementedError(
'%s.pack()' % (self.__class__.__name__))
def do_draw(self, data):
for _ in range(3):
i = data.index
try:
return self.pack(self.mapped_strategy.do_draw(data))
except UnsatisfiedAssumption:
if data.index == i:
raise
reject()
class FilteredStrategy(SearchStrategy):
def __init__(self, strategy, condition):
super(FilteredStrategy, self).__init__()
self.condition = condition
self.filtered_strategy = strategy
self.is_empty = strategy.is_empty
def __repr__(self):
if not hasattr(self, '_cached_repr'):
self._cached_repr = '%r.filter(%s)' % (
self.filtered_strategy, get_pretty_function_description(
self.condition)
)
return self._cached_repr
def validate(self):
self.filtered_strategy.validate()
def do_draw(self, data):
for _ in hrange(3):
start_index = data.index
value = data.draw(self.filtered_strategy)
if self.condition(value):
return value
else:
# This is to guard against the case where we consume no data.
# As long as we consume data, we'll eventually pass or raise.
# But if we don't this could be an infinite loop.
assume(data.index > start_index)
data.mark_invalid()
|
[
"[email protected]"
] | |
5a14c5dba72623bdcdb313672fbc2daafe67e92c
|
d2f91b93ad42aaefa5fc315a9b3a5d45d07fa705
|
/slbman/venv/Lib/site-packages/aliyunsdkcdn/request/v20141111/DescribeUserCustomerLabelsRequest.py
|
c8fc8cf6265ebce75fe8f06baa73ec2d8faee042
|
[] |
no_license
|
junlongzhou5566/managePlatform
|
66cb5bc5b176147ff0038819924f7efa8df1d556
|
3201ba1a11b05c86db5f42aa9ca8eaf1cc20e216
|
refs/heads/master
| 2021-03-29T00:58:23.337808 | 2020-03-17T09:50:21 | 2020-03-17T09:50:21 | 247,910,365 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,446 |
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeUserCustomerLabelsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cdn', '2014-11-11', 'DescribeUserCustomerLabels')
def get_Uid(self):
return self.get_query_params().get('Uid')
def set_Uid(self,Uid):
self.add_query_param('Uid',Uid)
def get_SecurityToken(self):
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self,SecurityToken):
self.add_query_param('SecurityToken',SecurityToken)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
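# A minimal usage sketch (assumes aliyunsdkcore's AcsClient; the credentials,
# region and Uid below are placeholders, not values from this repo):
#
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = DescribeUserCustomerLabelsRequest()
#   request.set_Uid('12345')
#   print(client.do_action_with_exception(request))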
|
[
"[email protected]@qq.com"
] |
[email protected]@qq.com
|
6f00a953f4fa99d16d0061f0dbc1fd5347fbf9e6
|
8e6005ff82a6b37b8c4e2a2fed5791323837d316
|
/RecoBTag/SoftLepton/python/softElectronES_cfi.py
|
52068ce86695633b8718d2cac44ad50b5f882ba5
|
[] |
no_license
|
CMSRA2Tau/gurrola-sl5-on-sl6
|
633050a5ec5fd1a81a15c2e1dcf6b4952b718a9e
|
f56a99cd7121bcbdf301c2bea9fe397a6b9ef6a1
|
refs/heads/master
| 2020-04-15T06:13:09.462508 | 2014-12-17T17:57:01 | 2014-12-17T17:57:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 133 |
py
|
import FWCore.ParameterSet.Config as cms
softElectron = cms.ESProducer("ElectronTaggerESProducer",
ipSign = cms.string("any")
)
|
[
"[email protected]"
] | |
8fac7d928bc84de65246152f01be74062f5b0855
|
eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7
|
/google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/errors/types/shared_set_error.py
|
43a227580d39ce496d5ef5f7db138a54ebe3bf91
|
[
"Apache-2.0"
] |
permissive
|
Tryweirder/googleapis-gen
|
2e5daf46574c3af3d448f1177eaebe809100c346
|
45d8e9377379f9d1d4e166e80415a8c1737f284d
|
refs/heads/master
| 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,250 |
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v6.errors',
marshal='google.ads.googleads.v6',
manifest={
'SharedSetErrorEnum',
},
)
class SharedSetErrorEnum(proto.Message):
r"""Container for enum describing possible shared set errors."""
class SharedSetError(proto.Enum):
r"""Enum describing possible shared set errors."""
UNSPECIFIED = 0
UNKNOWN = 1
CUSTOMER_CANNOT_CREATE_SHARED_SET_OF_THIS_TYPE = 2
DUPLICATE_NAME = 3
SHARED_SET_REMOVED = 4
SHARED_SET_IN_USE = 5
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
2abdd38dea5b5bf0ea675c26ada1586f3d849381
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03439/s570157916.py
|
2dd9d9548f62cf052083d4afa720b0e4796ac96b
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 288 |
py
|
n = int(input())
print(0)
L = R = input()
if L == "Vacant":
exit()
l = 0
r = n
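# Binary search over seat indices (hedged reading of this interactive-contest
# solution): each print queries a seat and input() returns its occupant or
# "Vacant". The XOR test below compares the midpoint's answer M with the left
# endpoint's answer L and uses the parity of (m - l) to decide which half must
# still contain a vacant seat.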
while True:
m = (l + r) // 2
print(m)
M = input()
if M == "Vacant":
exit()
if (M == L) ^ ((m - l) % 2 == 1):
l = m
L = M
else:
r = m
R = M
|
[
"[email protected]"
] | |
d338b9ceafda5116c1882198fe560b051f6d981b
|
0061ed492e5c566c8b4c9bfef7218b79518803f2
|
/python/generate-syllables.py
|
8e2321abfb9f6341eebf9f438f4197141b7447dc
|
[
"CC-BY-4.0"
] |
permissive
|
oneoffcoder/rpa
|
4c04e811d22ec28fda17410be21100f27cc56aeb
|
cb0401e0c38652f998ca1b3bfe49d2084a279be7
|
refs/heads/master
| 2021-08-04T14:30:10.344198 | 2021-07-23T19:11:59 | 2021-07-23T19:11:59 | 225,539,119 | 11 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 773 |
py
|
import itertools
C = """
c, d, f, h, k, l, m, n, p, q, r, s, t, v, x, y, z,
ch, dh, hl, hm, hn, kh, ml, nc, nk, np, nq, nr, nt, ny, ph, pl, qh, rh, th, ts, tx, xy,
hml, hny, nch, nkh, nph, npl, nqh, nrh, nth, nts, ntx, plh, tsh, txh,
nplh, ntsh, ntxh
""".strip()
V = """
a, e, i, o, u, w,
ai, au, aw, ee, ia, oo, ua
""".strip()
T = """
j, s, v, m, g, b
""".strip()
C = [c.strip() for c in C.split(',')]
V = [v.strip() for v in V.split(',')]
T = [t.strip() for t in T.split(',')]
print(f'C = {len(C)}, V = {len(V)}, T = {len(T)}')
c_frag = '|'.join([f"'{c}'" for c in C])
v_frag = '|'.join([f"'{c}'" for c in V])
t_frag = '|'.join([f"'{c}'" for c in T])
c_frag = f'({c_frag})'
v_frag = f'({v_frag})'
t_frag = f'({t_frag})'
print(c_frag)
print(v_frag)
print(t_frag)
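# Illustrative output (counts recomputed from the lists above, not taken from
# the repo): C = 56, V = 13, T = 6. The three fragments are alternation groups
# suitable for pasting into a grammar or regex, e.g.
# v_frag == "('a'|'e'|'i'|'o'|'u'|'w'|'ai'|'au'|'aw'|'ee'|'ia'|'oo'|'ua')"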
|
[
"[email protected]"
] | |
7d5c1b44ad5a8a64637df9196e677c4996b7dde8
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_exercises/templates/_algorithms_challenges/pybites/advanced/2_v3/regex.py
|
274f3408853cc0593272182183e8fe7bd31963bf
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 |
Python
|
UTF-8
|
Python
| false | false | 1,621 |
py
|
# _______ __
# ____ pp__ _______ pp__
#
# COURSE ('Introduction 1 Lecture 01:47'
# 'The Basics 4 Lectures 32:03'
# 'Getting Technical! 4 Lectures 41:51'
# 'Challenge 2 Lectures 27:48'
# 'Afterword 1 Lecture 05:02')
# TWEET ('New PyBites article: Module of the Week - Requests-cache '
# 'for Repeated API Calls - http://pybit.es/requests-cache.html '
# '#python #APIs')
# HTML ('<p>pybites != greedy</p>'
# '<p>not the same can be said REgarding ...</p>')
#
#
# ___ extract_course_times course_?
# """Return the course timings from the passed in
# course string. Timings are in mm:ss (minutes:seconds)
# format, so taking COURSE above you would extract:
# ['01:47', '32:03', '41:51', '27:48', '05:02']
# Return this list.
# """
# r.. __.f.. _ (\d\d:\d\d) ?
#
#
# ___ get_all_hashtags_and_links tweet_?
# """Get all hashtags and links from the tweet text
# that is passed into this function. So for TWEET
# above you need to extract the following list:
# ['http://pybit.es/requests-cache.html',
# '#python',
# '#APIs']
# Return this list.
# """
# r.. __.f.. _ (#\w+|https?://[^\s]*)' ?
#
#
# ___ match_first_paragraph html_?
# """Extract the first paragraph of the passed in
# html, so for HTML above this would be:
# 'pybites != greedy' (= content of first paragraph).
# Return this string.
# """
# result __.s.. _ <p>(.+?)</p> ?
# r.. ?.g.. 1 __ ? ____ ''
#
# __ _______ __ _______
# pp__ ?
# pp__ ?
# pp__ ?
|
[
"[email protected]"
] | |
666f8021d9d99254a7a69fb2f55c76a50c261736
|
9d29861e44389e02762e6eb0457c6a415a54e26c
|
/samples/cloud_monitoring/util.py
|
4a83bb03cbb3e7051ea9b5ba34375c07d307ea8c
|
[
"MIT"
] |
permissive
|
itsmemattchung/pyrax
|
e787d67f8a79036834575f951f8c9e81d64d8b8f
|
e8eff127a5c9b6e64a9a42593d5e889c3c03f81d
|
refs/heads/master
| 2021-01-18T10:14:31.752469 | 2015-05-16T16:44:35 | 2015-05-16T16:44:35 | 21,360,598 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,694 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c)2013 Rackspace US, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import sys
def option_chooser(options, attr=None):
"""Given an iterable, enumerate its contents for a user to choose from.
If the optional `attr` is not None, that attribute in each iterated
object will be printed.
This function will exit the program if the user chooses the escape option.
"""
for num, option in enumerate(options):
if attr:
print("%s: %s" % (num, getattr(option, attr)))
else:
print("%s: %s" % (num, option))
# Add an escape option
escape_opt = num + 1
print("%s: I want to exit!" % escape_opt)
choice = raw_input("Selection: ")
try:
ichoice = int(choice)
if ichoice > escape_opt:
raise ValueError
except ValueError:
print("Valid entries are the numbers 0-%s. Received '%s'." % (escape_opt,
choice))
sys.exit()
if ichoice == escape_opt:
print("Bye!")
sys.exit()
return ichoice
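# A minimal usage sketch (hypothetical list; note that raw_input makes this
# Python 2, matching the header above):
#
#   flavors = ["small", "medium", "large"]
#   idx = option_chooser(flavors)
#   print("You picked %s" % flavors[idx])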
|
[
"[email protected]"
] | |
91ffe969a51f4efe6edb0ebab8465bbed5120892
|
bb35185816208aaeb73016b7380e01a2d6b86278
|
/lists/migrations/0003_list.py
|
a52176c79813f19f0b541d426d089bd9720f86e7
|
[] |
no_license
|
the-fool/goat_driven_development
|
fa70dee5dc4dd48180ff6b5302e6ef966f2e207a
|
0d55ca75e29b4a90c4e54500df88c4b23854ab81
|
refs/heads/master
| 2021-01-10T02:32:16.947425 | 2016-02-15T17:25:37 | 2016-02-15T17:25:37 | 51,492,427 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 505 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-12 16:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lists', '0002_item_text'),
]
operations = [
migrations.CreateModel(
name='List',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
]
|
[
"[email protected]"
] | |
df3ff938a6aae98b752d7f465f52fbbab2f796c3
|
78d949674cd1e6261ee23372e6ff24897ba6bc9a
|
/user/migrations/0076_auto_20190310_0610.py
|
b9c330c739a0bf271a6f0b0cb827de029ff57fad
|
[] |
no_license
|
Aravindhan-M/first_project
|
60377a65dec777ce6a55a57358dccb96ef6c396d
|
68f5815aa7a7d7fa695bb753b5312f66bb201fe1
|
refs/heads/master
| 2022-12-14T11:23:56.782700 | 2019-08-09T09:53:17 | 2019-08-09T09:53:17 | 185,782,547 | 2 | 0 | null | 2022-12-08T01:45:24 | 2019-05-09T11:02:59 |
Python
|
UTF-8
|
Python
| false | false | 415 |
py
|
# Generated by Django 2.1.5 on 2019-03-10 03:10
from django.db import migrations
import user.managers
class Migration(migrations.Migration):
dependencies = [
('user', '0075_merge_20190310_0257'),
]
operations = [
migrations.AlterModelManagers(
name='user',
managers=[
('objects', user.managers.UserManager()),
],
),
]
|
[
"[email protected]"
] | |
cd254179e403b8ca9bffcc103ef4ccd10d84811c
|
75452de12ec9eea346e3b9c7789ac0abf3eb1d73
|
/scripts/fuzzing/test/factory_fake.py
|
629dfd2ea6fcc8d2db3e8661a32834b1072f5f8b
|
[
"BSD-3-Clause"
] |
permissive
|
oshunter/fuchsia
|
c9285cc8c14be067b80246e701434bbef4d606d1
|
2196fc8c176d01969466b97bba3f31ec55f7767b
|
refs/heads/master
| 2022-12-22T11:30:15.486382 | 2020-08-16T03:41:23 | 2020-08-16T03:41:23 | 287,920,017 | 2 | 2 |
BSD-3-Clause
| 2022-12-16T03:30:27 | 2020-08-16T10:18:30 |
C++
|
UTF-8
|
Python
| false | false | 3,616 |
py
|
#!/usr/bin/env python2.7
# Copyright 2020 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import test_env
from lib.factory import Factory
from host_fake import FakeHost
from lib.buildenv import BuildEnv
from lib.device import Device
class FakeFactory(Factory):
"""Fake factory that creates objects for testing.
Unlike the real factory, this object caches and reuses created BuildEnvs
and Devices. It also allows tests to access created objects for
examination.
Attributes:
buildenv: The associated BuildEnv object.
device: The associated Device object.
fuzzer: The most recently created FakeFuzzer object.
"""
def __init__(self):
super(FakeFactory, self).__init__(host=FakeHost())
self._parser = None
self._buildenv = None
self._device = None
self._fuzzer = None
# Factory created objects, lazily instantiated.
@property
def parser(self):
"""The associated ArgParser object."""
if not self._parser:
self._parser = self.create_parser()
return self._parser
@property
def buildenv(self):
"""The associated BuildEnv object."""
if not self._buildenv:
self._buildenv = self.create_buildenv()
return self._buildenv
@property
def device(self):
"""The associated Device object."""
if not self._device:
self._device = self.create_device()
return self._device
@property
def fuzzer(self):
"""The most recently created Fuzzer object."""
assert self._fuzzer, 'No fuzzer created.'
return self._fuzzer
# Methods to create objects.
def create_buildenv(self):
"""Returns the factory's build environment, creating it if needed."""
fuchsia_dir = self.host.getenv('FUCHSIA_DIR')
self.host.mkdir(fuchsia_dir)
buildenv = BuildEnv(self.host, fuchsia_dir)
build_dir = 'build_dir'
self.host.mkdir(buildenv.path(build_dir))
self.host.touch(buildenv.path(build_dir, 'host_x64', 'symbolize'))
self.host.touch(
buildenv.path(
'prebuilt', 'third_party', 'clang', self.host.platform, 'bin',
'llvm-symbolizer'))
self.host.mkdir(
buildenv.path(
'prebuilt', 'third_party', 'clang', self.host.platform, 'lib',
'debug', '.build-id'))
self.host.mkdir(buildenv.path(build_dir, '.build-id'))
self.host.mkdir(buildenv.path(build_dir + '.zircon', '.build-id'))
self.host.touch(buildenv.path(build_dir, 'ssh-keys', 'ssh_config'))
buildenv.configure(build_dir)
buildenv.add_fuzzer('fake-package1', 'fake-target1')
buildenv.add_fuzzer('fake-package1', 'fake-target2')
buildenv.add_fuzzer('fake-package1', 'fake-target3')
buildenv.add_fuzzer('fake-package2', 'fake-target1')
buildenv.add_fuzzer('fake-package2', 'fake-target11')
buildenv.add_fuzzer('fake-package2', 'an-extremely-verbose-target-name')
return buildenv
def create_device(self):
"""Returns the factory's device, creating it if needed."""
device = Device(self.create_buildenv(), '::1')
device.configure()
return device
def create_fuzzer(self, args, device=None):
self._fuzzer = super(FakeFactory, self).create_fuzzer(
args, device=device)
return self.fuzzer
|
[
"[email protected]"
] | |
78a8205c956d52789d3103e985ea37bea67890e7
|
53342cc883c88c50dd3a0dc1794e627f24771cc3
|
/pysmi/codegen/jsondoc.py
|
d32d108d55fd220128f3acb2ae41cc0085dd0915
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
astralblue/pysmi
|
567f5cb8de40a70895dce8db957b6aff9acabf51
|
09e74f62d2772a6b4753764b2ab50f8b6e124fb8
|
refs/heads/master
| 2020-06-11T10:39:31.108017 | 2016-12-04T11:04:41 | 2016-12-04T11:04:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 29,358 |
py
|
#
# This file is part of pysmi software.
#
# Copyright (c) 2015-2016, Ilya Etingof <[email protected]>
# License: http://pysmi.sf.net/license.html
#
import sys
import re
from time import strptime, strftime
try:
import json
except ImportError:
import simplejson as json
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from pysmi.mibinfo import MibInfo
from pysmi.codegen.base import AbstractCodeGen
from pysmi import error
from pysmi import debug
if sys.version_info[0] > 2:
# noinspection PyShadowingBuiltins
unicode = str
# noinspection PyShadowingBuiltins
long = int
class JsonCodeGen(AbstractCodeGen):
"""Builds JSON document representing MIB module supplied
in form of an Abstract Syntax Tree on input.
Instance of this class is supposed to be passed to *MibCompiler*,
the rest is internal to *MibCompiler*.
"""
constImports = {
'SNMPv2-SMI': ('iso',
'NOTIFICATION-TYPE', # bug in some MIBs (e.g. A3COM-HUAWEI-DHCPSNOOP-MIB)
'MODULE-IDENTITY', 'OBJECT-TYPE', 'OBJECT-IDENTITY'),
'SNMPv2-TC': ('DisplayString', 'TEXTUAL-CONVENTION',), # XXX
'SNMPv2-CONF': ('MODULE-COMPLIANCE', 'NOTIFICATION-GROUP',), # XXX
}
# never compile these, they either:
# - define MACROs (implementation supplies them)
# - or carry conflicting OIDs (so that all IMPORT's of them will be rewritten)
# - or have manual fixes
# - or import base ASN.1 types from implementation-specific MIBs
fakeMibs = ('ASN1',
'ASN1-ENUMERATION',
'ASN1-REFINEMENT') + AbstractCodeGen.baseMibs
baseTypes = ['Integer', 'Integer32', 'Bits', 'ObjectIdentifier', 'OctetString']
typeClasses = {
'NetworkAddress': 'IpAddress', # RFC1065-SMI, RFC1155-SMI -> SNMPv2-SMI
'nullSpecific': 'zeroDotZero', # RFC1158-MIB -> SNMPv2-SMI
'ipRoutingTable': 'ipRouteTable', # RFC1158-MIB -> RFC1213-MIB
'snmpEnableAuthTraps': 'snmpEnableAuthenTraps' # RFC1158-MIB -> SNMPv2-MIB
}
smiv1IdxTypes = ['INTEGER', 'OCTET STRING', 'IPADDRESS', 'NETWORKADDRESS']
indent = ' ' * 4
fakeidx = 1000 # starting index for fake symbols
def __init__(self):
self._rows = set()
self._cols = {} # k, v = name, datatype
self._seenSyms = set()
self._importMap = {}
self._out = {} # k, v = name, generated code
self.moduleName = ['DUMMY']
self.genRules = {'text': 1}
self.symbolTable = {}
@staticmethod
def transOpers(symbol):
return symbol.replace('-', '_')
@staticmethod
def isBinary(s):
return isinstance(s, (str, unicode)) and s[0] == '\'' \
and s[-2:] in ('\'b', '\'B')
@staticmethod
def isHex(s):
return isinstance(s, (str, unicode)) and s[0] == '\'' \
and s[-2:] in ('\'h', '\'H')
def str2int(self, s):
if self.isBinary(s):
if s[1:-2]:
i = int(s[1:-2], 2)
else:
raise error.PySmiSemanticError('empty binary string to int conversion')
elif self.isHex(s):
if s[1:-2]:
i = int(s[1:-2], 16)
else:
raise error.PySmiSemanticError('empty hex string to int conversion')
else:
i = int(s)
return i
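    # Worked examples for str2int (SMI-style literals; values recomputed here):
    #   "'1010'b" -> int('1010', 2) == 10
    #   "'ff'h"   -> int('ff', 16) == 255
    #   "42"      -> 42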
def prepData(self, pdata):
data = []
for el in pdata:
if not isinstance(el, tuple):
data.append(el)
elif len(el) == 1:
data.append(el[0])
else:
data.append(
self.handlersTable[el[0]](self, self.prepData(el[1:]))
)
return data
def genImports(self, imports):
# conversion to SNMPv2
toDel = []
for module in list(imports):
if module in self.convertImportv2:
for symbol in imports[module]:
if symbol in self.convertImportv2[module]:
toDel.append((module, symbol))
for newImport in self.convertImportv2[module][symbol]:
newModule, newSymbol = newImport
if newModule in imports:
imports[newModule].append(newSymbol)
else:
imports[newModule] = [newSymbol]
# removing converted symbols
for d in toDel:
imports[d[0]].remove(d[1])
# merging mib and constant imports
for module in self.constImports:
if module in imports:
imports[module] += self.constImports[module]
else:
imports[module] = self.constImports[module]
outDict = OrderedDict()
outDict['class'] = 'imports'
for module in sorted(imports):
symbols = []
for symbol in set(imports[module]):
symbols.append(symbol)
if symbols:
self._seenSyms.update([self.transOpers(s) for s in symbols])
self._importMap.update([(self.transOpers(s), module) for s in symbols])
if module not in outDict:
outDict[module] = []
outDict[module].extend(symbols)
return OrderedDict(imports=outDict), tuple(sorted(imports))
# noinspection PyMethodMayBeStatic
def genLabel(self, symbol):
return '-' in symbol and symbol or ''
def addToExports(self, symbol, moduleIdentity=0):
self._seenSyms.add(symbol)
# noinspection PyUnusedLocal
def regSym(self, symbol, outDict, parentOid=None, moduleIdentity=0):
if symbol in self._seenSyms and symbol not in self._importMap:
raise error.PySmiSemanticError('Duplicate symbol found: %s' % symbol)
self.addToExports(symbol, moduleIdentity)
self._out[symbol] = outDict
def genNumericOid(self, oid):
numericOid = ()
for part in oid:
if isinstance(part, tuple):
parent, module = part
if parent == 'iso':
numericOid += (1,)
continue
if module not in self.symbolTable:
# XXX do getname for possible future borrowed mibs
raise error.PySmiSemanticError('no module "%s" in symbolTable' % module)
if parent not in self.symbolTable[module]:
raise error.PySmiSemanticError('no symbol "%s" in module "%s"' % (parent, module))
numericOid += self.genNumericOid(self.symbolTable[module][parent]['oid'])
else:
numericOid += (part,)
return numericOid
def getBaseType(self, symName, module):
if module not in self.symbolTable:
raise error.PySmiSemanticError('no module "%s" in symbolTable' % module)
if symName not in self.symbolTable[module]:
raise error.PySmiSemanticError('no symbol "%s" in module "%s"' % (symName, module))
symType, symSubtype = self.symbolTable[module][symName].get('syntax', (('', ''), ''))
if not symType[0]:
raise error.PySmiSemanticError('unknown type for symbol "%s"' % symName)
if symType[0] in self.baseTypes:
return symType, symSubtype
else:
baseSymType, baseSymSubtype = self.getBaseType(*symType)
if isinstance(baseSymSubtype, list):
if isinstance(symSubtype, list):
symSubtype += baseSymSubtype
else:
symSubtype = baseSymSubtype
return baseSymType, symSubtype
# Clause generation functions
# noinspection PyUnusedLocal
def genAgentCapabilities(self, data):
name, description, oid = data
label = self.genLabel(name)
name = self.transOpers(name)
oidStr, parentOid = oid
outDict = OrderedDict()
outDict['name'] = name
outDict['oid'] = oidStr
outDict['class'] = 'agentcapabilities'
if self.genRules['text'] and description:
outDict['description'] = description
self.regSym(name, outDict, parentOid)
return outDict
# noinspection PyUnusedLocal
def genModuleIdentity(self, data):
name, lastUpdated, organization, contactInfo, description, revisions, oid = data
label = self.genLabel(name)
name = self.transOpers(name)
oidStr, parentOid = oid
outDict = OrderedDict()
outDict['name'] = name
outDict['oid'] = oidStr
outDict['class'] = 'moduleidentity'
if revisions:
outDict['revisions'] = revisions
if self.genRules['text']:
if lastUpdated:
outDict['lastupdated'] = lastUpdated
if organization:
outDict['organization'] = organization
if contactInfo:
outDict['contactinfo'] = contactInfo
if description:
outDict['description'] = description
self.regSym(name, outDict, parentOid, moduleIdentity=1)
return outDict
# noinspection PyUnusedLocal
def genModuleCompliance(self, data):
name, description, compliances, oid = data
label = self.genLabel(name)
name = self.transOpers(name)
oidStr, parentOid = oid
outDict = OrderedDict()
outDict['name'] = name
outDict['oid'] = oidStr
outDict['class'] = 'modulecompliance'
if compliances:
outDict['modulecompliance'] = compliances
if self.genRules['text'] and description:
outDict['description'] = description
self.regSym(name, outDict, parentOid)
return outDict
# noinspection PyUnusedLocal
def genNotificationGroup(self, data):
name, objects, description, oid = data
label = self.genLabel(name)
name = self.transOpers(name)
oidStr, parentOid = oid
outDict = OrderedDict()
outDict['name'] = name
outDict['oid'] = oidStr
outDict['class'] = 'notificationgroup'
if objects:
outDict['objects'] = [{'module': self.moduleName[0], 'object': self.transOpers(obj)} for obj in objects]
if self.genRules['text'] and description:
outDict['description'] = description
self.regSym(name, outDict, parentOid)
return outDict
# noinspection PyUnusedLocal
def genNotificationType(self, data):
name, objects, description, oid = data
label = self.genLabel(name)
name = self.transOpers(name)
oidStr, parentOid = oid
outDict = OrderedDict()
outDict['name'] = name
outDict['oid'] = oidStr
outDict['class'] = 'notificationtype'
if objects:
outDict['objects'] = [{'module': self.moduleName[0], 'object': self.transOpers(obj)} for obj in objects]
if self.genRules['text'] and description:
outDict['description'] = description
self.regSym(name, outDict, parentOid)
return outDict
# noinspection PyUnusedLocal
def genObjectGroup(self, data):
name, objects, description, oid = data
label = self.genLabel(name)
name = self.transOpers(name)
oidStr, parentOid = oid
outDict = OrderedDict(
{
'name': name,
'oid': oidStr,
'class': 'objectgroup',
}
)
if objects:
outDict['objects'] = [{'module': self.moduleName[0], 'object': self.transOpers(obj)} for obj in objects]
if self.genRules['text'] and description:
outDict['description'] = description
self.regSym(name, outDict, parentOid)
return outDict
# noinspection PyUnusedLocal
def genObjectIdentity(self, data):
name, description, oid = data
label = self.genLabel(name)
name = self.transOpers(name)
oidStr, parentOid = oid
outDict = OrderedDict()
outDict['name'] = name
outDict['oid'] = oidStr
outDict['class'] = 'objectidentity'
if self.genRules['text'] and description:
outDict['description'] = description
self.regSym(name, outDict, parentOid)
return outDict
# noinspection PyUnusedLocal
def genObjectType(self, data):
name, syntax, units, maxaccess, description, augmention, index, defval, oid = data
label = self.genLabel(name)
name = self.transOpers(name)
oidStr, parentOid = oid
indexStr, fakeStrlist, fakeSyms = index or ('', '', [])
defval = self.genDefVal(defval, objname=name)
outDict = OrderedDict()
outDict['name'] = name
outDict['oid'] = oidStr
outDict['class'] = 'objecttype'
if syntax[1]:
outDict['syntax'] = syntax[1]
if defval:
outDict['default'] = defval
if units:
outDict['units'] = units
if maxaccess:
outDict['maxaccess'] = maxaccess
if indexStr:
outDict['indices'] = indexStr
if augmention:
augmention = self.transOpers(augmention)
outDict['augmention'] = OrderedDict()
outDict['augmention']['name'] = name
outDict['augmention']['module'] = self.moduleName[0]
outDict['augmention']['object'] = augmention
if self.genRules['text'] and description:
outDict['description'] = description
self.regSym(name, outDict, parentOid)
# TODO
# if fakeSyms: # fake symbols for INDEX to support SMIv1
# for i in range(len(fakeSyms)):
# fakeOutStr = fakeStrlist[i] % oidStr
# self.regSym(fakeSyms[i], fakeOutStr, name)
return outDict
# noinspection PyUnusedLocal
def genTrapType(self, data):
name, enterprise, variables, description, value = data
label = self.genLabel(name)
name = self.transOpers(name)
enterpriseStr, parentOid = enterprise
outDict = OrderedDict()
outDict['name'] = name
outDict['oid'] = enterpriseStr + '0.' + str(value)
outDict['class'] = 'notificationtype'
if variables:
outDict['objects'] = [{'module': self.moduleName[0], 'object': self.transOpers(obj)} for obj in variables]
if self.genRules['text'] and description:
outDict['description'] = description
self.regSym(name, outDict, parentOid)
return outDict
# noinspection PyUnusedLocal
def genTypeDeclaration(self, data):
name, declaration = data
outDict = OrderedDict()
outDict['name'] = name
outDict['class'] = 'type'
if declaration:
parentType, attrs = declaration
if parentType: # skipping SEQUENCE case
name = self.transOpers(name)
outDict.update(attrs)
self.regSym(name, outDict)
return outDict
# noinspection PyUnusedLocal
def genValueDeclaration(self, data):
name, oid = data
label = self.genLabel(name)
name = self.transOpers(name)
oidStr, parentOid = oid
outDict = OrderedDict()
outDict['name'] = name
outDict['oid'] = oidStr
outDict['class'] = 'objectidentity'
self.regSym(name, outDict, parentOid)
return outDict
# Subparts generation functions
# noinspection PyMethodMayBeStatic,PyUnusedLocal
def genBitNames(self, data):
names = data[0]
return names
def genBits(self, data):
bits = data[0]
return 'Bits', dict(bits)
# noinspection PyUnusedLocal
def genCompliances(self, data):
compliances = []
for complianceModule in data[0]:
name = complianceModule[0] or self.moduleName[0]
compliances += [{'object': self.transOpers(compl), 'module': name} for compl in complianceModule[1]]
return compliances
# noinspection PyUnusedLocal
def genConceptualTable(self, data):
row = data[0]
if row[1] and row[1][-2:] == '()':
row = row[1][:-2]
self._rows.add(row)
return 'MibTable', ''
# noinspection PyMethodMayBeStatic,PyUnusedLocal
def genContactInfo(self, data):
text = data[0]
return re.sub('\s+', ' ', text)
# noinspection PyUnusedLocal
def genDisplayHint(self, data):
return re.sub('\s+', ' ', data[0])
# noinspection PyUnusedLocal
def genDefVal(self, data, objname=None):
if not data:
return {}
if not objname:
return data
outDict = OrderedDict()
defval = data[0]
defvalType = self.getBaseType(objname, self.moduleName[0])
if isinstance(defval, (int, long)): # number
outDict.update(value=defval, format='decimal')
elif self.isHex(defval): # hex
if defvalType[0][0] in ('Integer32', 'Integer'): # common bug in MIBs
outDict.update(value=str(int(defval[1:-2], 16)), format='hex')
else:
outDict.update(value=defval[1:-2], format='hex')
elif self.isBinary(defval): # binary
binval = defval[1:-2]
if defvalType[0][0] in ('Integer32', 'Integer'): # common bug in MIBs
outDict.update(value=str(int(binval or '0', 2)), format='bin')
else:
hexval = binval and hex(int(binval, 2))[2:] or ''
outDict.update(value=hexval, format='hex')
elif defval[0] == defval[-1] and defval[0] == '"': # quoted string
if defval[1:-1] == '' and defvalType != 'OctetString': # common bug
# a warning should be here
return {} # we will set no default value
outDict.update(value=defval[1:-1], format='string')
else: # symbol (oid as defval) or name for enumeration member
if defvalType[0][0] == 'ObjectIdentifier' and \
(defval in self.symbolTable[self.moduleName[0]] or defval in self._importMap): # oid
module = self._importMap.get(defval, self.moduleName[0])
try:
val = str(self.genNumericOid(self.symbolTable[module][defval]['oid']))
outDict.update(value=val, format='oid')
except:
# or no module if it will be borrowed later
raise error.PySmiSemanticError('no symbol "%s" in module "%s"' % (defval, module))
# enumeration
elif defvalType[0][0] in ('Integer32', 'Integer') and \
isinstance(defvalType[1], list) and defval in dict(defvalType[1]):
outDict.update(value=defval, format='enum')
elif defvalType[0][0] == 'Bits':
defvalBits = []
bits = dict(defvalType[1])
for bit in defval:
bitValue = bits.get(bit, None)
if bitValue is not None:
defvalBits.append((bit, bitValue))
else:
raise error.PySmiSemanticError('no such bit as "%s" for symbol "%s"' % (bit, objname))
outDict.update(value=self.genBits([defvalBits])[1], format='bits')
return outDict
else:
raise error.PySmiSemanticError(
'unknown type "%s" for defval "%s" of symbol "%s"' % (defvalType, defval, objname))
return {'default': outDict}
# noinspection PyMethodMayBeStatic,PyUnusedLocal
def genDescription(self, data):
text = data[0]
return re.sub('\s+', ' ', text)
def genEnumSpec(self, data):
items = data[0]
return {'enumeration': dict(items)}
# noinspection PyUnusedLocal
def genTableIndex(self, data):
def genFakeSyms(fakeidx, idxType):
fakeSymName = 'pysmiFakeCol%s' % fakeidx
objType = self.typeClasses.get(idxType, idxType)
objType = self.transOpers(objType)
return {'module': self.moduleName[0],
'object': objType}
indexes = data[0]
idxStrlist, fakeSyms, fakeStrlist = [], [], []
for idx in indexes:
idxName = idx[1]
if idxName in self.smiv1IdxTypes: # SMIv1 support
idxType = idxName
fakeSymStr, idxName = genFakeSyms(self.fakeidx, idxType)
fakeStrlist.append(fakeSymStr)
fakeSyms.append(idxName)
self.fakeidx += 1
index = OrderedDict()
index['module'] = self._importMap.get(idxName, self.moduleName[0])
index['object'] = idxName
idxStrlist.append(index)
return idxStrlist, fakeStrlist, fakeSyms
def genIntegerSubType(self, data):
ranges = []
for rng in data[0]:
vmin, vmax = len(rng) == 1 and (rng[0], rng[0]) or rng
vmin, vmax = self.str2int(vmin), self.str2int(vmax)
ran = OrderedDict()
ran['min'] = vmin
ran['max'] = vmax
ranges.append(ran)
return {'range': ranges}
# noinspection PyMethodMayBeStatic,PyUnusedLocal
def genMaxAccess(self, data):
access = data[0]
return access
def genOctetStringSubType(self, data):
sizes = []
for rng in data[0]:
vmin, vmax = len(rng) == 1 and (rng[0], rng[0]) or rng
vmin, vmax = self.str2int(vmin), self.str2int(vmax)
size = OrderedDict()
size['min'] = vmin
size['max'] = vmax
sizes.append(size)
return {'size': sizes}
# noinspection PyUnusedLocal
def genOid(self, data):
out = ()
parent = ''
for el in data[0]:
if isinstance(el, (str, unicode)):
parent = self.transOpers(el)
out += ((parent, self._importMap.get(parent, self.moduleName[0])),)
elif isinstance(el, (int, long)):
out += (el,)
elif isinstance(el, tuple):
out += (el[1],) # XXX Do we need to create a new object el[0]?
else:
raise error.PySmiSemanticError('unknown datatype for OID: %s' % el)
return '.'.join([str(x) for x in self.genNumericOid(out)]), parent
# noinspection PyUnusedLocal
def genObjects(self, data):
if data[0]:
return [self.transOpers(obj) for obj in data[0]] # XXX self.transOpers or not??
return []
# noinspection PyMethodMayBeStatic,PyUnusedLocal
def genTime(self, data):
times = []
for t in data:
lenTimeStr = len(t)
if lenTimeStr == 11:
t = '19' + t
# XXX raise in strict mode
# elif lenTimeStr != 13:
# raise error.PySmiSemanticError("Invalid date %s" % t)
try:
times.append(strftime('%Y-%m-%d %H:%M', strptime(t, '%Y%m%d%H%MZ')))
except ValueError:
# XXX raise in strict mode
# raise error.PySmiSemanticError("Invalid date %s: %s" % (t, sys.exc_info()[1]))
t = '197001010000Z' # dummy date for dates with typos
times.append(strftime('%Y-%m-%d %H:%M', strptime(t, '%Y%m%d%H%MZ')))
return times
# noinspection PyMethodMayBeStatic,PyUnusedLocal
def genLastUpdated(self, data):
text = data[0]
return re.sub('\s+', ' ', text)
# noinspection PyMethodMayBeStatic,PyUnusedLocal
def genOrganization(self, data):
text = data[0]
return re.sub('\s+', ' ', text)
# noinspection PyUnusedLocal
def genRevisions(self, data):
times = self.genTime(data[0])
return times
def genRow(self, data):
row = data[0]
row = self.transOpers(row)
return row in self.symbolTable[self.moduleName[0]]['_symtable_rows'] and (
'MibTableRow', '') or self.genSimpleSyntax(data)
# noinspection PyUnusedLocal
def genSequence(self, data):
cols = data[0]
self._cols.update(cols)
return '', ''
def genSimpleSyntax(self, data):
objType = data[0]
objType = self.typeClasses.get(objType, objType)
objType = self.transOpers(objType)
subtype = len(data) == 2 and data[1] or {}
outDict = OrderedDict()
outDict['type'] = objType
outDict['class'] = 'type'
if subtype:
outDict['constraints'] = subtype
return 'MibScalar', outDict
# noinspection PyUnusedLocal
def genTypeDeclarationRHS(self, data):
if len(data) == 1:
parentType, attrs = data[0]
outDict = OrderedDict()
if not attrs:
return outDict
# just syntax
outDict['type'] = attrs['type']
else:
# Textual convention
display, syntax = data
parentType, attrs = syntax
outDict = OrderedDict()
outDict['type'] = attrs['type']
outDict['class'] = 'textualconvention'
if display:
outDict['displayhint'] = display
return parentType, outDict
# noinspection PyMethodMayBeStatic,PyUnusedLocal
def genUnits(self, data):
text = data[0]
return re.sub('\s+', ' ', text)
handlersTable = {
'agentCapabilitiesClause': genAgentCapabilities,
'moduleIdentityClause': genModuleIdentity,
'moduleComplianceClause': genModuleCompliance,
'notificationGroupClause': genNotificationGroup,
'notificationTypeClause': genNotificationType,
'objectGroupClause': genObjectGroup,
'objectIdentityClause': genObjectIdentity,
'objectTypeClause': genObjectType,
'trapTypeClause': genTrapType,
'typeDeclaration': genTypeDeclaration,
'valueDeclaration': genValueDeclaration,
'ApplicationSyntax': genSimpleSyntax,
'BitNames': genBitNames,
'BITS': genBits,
'ComplianceModules': genCompliances,
'conceptualTable': genConceptualTable,
'CONTACT-INFO': genContactInfo,
'DISPLAY-HINT': genDisplayHint,
'DEFVAL': genDefVal,
'DESCRIPTION': genDescription,
'enumSpec': genEnumSpec,
'INDEX': genTableIndex,
'integerSubType': genIntegerSubType,
'MaxAccessPart': genMaxAccess,
'Notifications': genObjects,
'octetStringSubType': genOctetStringSubType,
'objectIdentifier': genOid,
'Objects': genObjects,
'LAST-UPDATED': genLastUpdated,
'ORGANIZATION': genOrganization,
'Revisions': genRevisions,
'row': genRow,
'SEQUENCE': genSequence,
'SimpleSyntax': genSimpleSyntax,
'typeDeclarationRHS': genTypeDeclarationRHS,
'UNITS': genUnits,
'VarTypes': genObjects,
# 'a': lambda x: genXXX(x, 'CONSTRAINT')
}
def genCode(self, ast, symbolTable, **kwargs):
self.genRules['text'] = kwargs.get('genTexts', False)
self.symbolTable = symbolTable
self._rows.clear()
self._cols.clear()
self._seenSyms.clear()
self._importMap.clear()
self._out.clear()
self.moduleName[0], moduleOid, imports, declarations = ast
outDict, importedModules = self.genImports(imports and imports or {})
for declr in declarations or []:
if declr:
self.handlersTable[declr[0]](self, self.prepData(declr[1:]))
for sym in self.symbolTable[self.moduleName[0]]['_symtable_order']:
if sym not in self._out:
raise error.PySmiCodegenError('No generated code for symbol %s' % sym)
outDict[sym] = self._out[sym]
if 'comments' in kwargs:
outDict['meta'] = OrderedDict()
outDict['meta']['comments'] = kwargs['comments']
outDict['meta']['module'] = self.moduleName[0]
debug.logger & debug.flagCodegen and debug.logger(
'canonical MIB name %s (%s), imported MIB(s) %s, Python code size %s bytes' % (
self.moduleName[0], moduleOid, ','.join(importedModules) or '<none>', len(outDict)))
return MibInfo(oid=None, name=self.moduleName[0],
imported=tuple([x for x in importedModules if x not in self.fakeMibs])), json.dumps(outDict, indent=2)
def genIndex(self, mibsMap, **kwargs):
out = '\nfrom pysnmp.proto.rfc1902 import ObjectName\n\noidToMibMap = {\n'
count = 0
for name, oid in mibsMap:
out += 'ObjectName("%s"): "%s",\n' % (oid, name)
count += 1
out += '}\n'
if 'comments' in kwargs:
out = ''.join(['# %s\n' % x for x in kwargs['comments']]) + '#\n' + out
out = '#\n# PySNMP MIB indices (http://pysnmp.sf.net)\n' + out
debug.logger & debug.flagCodegen and debug.logger(
'OID->MIB index built, %s entries, %s bytes' % (count, len(out)))
return out
|
[
"[email protected]"
] | |
1bbb76b0c9c1c2dacf2770af4281dbfd3e6218a5
|
eee0b990f4b09c3418d9fdce6ef733cf6df99fc8
|
/ansible_fsm/merge.py
|
35069b8bbf7978681723f9d604514aa3a57bb7d4
|
[
"Apache-2.0"
] |
permissive
|
benthomasson/ansible-fsm
|
6a463339e9ca82fbf3bf36c09b5a978baba1ba5a
|
fc82655c66040bfee1bb3b9defb1c93fbb0d0f97
|
refs/heads/master
| 2020-04-08T19:55:59.943230 | 2019-01-30T13:39:36 | 2019-01-30T13:39:36 | 159,676,903 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,819 |
py
|
from collections import OrderedDict
from pprint import pprint
def _merge_list_of_named_items(merged, a, b, sub_merge_fn):
"""
Merge two lists by merging items with the same name.
"""
a_items = {x['name']: x for x in a}
b_items = OrderedDict()
for item_b in b:
b_items[item_b['name']] = item_b
for key in b_items.keys():
merged.append(sub_merge_fn(b_items[key], a_items.get(key, {})))
for key in a_items.keys():
if key in b_items:
continue
merged.append(a_items[key])
def _merge_ordered_dicts(merged, a, b, skip_keys=[]):
"""
Merge two ordered dicts, preserving the order of the keys from b, then add keys from a that are not in b.
"""
for key in b.keys():
if key in skip_keys:
pass
else:
merged[key] = b[key]
for key in a.keys():
if key in skip_keys:
pass
elif key in b:
continue
else:
merged[key] = a[key]
def merge_ast(a, b):
"""
Merge two ASTs by merging FSMs with the same name.
"""
merged = []
_merge_list_of_named_items(merged, a, b, merge_fsm)
return merged
def merge_fsm(a, b):
"""
Merge two FSMs, preserving the order of the keys from b, then add keys from a that are not in b.
"""
merged = OrderedDict()
_merge_ordered_dicts(merged, a, b, skip_keys=['states'])
merged['states'] = []
_merge_list_of_named_items(merged['states'], a.get('states', []), b.get('states', []), merge_state)
return merged
def merge_state(a, b):
merged = OrderedDict()
_merge_ordered_dicts(merged, a, b, skip_keys=['handlers'])
merged['handlers'] = {}
_merge_ordered_dicts(merged['handlers'], a.get('handlers', {}), b.get('handlers', {}))
return merged
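# A small usage sketch (hypothetical FSM dicts; any mapping with a 'name' key
# and optional 'states'/'handlers' works, since the helpers above only use
# keys(), indexing and .get()):
if __name__ == '__main__':
    a = [{'name': 'fsm1', 'host': 'old-host',
          'states': [{'name': 's1', 'handlers': {'go': 'task_a'}}]}]
    b = [{'name': 'fsm1',
          'states': [{'name': 's1', 'handlers': {'stop': 'task_b'}}]}]
    # b's keys win on order; a contributes keys b lacks (here: 'host'),
    # and state 's1' ends up carrying both handlers.
    pprint(merge_ast(a, b))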
|
[
"[email protected]"
] | |
7f66c61d3053bbfe625c83c90d9fc0a051416dfb
|
3947a6283fd42413dcf68aa3133a13208d17c50e
|
/FinderTemplate.py
|
82d5d398b427b344db5ae7652429abc8c2a2a516
|
[] |
no_license
|
Mystified131/FindingStrings
|
2d60ac114303c7ebdf54b43a1fecc86fb768228c
|
8d3a91957d66c4760818279e99c7579565c19cf0
|
refs/heads/master
| 2020-05-20T15:55:08.417673 | 2019-05-13T14:49:28 | 2019-05-13T14:49:28 | 185,654,392 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 388 |
py
|
#This code imports the modules
import re
import requests
#This code retrieves a blob of text from the remote target
page = requests.get("http://www.thomasparksolutions.com").text
#This code makes a list of the substrings sought
#Replace the string Thomas with your desired regex
results = re.findall('(Thomas)',page)
#This code prints the result(s)
for i in results:
print (i)
|
[
"[email protected]"
] | |
7bceaa84866c21708fd5eef20d623749a08a1709
|
46e57ccd746256fa4e471e53d8c24ce285635f50
|
/专题训练/数组/中等/3sum Closest.py
|
709fa3231efcccf5eb486141b9cdf79c39ec31f7
|
[] |
no_license
|
sandwu/leetcode_problems
|
6aea1f1837f5a8cfebc8f9c4cd8863081dfabb87
|
b99ef39268c5713fae3dbbf0a5548ec1f5a7e434
|
refs/heads/master
| 2021-08-16T15:48:05.412320 | 2020-04-26T07:04:10 | 2020-04-26T07:04:10 | 168,446,663 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,281 |
py
|
class Solution(object):
"""
    Roughly the same idea as problem 15 (3Sum): sort first, then for each i advance two pointers l and r, computing the three-way sum, comparing it with target and keeping the closest sum so far in sum1, until l >= r; O(n^2) overall.
Runtime: 72 ms, faster than 75.37% of Python online submissions for 3Sum Closest.
Memory Usage: 11 MB, less than 100.00% of Python online submissions for 3Sum Closest.
"""
def threeSumClosest(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
nums.sort()
n = len(nums)
sum1 = nums[0] + nums[1] + nums[2]
        for i in range(n):  # start at 0; starting at 1 would skip triples that use nums[0]
l,r = i+1, n-1
while l < r:
res = nums[i] + nums[l] + nums[r]
                if abs(sum1-target) > abs(res-target): # always keep the sum currently closest to target
sum1 = res
if res == target:
return res
elif res < target:
while l < r and nums[l]==nums[l-1]:
l += 1
l += 1
else:
while l < r and nums[r] == nums[r-1]:
r -= 1
r -= 1
return sum1
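# A quick check (classic example, recomputed by hand rather than taken from
# the judge): -1 + 2 + 1 == 2 is the achievable sum closest to target 1.
#   Solution().threeSumClosest([-1, 2, 1, -4], 1)  # -> 2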
|
[
"[email protected]"
] | |
e6507917417689eff40a169e51c981fd54ab8bd0
|
63eb05febaac75f781a266d48d1cfff2debe64ea
|
/the_tale/game/bills/tests/test_building_destroy.py
|
3062c4ba21cf15e1d64eaa62978d2dd74b318cee
|
[
"BSD-2-Clause-Views"
] |
permissive
|
MadRiw/the-tale
|
185ca33e410a59de63a594daf15fc8a5701338d2
|
1801beab2ed149556c0b3380e8adaaa976f74e6c
|
refs/heads/master
| 2021-01-15T23:45:34.873857 | 2015-06-17T13:06:12 | 2015-06-17T13:06:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,269 |
py
|
# coding: utf-8
import mock
import datetime
from the_tale.game import names
from the_tale.game.map.places.models import Building
from the_tale.game.map.places.prototypes import BuildingPrototype
from the_tale.game.map.places.storage import buildings_storage
from the_tale.game.map.places.relations import BUILDING_STATE
from the_tale.game.bills.relations import BILL_STATE
from the_tale.game.bills.prototypes import BillPrototype, VotePrototype
from the_tale.game.bills.bills import BuildingDestroy
from the_tale.game.bills.tests.helpers import BaseTestPrototypes
class BuildingDestroyTests(BaseTestPrototypes):
def setUp(self):
super(BuildingDestroyTests, self).setUp()
self.person_1 = self.place1.persons[0]
self.person_2 = self.place2.persons[0]
self.person_3 = self.place3.persons[0]
self.building_1 = BuildingPrototype.create(self.person_1, utg_name=names.generator.get_test_name('building-name-1'))
self.building_2 = BuildingPrototype.create(self.person_2, utg_name=names.generator.get_test_name('building-name-2'))
self.bill_data = BuildingDestroy(person_id=self.person_1.id, old_place_name_forms=self.place1.utg_name)
self.bill = BillPrototype.create(self.account1, 'bill-1-caption', 'bill-1-rationale', self.bill_data, chronicle_on_accepted='chronicle-on-accepted')
def test_create(self):
self.assertEqual(self.bill.data.person_id, self.person_1.id)
def test_actors(self):
self.assertEqual([id(a) for a in self.bill_data.actors], [id(self.person_1.place)])
def test_update(self):
form = self.bill.data.get_user_form_update(post={'caption': 'new-caption',
'rationale': 'new-rationale',
'chronicle_on_accepted': 'chronicle-on-accepted-2',
'person': self.person_2.id })
self.assertTrue(form.is_valid())
self.bill.update(form)
self.bill = BillPrototype.get_by_id(self.bill.id)
self.assertEqual(self.bill.data.person_id, self.person_2.id)
def test_user_form_choices(self):
form = self.bill.data.get_user_form_update(initial={'person': self.bill.data.person_id })
persons_ids = []
for city_name, person_choices in form.fields['person'].choices:
persons_ids.extend(choice_id for choice_id, choice_name in person_choices)
self.assertEqual(set(persons_ids), set([self.person_1.id, self.person_2.id]))
@mock.patch('the_tale.game.bills.conf.bills_settings.MIN_VOTES_PERCENT', 0.6)
@mock.patch('the_tale.game.bills.prototypes.BillPrototype.time_before_voting_end', datetime.timedelta(seconds=0))
def test_apply(self):
self.assertEqual(Building.objects.filter(state=BUILDING_STATE.WORKING).count(), 2)
VotePrototype.create(self.account2, self.bill, False)
VotePrototype.create(self.account3, self.bill, True)
form = BuildingDestroy.ModeratorForm({'approved': True})
self.assertTrue(form.is_valid())
self.bill.update_by_moderator(form)
self.assertTrue(self.bill.apply())
bill = BillPrototype.get_by_id(self.bill.id)
self.assertTrue(bill.state.is_ACCEPTED)
self.assertEqual(Building.objects.filter(state=BUILDING_STATE.WORKING).count(), 1)
self.assertEqual(len(buildings_storage.all()), 1)
building = buildings_storage.all()[0]
self.assertNotEqual(building.id, self.building_1.id)
@mock.patch('the_tale.game.bills.conf.bills_settings.MIN_VOTES_PERCENT', 0.6)
@mock.patch('the_tale.game.bills.prototypes.BillPrototype.time_before_voting_end', datetime.timedelta(seconds=0))
def test_duplicate_apply(self):
self.assertEqual(Building.objects.filter(state=BUILDING_STATE.WORKING).count(), 2)
VotePrototype.create(self.account2, self.bill, False)
VotePrototype.create(self.account3, self.bill, True)
form = BuildingDestroy.ModeratorForm({'approved': True})
self.assertTrue(form.is_valid())
self.bill.update_by_moderator(form)
self.assertTrue(self.bill.apply())
bill = BillPrototype.get_by_id(self.bill.id)
bill.state = BILL_STATE.VOTING
bill.save()
self.assertTrue(bill.apply())
self.assertEqual(Building.objects.filter(state=BUILDING_STATE.WORKING).count(), 1)
@mock.patch('the_tale.game.bills.conf.bills_settings.MIN_VOTES_PERCENT', 0.6)
@mock.patch('the_tale.game.bills.prototypes.BillPrototype.time_before_voting_end', datetime.timedelta(seconds=0))
def test_no_building(self):
self.assertEqual(Building.objects.filter(state=BUILDING_STATE.WORKING).count(), 2)
VotePrototype.create(self.account2, self.bill, False)
VotePrototype.create(self.account3, self.bill, True)
form = BuildingDestroy.ModeratorForm({'approved': True})
self.assertTrue(form.is_valid())
self.bill.update_by_moderator(form)
self.building_1.destroy()
self.building_1.save()
self.assertTrue(self.bill.apply())
self.assertEqual(Building.objects.filter(state=BUILDING_STATE.WORKING).count(), 1)
|
[
"[email protected]"
] | |
77f81016ce59e4a6cc2945653f32bd7d8102a644
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02862/s826059466.py
|
e92f0b2b83b7a167580486ae93c9f6ca0120f581
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 689 |
py
|
import sys
# N = int(input())
X, Y = [int(i) for i in input().split()]
def cmb(n, r, mod):
if ( r<0 or r>n ):
return 0
r = min(r, n-r)
return g1[n] * g2[r] * g2[n-r] % mod
mod = 10**9+7  # modulus required for the output
N = 10**6
g1 = [1, 1]  # factorial table
g2 = [1, 1]  # inverse factorial table
inverse = [0, 1]  # helper table used to build the modular inverses
for i in range( 2, N + 1 ):
g1.append( ( g1[-1] * i ) % mod )
inverse.append( ( -inverse[mod % i] * (mod//i) ) % mod )
g2.append( (g2[-1] * inverse[-1]) % mod )
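# The arithmetic below is consistent with counting paths made of the two steps
# (+1, +2) and (+2, +1): with a steps of the first kind and b of the second,
# a + 2b = X and 2a + b = Y, hence a = (2Y - X) / 3 and b = (2X - Y) / 3 must
# both be non-negative integers, and the answer is the C(a+b, a) orderings.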
a_3 = 2 * Y - X
b_3 = 2 * X - Y
if a_3 % 3 != 0 or b_3 % 3 != 0 or a_3 < 0 or b_3 < 0:
print(0)
sys.exit(0)
a = a_3//3
b = b_3//3
print(cmb(a+b,a,mod))
|
[
"[email protected]"
] | |
c50b7650fc3230e08546d4b0bde6a864e55bb463
|
e341bc45e2b889ae5f88576e6ef7c3e9c84a9f60
|
/dojo/a/Python_OOP_Slack/Python_OOP/Human/server.py
|
8c4b6aecd0d364dde3c270cacc0b19c2498b5d87
|
[] |
no_license
|
jhflorey/Web_Automation_Framework
|
8efb5b91a9b4ef01f2c8bde0856ea8e85467ca1d
|
41bc124d3eaf2985667afa399e4789c26de8744a
|
refs/heads/master
| 2021-01-25T07:54:47.907619 | 2017-06-09T00:34:03 | 2017-06-09T00:34:03 | 93,682,430 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 912 |
py
|
import random
class Human(object):
def __init__(self, clan=None):
print 'New Human!!!'
self.health = 100
self.clan = clan
self.strength = 3
self.intelligence = 3
self.stealth = 3
    def taunt(self):
        print "You want a piece of me?"
    def attack(self):
        self.taunt()
        luck = round(random.random() * 100)
        if luck > 50:
            if (luck * self.stealth) > 150:
                print 'attacking!'
                return True
            else:
                print 'attack failed'
                return False
        else:
            self.health -= self.strength
            print "attack failed"
            return False
class Cat(object):
    def __init__(self, color, type, age):
        self.color = color
        self.type = type
        self.age = age
michael = Human('clan')
michael.taunt()
print michael.health
garfield = Cat('orange', 'fat', 5)  # three separate arguments, not one combined string
print garfield.color
print garfield.age
print garfield.type
|
[
"[email protected]"
] | |
24d8aaf428c14575bceaaa1eb0d3fc8997296484
|
902facd06f1f37f2a65a8e5c1b1a208a5429fba3
|
/buildout-cache/eggs/collective.carousel-1.7-py2.7.egg/collective/carousel/tests/test_portlet.py
|
fc410d96dddb0797ded993e4d974bf2cf356cea6
|
[] |
no_license
|
Vinsurya/Plone
|
8366e57383da90b61aea82ab08a90415d753a15b
|
55e273528cd5db4bbd1929a23ef74c3d873ec690
|
refs/heads/master
| 2021-01-19T18:55:48.515216 | 2015-05-12T16:10:08 | 2015-05-12T16:10:08 | 33,325,981 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,484 |
py
|
# Carousel is rendered through a viewlet in IAboveContent
# using items provided by the carousel provider added to the context
from collective.carousel.portlets import carousel
from collective.carousel.tests.base import TestCase
from plone.app.portlets.storage import PortletAssignmentMapping
from plone.portlets.interfaces import IPortletAssignment
from plone.portlets.interfaces import IPortletDataProvider
from plone.portlets.interfaces import IPortletManager
from plone.portlets.interfaces import IPortletRenderer
from plone.portlets.interfaces import IPortletType
from zope.component import getUtility, getMultiAdapter
# default test query
query = [{
'i': 'portal_type',
'o': 'plone.app.querystring.operation.selection.is',
'v': ['Document', 'Event', 'News Item']
}]
class PortletTest(TestCase):
def afterSetUp(self):
"""Set up the carousel Collection and some dummy objects"""
self.setRoles('Manager')
self.folder.invokeFactory('Collection', 'collection')
collection = getattr(self.folder, 'collection')
collection.setQuery(query)
field = self.folder.Schema().getField('carouselprovider')
field.set(self.folder, collection)
# add a few objects
self.folder.invokeFactory('Document', 'carousel-doc')
self.folder.invokeFactory('News Item', 'carousel-news-item')
self.folder.invokeFactory('Event', 'carousel-event')
def testPortletTypeRegistered(self):
portlet = getUtility(IPortletType, name='portlet.Carousel')
self.assertEquals(portlet.addview, 'portlet.Carousel')
def testInterfaces(self):
portlet = carousel.Assignment(header=u"title")
self.failUnless(IPortletAssignment.providedBy(portlet))
self.failUnless(IPortletDataProvider.providedBy(portlet.data))
def testInvokeAddview(self):
portlet = getUtility(IPortletType, name='portlet.Carousel')
mapping = self.portal.restrictedTraverse(
'++contextportlets++plone.leftcolumn')
for m in mapping.keys():
del mapping[m]
addview = mapping.restrictedTraverse('+/' + portlet.addview)
addview.createAndAdd(data={'header': u"test title"})
self.assertEquals(len(mapping), 1)
self.failUnless(isinstance(mapping.values()[0], carousel.Assignment))
def testInvokeEditView(self):
mapping = PortletAssignmentMapping()
request = self.folder.REQUEST
mapping['foo'] = carousel.Assignment(header=u"title")
editview = getMultiAdapter((mapping['foo'], request), name='edit')
self.failUnless(isinstance(editview, carousel.EditForm))
def testRenderer(self):
context = self.folder
request = self.folder.REQUEST
view = self.folder.restrictedTraverse('@@plone')
manager = getUtility(IPortletManager, name='plone.rightcolumn',
context=self.portal)
assignment = carousel.Assignment(header=u"title")
renderer = getMultiAdapter((context, request, view,
manager, assignment),
IPortletRenderer)
self.failUnless(isinstance(renderer, carousel.Renderer))
class TestRenderer(TestCase):
# default test query
query = [{
'i': 'portal_type',
'o': 'plone.app.querystring.operation.selection.is',
'v': ['Document', 'Event', 'News Item']
}]
def afterSetUp(self):
self.setRoles(('Manager', ))
self.folder.invokeFactory('Collection', 'collection')
collection = getattr(self.folder, 'collection')
collection.setQuery(query)
field = self.folder.Schema().getField('carouselprovider')
field.set(self.folder, collection)
# add a few objects
self.folder.invokeFactory('Document', 'carousel-doc')
self.folder.invokeFactory('News Item', 'carousel-news-item')
self.folder.invokeFactory('Event', 'carousel-event')
def renderer(self, context=None, request=None, view=None,
manager=None, assignment=None):
context = context or self.folder
request = request or self.folder.REQUEST
view = view or self.folder.restrictedTraverse('@@plone')
manager = manager or getUtility(IPortletManager,
name='plone.rightcolumn',
context=self.portal)
assignment = assignment or carousel.Assignment(header=u"title")
return getMultiAdapter((context, request, view, manager, assignment),
IPortletRenderer)
def test_render(self):
r = self.renderer(context=self.portal,
assignment=carousel.Assignment(
header=u"title",
target_collection=
'/plone/Members/test_user_1_/collection'))
r = r.__of__(self.folder)
r.update()
output = r.render()
self.assertTrue('title' in output)
def test_css_class(self):
r = self.renderer(
context=self.portal,
assignment=carousel.Assignment(header=u"Test carousel"))
self.assertEquals('portlet-carousel-test-carousel', r.css_class())
def test_hideheader(self):
r = self.renderer(
context=self.portal,
assignment=carousel.Assignment(header=u"Test carousel", hideheader=True))
output = r.render()
self.failUnless('class="portletHeader hiddenStructure"' in output)
def test_portlet_collection(self):
query = [{
'i': 'portal_type',
'o': 'plone.app.querystring.operation.selection.is',
'v': ['Document']
}]
# add a few documents
for i in range(6):
self.folder.invokeFactory('Document', 'document_%s' % i)
getattr(self.folder, 'document_%s' % i).reindexObject()
collection = getattr(self.folder, 'collection')
collection.setQuery(query)
# the documents are returned by the collection
collection_num_items = len(self.folder.collection.queryCatalog())
# We better have some documents - we should have 8
self.failUnless(collection_num_items >= 8)
mapping = PortletAssignmentMapping()
mapping['foo'] = carousel.Assignment(
header=u"Test carousel",
target_collection='/Members/test_user_1_/collection')
r = self.renderer(context=None, request=None, view=None,
manager=None, assignment=mapping['foo'])
# sanity check
self.assertEqual(r.collection().id, 'collection')
# we want the portlet to return us the same results as the collection
self.assertEquals(collection_num_items, len(r.results()))
def test_edit_link(self):
collection = getattr(self.folder, 'collection')
collection.setQuery(query)
mapping = PortletAssignmentMapping()
mapping['foo'] = carousel.Assignment(
header=u"Test carousel",
target_collection='/Members/test_user_1_/collection')
r = self.renderer(context=None, request=None, view=None,
manager=None, assignment=mapping['foo'])
self.assertTrue(r.editCarouselLink().endswith('/edit'))
def test_suite():
from unittest import defaultTestLoader
return defaultTestLoader.loadTestsFromName(__name__)
|
[
"[email protected]"
] | |
a3f6e118f74a61067ad87c807459fdbef492e5fd
|
118704d5cc395019a3afb2aa74e8dc87da7fb8ba
|
/errorhandles.py
|
f955ef33c1a989108c2f66122c20a5f7382e1771
|
[] |
no_license
|
daniemart5/PythangMan
|
a2a0eb546b9ce824009da796bccc5b63ca550d15
|
7a957f7218b9053306fd99eef91aa8e2c57ae8b8
|
refs/heads/master
| 2020-09-02T10:58:08.912103 | 2019-11-15T19:09:20 | 2019-11-15T19:09:20 | 219,206,382 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 205 |
py
|
try:
age = int(input('Age: '))
income = 20000
risk = income / age
print(age)
except ZeroDivisionError:
print("Age must be higher than 0")
except ValueError:
print('Invalid value')
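# A slightly fuller sketch (an assumed extension, not part of the original
# exercise) showing where an else clause would fit:
# try:
#     age = int(input('Age: '))
#     risk = 20000 / age
# except ZeroDivisionError:
#     print("Age must be higher than 0")
# except ValueError:
#     print('Invalid value')
# else:
#     print(risk)  # runs only when no exception was raised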
|
[
"github email address"
] |
github email address
|
f015df5f2e90e880e6be266068df95216321a9fa
|
4a42a8b32e2a79d598981141961396a6c2d625d2
|
/dialogbot/views.py
|
4d330c7a654e98a887491229c0105f322d53c20d
|
[] |
no_license
|
sgmagar/DialogSlackBot
|
e851b2c44a11fd56b80c74e15b3dfb8fb8f74d22
|
89fddbacf58e6c8cf45d95d86411fd0668640c77
|
refs/heads/master
| 2022-12-15T22:46:53.536518 | 2018-07-08T04:03:41 | 2018-07-08T04:03:41 | 138,411,634 | 0 | 2 | null | 2022-12-08T02:13:27 | 2018-06-23T15:19:34 |
Python
|
UTF-8
|
Python
| false | false | 4,922 |
py
|
import json
import logging
from urllib.parse import urlencode
import os
import requests
from django.http import HttpResponse, JsonResponse
from django.views import View
from django.views.generic import TemplateView
from slackclient import SlackClient
from dialogbot.models import Team, Category
from .mixins import SlackMixin
from .dialogs import category_form
class IndexView(SlackMixin, TemplateView):
template_name = 'dialogbot/index.html'
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx.update({
'title': 'Home',
'authorization_url': self.get_authorization_url()
})
return ctx
def get_authorization_url(self):
state = os.urandom(8).hex()
self.request.session["slack_oauth_state"] = state
query = urlencode({
"client_id": self.client_id,
"scope": self.scopes,
"redirect_uri": self.get_redirect_url(),
"state": state
})
return "https://slack.com/oauth/authorize?" + query
class OauthCallbackView(SlackMixin, TemplateView):
template_name = 'dialogbot/auth_result.html'
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['error'] = kwargs.get('error') or None
        return ctx
def get(self, request, *args, **kwargs):
response = self.exchange_code_for_token()
logging.info(response)
        # exchange_code_for_token may return an error dict; only persist on success
        if 'error' not in response:
            Team.objects.update_or_create(
                team_id=response["team_id"], app_id=self.client_id, defaults={
                    "user_access_token": response["access_token"],
                    "bot_access_token": response["bot"]["bot_access_token"],
                    "team_name": response['team_name']
                }
            )
        return self.render_to_response(response)
def exchange_code_for_token(self):
code = self.request.GET.get("code")
state = self.request.GET.get("state")
error = self.request.GET.get("error")
if error or not state or state != self.request.session.get('slack_oauth_state'):
return {
'error': "Error while installing rocket app in your workspace."
}
sc = SlackClient("")
# Request the auth tokens from Slack
response = sc.api_call(
"oauth.access",
client_id=self.client_id,
client_secret=self.client_secret,
redirect_uri=self.get_redirect_url(),
code=code
)
if not response.get("ok"):
return {
'error': "Error while installing rocket app in your workspace."
}
return response
class CommandView(SlackMixin, View):
# for setting team object
set_team_obj = True
def post(self, request, *args, **kwargs):
command = self.data['command'].strip('/')
try:
method = getattr(self, f'{command}_command')
except AttributeError:
logging.info(f'Unhandled command {command}')
return HttpResponse(status=200)
return method()
def interrupt_command(self):
resolution_times = [1, 4, 8, 12]
types = ['Interruption', 'Service Outage']
        categories = Category.objects.values_list('title', flat=True) or ['sample_category']
payload = {
'token': self.team.bot_access_token,
'trigger_id': self.data['trigger_id'],
'dialog': json.dumps(category_form(resolution_times, types, categories))
}
response = requests.post('https://slack.com/api/dialog.open', params=payload)
return HttpResponse(status=200)
class InteractionView(SlackMixin, View):
# for setting team object
set_team_obj = True
def post(self, request, *args, **kwargs):
callback_id = self.data['callback_id']
try:
method = getattr(self, f'handle_{callback_id}')
except AttributeError:
logging.info(f'Unhandled interaction {callback_id}')
return HttpResponse(status=200)
return method()
def handle_category(self):
submission = self.data['submission']
username = self.data['user']['name']
message = {
'text': f"Category Submission Success by `{username}`",
'attachments': get_attachments(submission)
}
requests.post(self.data['response_url'], data=json.dumps(message))
return HttpResponse(status=200)
def get_attachments(submission):
fields = [
{
"title": key.replace("_", " ").title(),
"value": value
}
for key, value in submission.items()
]
attachment = {
"color": "#aaefab",
"mrkdwn_in": ['fields', 'text', 'pretext'],
"fields": fields,
'footer': 'Category',
}
return [attachment]
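# Illustrative example for get_attachments (the submission values are made up):
# get_attachments({"incident_type": "Interruption", "resolution_time": 4})
# returns one attachment whose fields are
# [{"title": "Incident Type", "value": "Interruption"},
#  {"title": "Resolution Time", "value": 4}]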
|
[
"[email protected]"
] | |
f9dcccc861d7d82429d1611cba13efbfed9c30d5
|
309ac0cd757422b77e2bd820205fcafd11216bc9
|
/src/feature/BOB.py
|
47be42cf04e31d7c9a3a4d911e57ef4b855d3cd2
|
[
"Apache-2.0"
] |
permissive
|
jainal09/speaker-recognition
|
024722fc16c60833a20529fdd9d651ee607ba9d3
|
53b0b5a3a7d6134e9bf21dfa4c1a7519b8e3389f
|
refs/heads/master
| 2020-09-04T17:28:46.365993 | 2019-11-05T19:16:03 | 2019-11-05T19:16:03 | 219,831,541 | 0 | 0 |
Apache-2.0
| 2019-11-05T19:15:33 | 2019-11-05T19:15:32 | null |
UTF-8
|
Python
| false | false | 993 |
py
|
#!/usr/bin/env python2
# -*- coding: UTF-8 -*-
# File: BOB.py
# Date: Wed Oct 29 22:38:35 2014 +0800
# Author: Yuxin Wu <[email protected]>
from utils import cached_func, diff_feature
import bob
import bob.ap
import numpy
@cached_func
def get_bob_extractor(fs, win_length_ms=32, win_shift_ms=16,
n_filters=55, n_ceps=19, f_min=0., f_max=6000,
delta_win=2, pre_emphasis_coef=0.95, dct_norm=True,
mel_scale=True):
ret = bob.ap.Ceps(fs, win_length_ms, win_shift_ms, n_filters, n_ceps, f_min,
f_max, delta_win, pre_emphasis_coef, mel_scale, dct_norm)
return ret
def extract(fs, signal=None, diff=False, **kwargs):
"""accept two argument, or one as a tuple"""
if signal is None:
assert type(fs) == tuple
fs, signal = fs[0], fs[1]
signal = numpy.cast['float'](signal)
ret = get_bob_extractor(fs, **kwargs)(signal)
if diff:
return diff_feature(ret)
return ret
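# A minimal usage sketch (assumptions: scipy is available and "speech.wav" is
# a hypothetical mono recording):
# import scipy.io.wavfile as wavfile
# fs, signal = wavfile.read("speech.wav")
# mfcc = extract(fs, signal, diff=True)  # per-frame cepstra plus delta features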
|
[
"[email protected]"
] | |
e814636b3b0b7fc2d863355d0f65f5daf9722c50
|
482ec3480e8418dda62f85a5359e70c89256f1a8
|
/pythonTutorial/drf_tutorial/quickstart/views.py
|
ca61c5364c666436720a5e36e5a295b7ce045e48
|
[] |
no_license
|
rajatgirotra/study
|
84d319968f31f78798a56362546f21d22abd7ae7
|
acbb6d21a8182feabcb3329e17c76ac3af375255
|
refs/heads/master
| 2023-09-01T20:48:31.137541 | 2023-08-29T01:41:17 | 2023-08-29T01:41:17 | 85,041,241 | 6 | 1 | null | 2023-05-01T19:25:38 | 2017-03-15T07:17:24 |
C++
|
UTF-8
|
Python
| false | false | 602 |
py
|
from django.shortcuts import render
from django.contrib.auth.models import User, Group
from rest_framework import viewsets
from .serializers import UserSerializer, GroupSerializer
# Create your views here.
class UserViewSet(viewsets.ModelViewSet):
""" API Endpoint that allows users to be viewed and edited. """
queryset = User.objects.all().order_by('-date_joined')
serializer_class = UserSerializer
class GroupViewSet(viewsets.ModelViewSet):
""" API Endpoint that allows users to be viewed and edited. """
queryset = Group.objects.all()
serializer_class = GroupSerializer
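# These viewsets are typically wired up through a router; a minimal urls.py
# sketch (not part of this module, route names assumed from the DRF quickstart):
# from rest_framework import routers
# router = routers.DefaultRouter()
# router.register(r'users', UserViewSet)
# router.register(r'groups', GroupViewSet)
# urlpatterns = router.urls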
|
[
"[email protected]"
] | |
e0055c8917f50e0204eac439448028eb2c6d46c9
|
d05c946e345baa67e7894ee33ca21e24b8d26028
|
/web-scraping/pdf-url-extractor/pdf_link_extractor.py
|
f8ba7423e404f066582b76bc2e587b76fac36a74
|
[
"MIT"
] |
permissive
|
x4nth055/pythoncode-tutorials
|
327255550812f84149841d56f2d13eaa84efd42e
|
d6ba5d672f7060ba88384db5910efab1768c7230
|
refs/heads/master
| 2023-09-01T02:36:58.442748 | 2023-08-19T14:04:34 | 2023-08-19T14:04:34 | 199,449,624 | 1,858 | 2,055 |
MIT
| 2023-08-25T20:41:56 | 2019-07-29T12:35:40 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 412 |
py
|
import pikepdf # pip3 install pikepdf
file = "1810.04805.pdf"
# file = "1710.05006.pdf"
pdf_file = pikepdf.Pdf.open(file)
urls = []
# iterate over PDF pages
for page in pdf_file.pages:
    annots = page.get("/Annots")
    if annots is None:
        continue  # page has no annotations
    for annot in annots:
        action = annot.get("/A")
        uri = action.get("/URI") if action is not None else None
        if uri is not None:
            print("[+] URL Found:", uri)
            urls.append(uri)
print("[*] Total URLs extracted:", len(urls))
|
[
"[email protected]"
] | |
397adeb4b7a56622f5af0d2c042b01dd33be81de
|
14d8e090d540a2947ba71038390078d7783ff510
|
/update/models.py
|
dabb8836578ee1835bce4b488ea09814ea5103b0
|
[] |
no_license
|
BrutyLi/osupf
|
657d61c7e8e8711e50006944bfd675cb39bddedc
|
5dc218a05a980755ea218bd5e4b3189096a68c71
|
refs/heads/master
| 2021-01-25T12:52:59.625225 | 2018-03-02T02:34:13 | 2018-03-02T02:34:13 | 123,520,320 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 630 |
py
|
from django.db import models
# Create your models here.
class hostInfo(models.Model):
hname=models.CharField(max_length=32,verbose_name='描述',blank=True)
hip=models.GenericIPAddressField(verbose_name='IPadd')
hcpu=models.CharField(max_length=8,verbose_name='cpu',)
hdisk=models.CharField(max_length=16,verbose_name='磁盘')
huser=models.CharField(max_length=128,verbose_name='用户信息')
hlog=models.TextField(max_length=10240,verbose_name='日志')
def __str__(self):
return self.hname
class Meta:
verbose_name='主机信息'
verbose_name_plural='主机信息'
|
[
"[email protected]"
] | |
f1eb78fa9b327850db28fbb286357a8dd27b0ee2
|
747febe786dd6b7fd6c63cfe73dbe3023354daa8
|
/src/the_tale/the_tale/statistics/models.py
|
d416b459decf2bdf77f76b382d9135e4eca0ee66
|
[
"BSD-3-Clause"
] |
permissive
|
the-tale/the-tale
|
4e4b8d91dc873a5fb935fe58e9721a877baa6d3f
|
e8450bd2332344da805b1851e728da5a3e5bf0ef
|
refs/heads/develop
| 2023-08-01T13:53:46.835667 | 2022-12-25T18:04:56 | 2022-12-25T18:04:56 | 1,949,167 | 98 | 52 |
BSD-3-Clause
| 2023-02-15T18:57:33 | 2011-06-24T18:49:48 |
Python
|
UTF-8
|
Python
| false | false | 465 |
py
|
import smart_imports
smart_imports.all()
class Record(django_models.Model):
date = django_models.DateTimeField(null=False)
type = rels_django.RelationIntegerField(relation=relations.RECORD_TYPE, db_index=True)
value_int = django_models.BigIntegerField()
value_float = django_models.FloatField()
class FullStatistics(django_models.Model):
created_at = django_models.DateTimeField(auto_now_add=True)
data = django_models.JSONField()
|
[
"[email protected]"
] | |
c171379a40fd0890faefcc5b51ef17c8afb1c0e0
|
043160352216a7fc21be4c8a44507e00f523bf80
|
/test/functional/test_framework/test_framework.py
|
63c46950ceec5c0220df49ca42530135eac1aae5
|
[
"MIT"
] |
permissive
|
odinyblockchain/odinycoin
|
5ef2a1bca374230882c91e8c6717bbb8faf889ad
|
183751aac9357455913f1d8a415b1dcb04225ee0
|
refs/heads/master
| 2022-12-18T14:14:02.535216 | 2020-09-20T22:05:14 | 2020-09-20T22:05:14 | 295,208,711 | 0 | 2 |
MIT
| 2020-09-18T10:33:17 | 2020-09-13T18:06:52 |
C++
|
UTF-8
|
Python
| false | false | 49,828 |
py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
from enum import Enum
from io import BytesIO
import logging
import optparse
import os
import pdb
import shutil
from struct import pack
import sys
import tempfile
import time
from . import coverage
from .address import wif_to_privkey
from .authproxy import JSONRPCException
from .blocktools import (
create_block,
create_coinbase_pos,
create_transaction_from_outpoint,
is_zerocoin,
)
from .key import CECKey
from .messages import (
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
hash256,
)
from .script import (
CScript,
OP_CHECKSIG,
)
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
assert_greater_than,
check_json_precision,
connect_nodes_bi,
connect_nodes_clique,
disconnect_nodes,
DEFAULT_FEE,
get_datadir_path,
hex_str_to_bytes,
bytes_to_hex_str,
initialize_datadir,
set_node_times,
SPORK_ACTIVATION_TIME,
SPORK_DEACTIVATION_TIME,
sync_blocks,
sync_mempools,
vZC_DENOMS,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
TMPDIR_PREFIX = "odinycoin_func_test_"
class OdinycoinTestFramework():
"""Base class for a odinycoin test script.
Individual odinycoin test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
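    # A minimal subclass sketch (illustrative only; the test name and parameter
    # values are assumptions, not part of this framework):
    # class ExampleTest(OdinycoinTestFramework):
    #     def set_test_params(self):
    #         self.num_nodes = 2
    #         self.setup_clean_chain = True
    #     def run_test(self):
    #         self.log.info("tip: %s" % self.nodes[0].getbestblockhash())
    # if __name__ == '__main__':
    #     ExampleTest().main()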
def __init__(self):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.setup_clean_chain = False
self.nodes = []
self.mocktime = 0
self.supports_cli = False
self.set_test_params()
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave odinycoinds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop odinycoinds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
help="Source directory containing odinycoind/odinycoin-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_option("--configfile", dest="configfile",
help="Location of the test framework config file")
parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
parser.add_option("--usecli", dest="usecli", default=False, action="store_true",
help="use odinycoin-cli instead of RPC for all commands")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX)
self._start_logging()
success = TestStatus.FAILED
try:
if self.options.usecli and not self.supports_cli:
raise SkipTest("--usecli specified but test does not support using CLI")
self.setup_chain()
self.setup_network()
time.sleep(5)
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
for node in self.nodes:
node.cleanup_on_exit = False
self.log.info("Note: odinycoinds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
self.log.info("Cleaning up")
shutil.rmtree(self.options.tmpdir)
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
if success == TestStatus.PASSED:
self.log.info("Tests successful")
exit_code = TEST_EXIT_PASSED
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
exit_code = TEST_EXIT_SKIPPED
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
exit_code = TEST_EXIT_FAILED
logging.shutdown()
sys.exit(exit_code)
# Methods to override in subclass test scripts.
def set_test_params(self):
"""Tests must this method to change default values for number of nodes, topology, etc"""
raise NotImplementedError
def add_options(self, parser):
"""Override this method to add command-line options to the test"""
pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
for i in range(self.num_nodes - 1):
connect_nodes_bi(self.nodes, i, i + 1)
self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
def run_test(self):
"""Tests must override this method to define test logic"""
raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes, extra_args=None, rpchost=None, timewait=None, binary=None):
"""Instantiate TestNode objects"""
if extra_args is None:
extra_args = [[]] * num_nodes
if binary is None:
binary = [None] * num_nodes
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(i, self.options.tmpdir, extra_args[i], rpchost, timewait=timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, use_cli=self.options.usecli))
def start_node(self, i, *args, **kwargs):
"""Start a odinycoind"""
node = self.nodes[i]
node.start(*args, **kwargs)
node.wait_for_rpc_connection()
time.sleep(10)
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, *args, **kwargs):
"""Start multiple odinycoinds"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
time.sleep(10)
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i):
"""Stop a odinycoind test node"""
self.nodes[i].stop_node()
self.nodes[i].wait_until_stopped()
def stop_nodes(self):
"""Stop multiple odinycoind test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node()
for node in self.nodes:
# Wait for nodes to stop
time.sleep(5)
node.wait_until_stopped()
def restart_node(self, i, extra_args=None):
"""Stop and start a test node"""
self.stop_node(i)
self.start_node(i, extra_args)
def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None, *args, **kwargs):
with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
try:
self.start_node(i, extra_args, stderr=log_stderr, *args, **kwargs)
self.stop_node(i)
except Exception as e:
assert 'odinycoind exited' in str(e) # node must have shutdown
self.nodes[i].running = False
self.nodes[i].process = None
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8')
if expected_msg not in stderr:
raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
else:
if expected_msg is None:
assert_msg = "odinycoind should have exited with an error"
else:
assert_msg = "odinycoind should have exited with expected error " + expected_msg
raise AssertionError(assert_msg)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all([self.nodes[:2], self.nodes[2:]])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes_bi(self.nodes, 1, 2)
self.sync_all()
def sync_all(self, node_groups=None):
if not node_groups:
node_groups = [self.nodes]
for group in node_groups:
sync_blocks(group)
sync_mempools(group)
def enable_mocktime(self):
"""Enable mocktime for the script.
mocktime may be needed for scripts that use the cached version of the
blockchain. If the cached version of the blockchain is used without
mocktime then the mempools will not sync due to IBD.
        Sets mocktime to Thursday, October 31, 2019 6:21:20 PM GMT (1572546080)
"""
self.mocktime = 1572546080
def disable_mocktime(self):
self.mocktime = 0
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
fh.setLevel(logging.DEBUG)
        # Create console handler to log messages to stdout. By default this logs at the INFO level and higher, but it can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
        # Format logs the same as odinycoind's debug.log with microsecond precision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self, toPosPhase=False):
"""Initialize a pre-mined blockchain for use by the test."""
def create_cachedir(cachedir):
if os.path.isdir(cachedir):
shutil.rmtree(cachedir)
os.makedirs(cachedir)
def copy_cachedir(origin, destination, num_nodes=MAX_NODES):
for i in range(num_nodes):
from_dir = get_datadir_path(origin, i)
to_dir = get_datadir_path(destination, i)
shutil.copytree(from_dir, to_dir)
initialize_datadir(destination, i) # Overwrite port/rpcport in odinycoin.conf
def clone_cache_from_node_1(cachedir, from_num=4):
""" Clones cache subdir from node 1 to nodes from 'from_num' to MAX_NODES"""
def copy_and_overwrite(from_path, to_path):
if os.path.exists(to_path):
shutil.rmtree(to_path)
shutil.copytree(from_path, to_path)
assert from_num < MAX_NODES
node_0_datadir = os.path.join(get_datadir_path(cachedir, 0), "regtest")
for i in range(from_num, MAX_NODES):
node_i_datadir = os.path.join(get_datadir_path(cachedir, i), "regtest")
for subdir in ["blocks", "chainstate", "sporks", "zerocoin"]:
copy_and_overwrite(os.path.join(node_0_datadir, subdir),
os.path.join(node_i_datadir, subdir))
initialize_datadir(cachedir, i) # Overwrite port/rpcport in odinycoin.conf
def cachedir_valid(cachedir):
for i in range(MAX_NODES):
if not os.path.isdir(get_datadir_path(cachedir, i)):
return False
# nodes directories exist. check if the first one has the .incomplete flagfile
return (not os.path.exists(os.path.join(get_datadir_path(cachedir, 0), ".incomplete")))
def clean_cache_subdir(cachedir):
os.remove(os.path.join(get_datadir_path(cachedir, 0), ".incomplete"))
def cache_path(n, *paths):
return os.path.join(get_datadir_path(cachedir, n), "regtest", *paths)
for i in range(MAX_NODES):
for entry in os.listdir(cache_path(i)):
if entry not in ['wallet.dat', 'chainstate', 'blocks', 'sporks', 'zerocoin', 'backups']:
os.remove(cache_path(i, entry))
def clean_cache_dir():
if os.path.isdir(self.options.cachedir):
# migrate old cache dir
if cachedir_valid(self.options.cachedir):
powcachedir = os.path.join(self.options.cachedir, "pow")
self.log.info("Found old cachedir. Migrating to %s" % str(powcachedir))
copy_cachedir(self.options.cachedir, powcachedir)
# remove everything except pow and pos subdirs
for entry in os.listdir(self.options.cachedir):
if entry not in ['pow', 'pos']:
entry_path = os.path.join(self.options.cachedir, entry)
if os.path.isfile(entry_path):
os.remove(entry_path)
elif os.path.isdir(entry_path):
shutil.rmtree(entry_path)
# no cachedir found
else:
os.makedirs(self.options.cachedir)
def start_nodes_from_dir(ddir, num_nodes=MAX_NODES):
self.log.info("Starting %d nodes..." % num_nodes)
for i in range(num_nodes):
datadir = initialize_datadir(ddir, i)
if i == 0:
# Add .incomplete flagfile
# (removed at the end during clean_cache_subdir)
open(os.path.join(datadir, ".incomplete"), 'a').close()
args = [os.getenv("BITCOIND", "odinycoind"), "-spendzeroconfchange=1", "-server", "-keypool=1",
"-datadir=" + datadir, "-discover=0"]
self.nodes.append(
TestNode(i, ddir, extra_args=[], rpchost=None, timewait=None, binary=None, stderr=None,
mocktime=self.mocktime, coverage_dir=None))
self.nodes[i].args = args
self.start_node(i)
self.log.info("Node %d started." % i)
# Wait for RPC connections to be ready
self.log.info("Nodes started. Waiting for RPC connections...")
            for node in range(num_nodes):
self.nodes[node].wait_for_rpc_connection()
self.log.info("Connecting nodes")
connect_nodes_clique(self.nodes)
def stop_and_clean_cache_dir(ddir):
self.stop_nodes()
self.nodes = []
# Copy cache for nodes 5 to MAX_NODES
self.log.info("Copying cache dir to non-started nodes")
clone_cache_from_node_1(ddir)
self.log.info("Cleaning up.")
clean_cache_subdir(ddir)
def generate_pow_cache():
### POW Cache ###
# Create a 200-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
            # blocks are created with timestamps one minute apart,
            # starting from 331 minutes in the past
# Create cache directories, run odinycoinds:
create_cachedir(powcachedir)
self.log.info("Creating 'PoW-chain': 200 blocks")
start_nodes_from_dir(powcachedir, 4)
# Mine the blocks
self.log.info("Mining 200 blocks")
self.enable_mocktime()
block_time = self.mocktime - (331 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(self.nodes, block_time)
self.nodes[peer].generate(1)
block_time += 60
# Must sync before next peer starts generating blocks
sync_blocks(self.nodes)
# Shut them down, and clean up cache directories:
self.log.info("Stopping nodes")
stop_and_clean_cache_dir(powcachedir)
self.log.info("---> pow cache created")
self.disable_mocktime()
assert self.num_nodes <= MAX_NODES
clean_cache_dir()
powcachedir = os.path.join(self.options.cachedir, "pow")
is_powcache_valid = cachedir_valid(powcachedir)
poscachedir = os.path.join(self.options.cachedir, "pos")
is_poscache_valid = cachedir_valid(poscachedir)
if not toPosPhase and not is_powcache_valid:
self.log.info("PoW-CACHE NOT FOUND or INVALID.")
self.log.info("Creating new cached blockchain data.")
generate_pow_cache()
elif toPosPhase and not is_poscache_valid:
self.log.info("PoS-CACHE NOT FOUND or INVALID.")
self.log.info("Creating new cached blockchain data.")
# check if first 200 blocks (pow cache) is present. if not generate it.
if not is_powcache_valid:
self.log.info("PoW-CACHE NOT FOUND or INVALID. Generating it first.")
generate_pow_cache()
self.enable_mocktime()
block_time = self.mocktime - (131 * 60)
### POS Cache ###
# Create a 330-block-long chain
# First 200 PoW blocks are copied from PoW chain.
# The next 48 PoW blocks are mined in 12-blocks bursts by the first 4 nodes.
# The last 2 PoW blocks are then mined by the last node (Node 3).
# Then 80 PoS blocks are generated in 20-blocks bursts by the first 4 nodes.
#
            # - Node 0 and node 1 get 62 mature blocks (pow) + 20 immature (pos)
            #   42 rewards spendable (62 mature blocks - 20 spent rewards)
            # - Node 2 gets 56 mature blocks (pow) + 26 immature (6 pow + 20 pos)
            #   36 rewards spendable (56 mature blocks - 20 spent rewards)
            # - Node 3 gets 50 mature blocks (pow) + 34 immature (14 pow + 20 pos)
            #   30 rewards spendable (50 mature blocks - 20 spent rewards)
            # - Nodes 2 and 3 mint one zerocoin for each denom (tot 6666 ODC) on blocks 301/302
            #   8 mature zc + 9/3 rewards spendable (36/30 - 27 spent) + change 83.92
#
# Block 331-336 will mature last 6 pow blocks mined by node 2.
# Then 337-350 will mature last 14 pow blocks mined by node 3.
# Then staked blocks start maturing at height 351.
# Create cache directories, run odinycoinds:
create_cachedir(poscachedir)
self.log.info("Creating 'PoS-chain': 330 blocks")
self.log.info("Copying 200 initial blocks from pow cache")
copy_cachedir(powcachedir, poscachedir)
# Change datadir and restart the nodes (only 4 of them)
start_nodes_from_dir(poscachedir, 4)
# Mine 50 more blocks to reach PoS start.
self.log.info("Mining 50 more blocks to reach PoS phase")
for peer in range(4):
for j in range(12):
set_node_times(self.nodes, block_time)
self.nodes[peer].generate(1)
block_time += 60
# Must sync before next peer starts generating blocks
if peer < 3:
sync_blocks(self.nodes)
set_node_times(self.nodes, block_time)
self.nodes[3].generate(2)
block_time += 60
sync_blocks(self.nodes)
# Then stake 80 blocks.
self.log.info("Staking 80 blocks...")
nBlocks = 250
res = [] # used to save the two txids for change outputs of mints (locked)
for peer in range(4):
for j in range(20):
# Stake block
block_time = self.generate_pos(peer, block_time)
nBlocks += 1
# Mint zerocoins with node-2 at block 301 and with node-3 at block 302
if nBlocks == 301 or nBlocks == 302:
                        # mints 8 zerocoins, one for each denom (tot 6666 ODC), fee = 0.01 * 8
# consumes 27 utxos (tot 6750 ODC), change = 6750 - 6666 - fee
res.append(self.nodes[nBlocks-299].mintzerocoin(6666))
self.sync_all()
# lock the change output (so it's not used as stake input in generate_pos)
assert (self.nodes[nBlocks-299].lockunspent(False, [{"txid": res[-1]['txid'], "vout": 8}]))
# Must sync before next peer starts generating blocks
sync_blocks(self.nodes)
time.sleep(1)
self.log.info("80 blocks staked")
# Unlock previously locked change outputs
for i in [2, 3]:
assert (self.nodes[i].lockunspent(True, [{"txid": res[i-2]['txid'], "vout": 8}]))
# Verify height and balances
self.test_PoS_chain_balances()
# Shut nodes down, and clean up cache directories:
self.log.info("Stopping nodes")
stop_and_clean_cache_dir(poscachedir)
self.log.info("--> pos cache created")
self.disable_mocktime()
else:
self.log.info("CACHE FOUND.")
# Copy requested cache to tempdir
if toPosPhase:
self.log.info("Copying datadir from %s to %s" % (poscachedir, self.options.tmpdir))
copy_cachedir(poscachedir, self.options.tmpdir, self.num_nodes)
else:
self.log.info("Copying datadir from %s to %s" % (powcachedir, self.options.tmpdir))
copy_cachedir(powcachedir, self.options.tmpdir, self.num_nodes)
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i)
### Odinycoin Specific TestFramework ###
###################################
def init_dummy_key(self):
self.DUMMY_KEY = CECKey()
self.DUMMY_KEY.set_secretbytes(hash256(pack('<I', 0xffff)))
def test_PoS_chain_balances(self):
from .util import DecimalAmt
# 330 blocks
# - Nodes 0 and 1 get 82 blocks:
# 62 pow + 20 pos (20 immature)
# - Nodes 2 gets 82 blocks:
# 62 pow + 20 pos (26 immature)
# - Nodes 3 gets 84 blocks:
# 64 pow + 20 pos (34 immature)
# - Nodes 2 and 3 have 6666 ODC worth of zerocoins
zc_tot = sum(vZC_DENOMS)
zc_fee = len(vZC_DENOMS) * 0.01
used_utxos = (zc_tot // 250) + 1
zc_change = 250 * used_utxos - zc_tot - zc_fee
# check at least 1 node and at most 5
num_nodes = min(5, len(self.nodes))
assert_greater_than(num_nodes, 0)
# each node has the same height and tip
best_block = self.nodes[0].getbestblockhash()
for i in range(num_nodes):
assert_equal(self.nodes[i].getblockcount(), 330)
if i > 0:
assert_equal(self.nodes[i].getbestblockhash(), best_block)
# balance is mature pow blocks rewards minus stake inputs (spent)
w_info = [self.nodes[i].getwalletinfo() for i in range(num_nodes)]
assert_equal(w_info[0]["balance"], DecimalAmt(250.0 * (62 - 20)))
assert_equal(w_info[1]["balance"], DecimalAmt(250.0 * (62 - 20)))
assert_equal(w_info[2]["balance"], DecimalAmt(250.0 * (56 - 20) - (used_utxos * 250) + zc_change))
assert_equal(w_info[3]["balance"], DecimalAmt(250.0 * (50 - 20) - (used_utxos * 250) + zc_change))
for i in range(4, num_nodes):
# only first 4 nodes have mined/staked
assert_equal(w_info[i]["balance"], DecimalAmt(0))
# immature balance is immature pow blocks rewards plus
# immature stakes (outputs=inputs+rewards)
assert_equal(w_info[0]["immature_balance"], DecimalAmt(500.0 * 20))
assert_equal(w_info[1]["immature_balance"], DecimalAmt(500.0 * 20))
assert_equal(w_info[2]["immature_balance"], DecimalAmt((250.0 * 6) + (500.0 * 20)))
assert_equal(w_info[3]["immature_balance"], DecimalAmt((250.0 * 14) + (500.0 * 20)))
for i in range(4, num_nodes):
# only first 4 nodes have mined/staked
assert_equal(w_info[i]["immature_balance"], DecimalAmt(0))
# check zerocoin balances / mints
for peer in [2, 3]:
if num_nodes > peer:
zcBalance = self.nodes[peer].getzerocoinbalance()
zclist = self.nodes[peer].listmintedzerocoins(True)
zclist_spendable = self.nodes[peer].listmintedzerocoins(True, True)
assert_equal(len(zclist), len(vZC_DENOMS))
assert_equal(zcBalance['Total'], 6666)
assert_equal(zcBalance['Immature'], 0)
if peer == 2:
assert_equal(len(zclist), len(zclist_spendable))
else:
# last mints added on accumulators - not spendable
assert_equal(0, len(zclist_spendable))
assert_equal(set([x['denomination'] for x in zclist]), set(vZC_DENOMS))
assert_equal([x['confirmations'] for x in zclist], [30-peer] * len(vZC_DENOMS))
self.log.info("Balances of first %d nodes check out" % num_nodes)
def get_prevouts(self, node_id, utxo_list, zpos=False, nHeight=-1):
""" get prevouts (map) for each utxo in a list
:param node_id: (int) index of the CTestNode used as rpc connection. Must own the utxos.
utxo_list: <if zpos=False> (JSON list) utxos returned from listunspent used as input
<if zpos=True> (JSON list) mints returned from listmintedzerocoins used as input
zpos: (bool) type of utxo_list
nHeight: (int) height of the previous block. used only if zpos=True for
stake checksum. Optional, if not provided rpc_conn's height is used.
:return: prevouts: ({bytes --> (int, bytes, int)} dictionary)
maps CStake "uniqueness" (i.e. serialized COutPoint -or hash stake, for zpiv-)
to (amount, prevScript, timeBlockFrom).
For zpiv prevScript is replaced with serialHash hex string.
"""
assert_greater_than(len(self.nodes), node_id)
rpc_conn = self.nodes[node_id]
prevouts = {}
for utxo in utxo_list:
if not zpos:
outPoint = COutPoint(int(utxo['txid'], 16), utxo['vout'])
outValue = int(utxo['amount']) * COIN
prevtx_json = rpc_conn.getrawtransaction(utxo['txid'], 1)
prevTx = CTransaction()
prevTx.deserialize(BytesIO(hex_str_to_bytes(prevtx_json['hex'])))
if (prevTx.is_coinbase() or prevTx.is_coinstake()) and utxo['confirmations'] < 100:
# skip immature coins
continue
prevScript = prevtx_json['vout'][utxo['vout']]['scriptPubKey']['hex']
prevTime = prevtx_json['blocktime']
prevouts[outPoint.serialize_uniqueness()] = (outValue, prevScript, prevTime)
else:
# get mint checkpoint
if nHeight == -1:
nHeight = rpc_conn.getblockcount()
checkpointBlock = rpc_conn.getblock(rpc_conn.getblockhash(nHeight), True)
checkpoint = int(checkpointBlock['acc_checkpoint'], 16)
# parse checksum and get checksumblock time
pos = vZC_DENOMS.index(utxo['denomination'])
checksum = (checkpoint >> (32 * (len(vZC_DENOMS) - 1 - pos))) & 0xFFFFFFFF
prevTime = rpc_conn.getchecksumblock(hex(checksum), utxo['denomination'], True)['time']
uniqueness = bytes.fromhex(utxo['hash stake'])[::-1]
prevouts[uniqueness] = (int(utxo["denomination"]) * COIN, utxo["serial hash"], prevTime)
return prevouts
def make_txes(self, node_id, spendingPrevOuts, to_pubKey):
""" makes a list of CTransactions each spending an input from spending PrevOuts to an output to_pubKey
:param node_id: (int) index of the CTestNode used as rpc connection. Must own spendingPrevOuts.
spendingPrevouts: ({bytes --> (int, bytes, int)} dictionary)
maps CStake "uniqueness" (i.e. serialized COutPoint -or hash stake, for zpiv-)
to (amount, prevScript, timeBlockFrom).
For zpiv prevScript is replaced with serialHash hex string.
to_pubKey (bytes) recipient public key
:return: block_txes: ([CTransaction] list)
"""
assert_greater_than(len(self.nodes), node_id)
rpc_conn = self.nodes[node_id]
block_txes = []
for uniqueness in spendingPrevOuts:
if is_zerocoin(uniqueness):
# spend zODC
_, serialHash, _ = spendingPrevOuts[uniqueness]
raw_spend = rpc_conn.createrawzerocoinspend(serialHash, "", False)
else:
# spend ODC
value_out = int(spendingPrevOuts[uniqueness][0] - DEFAULT_FEE * COIN)
scriptPubKey = CScript([to_pubKey, OP_CHECKSIG])
prevout = COutPoint()
prevout.deserialize_uniqueness(BytesIO(uniqueness))
tx = create_transaction_from_outpoint(prevout, b"", value_out, scriptPubKey)
# sign tx
raw_spend = rpc_conn.signrawtransaction(bytes_to_hex_str(tx.serialize()))['hex']
# add signed tx to the list
signed_tx = CTransaction()
signed_tx.from_hex(raw_spend)
block_txes.append(signed_tx)
return block_txes
def stake_block(self, node_id,
nHeight,
                    prevHash,
stakeableUtxos,
startTime=None,
privKeyWIF=None,
vtx=[],
fDoubleSpend=False):
""" manually stakes a block selecting the coinstake input from a list of candidates
:param node_id: (int) index of the CTestNode used as rpc connection. Must own stakeableUtxos.
nHeight: (int) height of the block being produced
prevHash: (string) hex string of the previous block hash
stakeableUtxos: ({bytes --> (int, bytes, int)} dictionary)
maps CStake "uniqueness" (i.e. serialized COutPoint -or hash stake, for zpiv-)
to (amount, prevScript, timeBlockFrom).
For zpiv prevScript is replaced with serialHash hex string.
startTime: (int) epoch time to be used as blocktime (iterated in solve_stake)
               privKeyWIF: (string) private key to be used for staking/signing.
                   If None, the key of the stake input is dumped from rpc_conn and used.
                   If empty string, the framework's DUMMY_KEY is used instead.
vtx: ([CTransaction] list) transactions to add to block.vtx
               fDoubleSpend: (bool) whether any tx in vtx is allowed to spend the coinstake input
:return: block: (CBlock) block produced, must be manually relayed
"""
assert_greater_than(len(self.nodes), node_id)
rpc_conn = self.nodes[node_id]
if not len(stakeableUtxos) > 0:
raise Exception("Need at least one stakeable utxo to stake a block!")
# Get start time to stake
if startTime is None:
startTime = time.time()
# Create empty block with coinbase
nTime = int(startTime) & 0xfffffff0
coinbaseTx = create_coinbase_pos(nHeight)
        block = create_block(int(prevHash, 16), coinbaseTx, nTime)
# Find valid kernel hash - iterates stakeableUtxos, then block.nTime
block.solve_stake(stakeableUtxos)
# Check if this is a zPoS block or regular/cold stake - sign stake tx
block_sig_key = CECKey()
prevout = None
isZPoS = is_zerocoin(block.prevoutStake)
if isZPoS:
_, serialHash, _ = stakeableUtxos[block.prevoutStake]
raw_stake = rpc_conn.createrawzerocoinstake(serialHash)
stake_tx_signed_raw_hex = raw_stake["hex"]
stake_pkey = raw_stake["private-key"]
block_sig_key.set_compressed(True)
block_sig_key.set_secretbytes(bytes.fromhex(stake_pkey))
else:
coinstakeTx_unsigned = CTransaction()
prevout = COutPoint()
prevout.deserialize_uniqueness(BytesIO(block.prevoutStake))
coinstakeTx_unsigned.vin.append(CTxIn(prevout, b"", 0xffffffff))
coinstakeTx_unsigned.vout.append(CTxOut())
amount, prevScript, _ = stakeableUtxos[block.prevoutStake]
outNValue = int(amount + 250 * COIN)
coinstakeTx_unsigned.vout.append(CTxOut(outNValue, hex_str_to_bytes(prevScript)))
if privKeyWIF == "":
# Use dummy key
if not hasattr(self, 'DUMMY_KEY'):
self.init_dummy_key()
block_sig_key = self.DUMMY_KEY
# replace coinstake output script
coinstakeTx_unsigned.vout[1].scriptPubKey = CScript([block_sig_key.get_pubkey(), OP_CHECKSIG])
else:
if privKeyWIF == None:
# Use pk of the input. Ask sk from rpc_conn
rawtx = rpc_conn.getrawtransaction('{:064x}'.format(prevout.hash), True)
privKeyWIF = rpc_conn.dumpprivkey(rawtx["vout"][prevout.n]["scriptPubKey"]["addresses"][0])
# Use the provided privKeyWIF (cold staking).
# export the corresponding private key to sign block
privKey, compressed = wif_to_privkey(privKeyWIF)
block_sig_key.set_compressed(compressed)
block_sig_key.set_secretbytes(bytes.fromhex(privKey))
# Sign coinstake TX and add it to the block
stake_tx_signed_raw_hex = rpc_conn.signrawtransaction(
bytes_to_hex_str(coinstakeTx_unsigned.serialize()))['hex']
# Add coinstake to the block
coinstakeTx = CTransaction()
coinstakeTx.from_hex(stake_tx_signed_raw_hex)
block.vtx.append(coinstakeTx)
# Add provided transactions to the block.
# Don't add tx doublespending the coinstake input, unless fDoubleSpend=True
for tx in vtx:
if not fDoubleSpend:
# assume txes don't double spend zODC inputs when fDoubleSpend is false. It needs to
# be checked outside until a convenient tx.spends(zerocoin) is added to the framework.
if not isZPoS and tx.spends(prevout):
continue
block.vtx.append(tx)
# Get correct MerkleRoot and rehash block
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
# sign block with block signing key and return it
block.sign_block(block_sig_key)
return block
def stake_next_block(self, node_id,
stakeableUtxos,
btime=None,
privKeyWIF=None,
vtx=[],
fDoubleSpend=False):
""" Calls stake_block appending to the current tip"""
assert_greater_than(len(self.nodes), node_id)
nHeight = self.nodes[node_id].getblockcount()
        prevHash = self.nodes[node_id].getblockhash(nHeight)
        return self.stake_block(node_id, nHeight+1, prevHash, stakeableUtxos, btime, privKeyWIF, vtx, fDoubleSpend)
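    # A usage sketch for the two helpers above (written as comments; it assumes
    # node 0 owns mature, stakeable utxos and that the staked block is accepted):
    # utxos = self.nodes[0].listunspent()
    # stakeable = self.get_prevouts(0, utxos)
    # block = self.stake_next_block(0, stakeable)
    # self.nodes[0].submitblock(bytes_to_hex_str(block.serialize()))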
def check_tx_in_chain(self, node_id, txid):
assert_greater_than(len(self.nodes), node_id)
rawTx = self.nodes[node_id].getrawtransaction(txid, 1)
assert_greater_than(rawTx["confirmations"], 0)
def spend_inputs(self, node_id, inputs, outputs):
""" auxiliary function used by spend_utxo / spend_utxos """
assert_greater_than(len(self.nodes), node_id)
rpc_conn = self.nodes[node_id]
spendingTx = rpc_conn.createrawtransaction(inputs, outputs)
spendingTx_signed = rpc_conn.signrawtransaction(spendingTx)
if spendingTx_signed["complete"]:
txhash = rpc_conn.sendrawtransaction(spendingTx_signed["hex"])
return txhash
else:
return ""
def spend_utxo(self, node_id, utxo, recipient=''):
""" spend amount from previously unspent output to a provided address
:param node_id: (int) index of the CTestNode used as rpc connection. Must own the utxo.
utxo: (JSON) returned from listunspent used as input
recipient: (string) destination address (new one if not provided)
:return: txhash: (string) tx hash if successful, empty string otherwise
"""
assert_greater_than(len(self.nodes), node_id)
rpc_conn = self.nodes[node_id]
inputs = [{"txid": utxo["txid"], "vout": utxo["vout"]}]
out_amount = float(utxo["amount"]) - DEFAULT_FEE
outputs = {}
if recipient == '':
recipient = rpc_conn.getnewaddress()
outputs[recipient] = out_amount
return self.spend_inputs(node_id, inputs, outputs)
def spend_utxos(self, node_id, utxo_list, recipient='', fMultiple=False):
""" spend utxos to provided list of addresses or 10 new generate ones.
:param node_id: (int) index of the CTestNode used as rpc connection. Must own the utxo.
utxo_list: (JSON list) returned from listunspent used as input
recipient: (string, optional) destination address (new one if not provided)
fMultiple: (boolean, optional, default=false) spend each utxo on a different tx
:return: txHashes: (string list) list of hashes of completed txs
"""
assert_greater_than(len(self.nodes), node_id)
rpc_conn = self.nodes[node_id]
txHashes = []
# If no recipient is given, create a new one
if recipient == '':
recipient = rpc_conn.getnewaddress()
# If fMultiple=True send one tx for each utxo
if fMultiple:
for utxo in utxo_list:
txHash = self.spend_utxo(node_id, utxo, recipient)
if txHash != "":
txHashes.append(txHash)
# Otherwise make a single tx with all the inputs
else:
inputs = [{"txid": x["txid"], "vout": x["vout"]} for x in utxo_list]
out_amount = sum([float(x["amount"]) for x in utxo_list]) - DEFAULT_FEE
            # recipient was already defaulted above, so build outputs directly
            outputs = {recipient: out_amount}
txHash = self.spend_inputs(node_id, inputs, outputs)
if txHash != "":
txHashes.append(txHash)
return txHashes
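    # Sketch of the two spend_utxos modes (hypothetical values):
    #   utxos = self.nodes[0].listunspent()[:3]
    #   self.spend_utxos(0, utxos)                  # one tx spending all 3 inputs
    #   self.spend_utxos(0, utxos, fMultiple=True)  # three separate txs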
def generate_pos(self, node_id, btime=None):
""" stakes a block using generate on nodes[node_id]"""
assert_greater_than(len(self.nodes), node_id)
rpc_conn = self.nodes[node_id]
if btime is not None:
next_btime = btime + 60
fStaked = False
while not fStaked:
try:
rpc_conn.generate(1)
fStaked = True
except JSONRPCException as e:
if ("Couldn't create new block" in str(e)):
# couldn't generate block. check that this node can stake
ss = rpc_conn.getstakingstatus()
if not (ss["validtime"] and ss["haveconnections"] and ss["walletunlocked"] and
ss["mintablecoins"] and ss["enoughcoins"]):
raise AssertionError("Node %d unable to stake!" % node_id)
# try to stake one sec in the future
if btime is not None:
btime += 1
set_node_times(self.nodes, btime)
else:
time.sleep(1)
else:
raise e
# block generated. adjust block time
if btime is not None:
btime = max(btime + 1, next_btime)
set_node_times(self.nodes, btime)
return btime
else:
return None
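    # Typical PoS loop built on generate_pos (hedged sketch; assumes mocktime
    # is active so that set_node_times controls the clock):
    #   btime = self.mocktime
    #   for _ in range(10):
    #       btime = self.generate_pos(0, btime)   # stake 10 blocks on node 0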
def generate_pow(self, node_id, btime=None):
""" stakes a block using generate on nodes[node_id]"""
assert_greater_than(len(self.nodes), node_id)
self.nodes[node_id].generate(1)
if btime is not None:
btime += 60
set_node_times(self.nodes, btime)
return btime
def set_spork(self, node_id, sporkName, value):
assert_greater_than(len(self.nodes), node_id)
return self.nodes[node_id].spork(sporkName, value)
def get_spork(self, node_id, sporkName):
assert_greater_than(len(self.nodes), node_id)
return self.nodes[node_id].spork("show")[sporkName]
def activate_spork(self, node_id, sporkName):
return self.set_spork(node_id, sporkName, SPORK_ACTIVATION_TIME)
def deactivate_spork(self, node_id, sporkName):
return self.set_spork(node_id, sporkName, SPORK_DEACTIVATION_TIME)
def is_spork_active(self, node_id, sporkName):
assert_greater_than(len(self.nodes), node_id)
return self.nodes[node_id].spork("active")[sporkName]
### ------------------------------------------------------
class ComparisonTestFramework(OdinycoinTestFramework):
"""Test framework for doing p2p comparison testing
Sets up some odinycoind binaries:
- 1 binary: test binary
- 2 binaries: 1 test binary, 1 ref binary
- n>2 binaries: 1 test binary, n-1 ref binaries"""
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "odinycoind"),
help="odinycoind binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("BITCOIND", "odinycoind"),
help="odinycoind binary to use for reference nodes (if any)")
def setup_network(self):
extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args,
binary=[self.options.testbinary] +
[self.options.refbinary] * (self.num_nodes - 1))
self.start_nodes()
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
|
[
"[email protected]"
] | |
96ab3af6980eacbfc7d7b58bf47d470aa1ddfacf
|
06add04cbcffada7085043a75a1565d0e01cafff
|
/notion_scripts/not_goals.py
|
67c2cc703a4578364b23b72938d43c6cd2e0da0a
|
[] |
no_license
|
josancamon19/productivity-apps-wrapper
|
0c405726e9dbdc0de3db4f6e59d55f8e2e8f20ab
|
7d99836bda716deca90f4592a95b84786a44dc9f
|
refs/heads/main
| 2023-06-15T12:34:39.698296 | 2021-07-07T17:44:22 | 2021-07-07T17:44:22 | 374,472,485 | 22 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,944 |
py
|
import os
import requests
from . import utils, unofficial_api_utils, not_todoist
from integrations.todoist import get_goals_sections, detail_completed_tasks
database_goals = os.getenv('NOTION_GOALS_DB')
def get_db_added_goals():
data = requests.post(f'{utils.base_url}databases/{database_goals}/query', json={}, headers=utils.headers).json()
ids = [task['properties']['Id']['number'] for task in data['results']]
while cursor := data['next_cursor']:
data = requests.post(f'{utils.base_url}databases/{database_goals}/query', json={'start_cursor': cursor, 'page_size': 100},
headers=utils.headers).json()
ids += [task['properties']['Id']['number'] for task in data['results']]
return ids
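# The loop above follows Notion's pagination contract: each /query response
# carries `next_cursor` (None once exhausted), which is passed back as
# `start_cursor` on the next request. Hedged stand-alone sketch of the same
# pattern (url/headers are placeholders):
#   data = requests.post(url, json={}, headers=headers).json()
#   while cursor := data['next_cursor']:
#       data = requests.post(url, json={'start_cursor': cursor},
#                            headers=headers).json()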
def save_goals_tasks(tasks: list):
already_added = get_db_added_goals()
sections = get_goals_sections()
print(sections)
unofficial_api_utils.synchronize_goals(sections)
for task in detail_completed_tasks(tasks):
if task['id'] in already_added:
continue
data = {'parent': {'type': 'database_id', 'database_id': database_goals},
'properties': {
"Task": {
"type": "title",
"title": [{"type": "text", "text": {"content": task['content']}}]
},
"Goal": utils.map_select_value(str(task['section'])),
"Date Completion": {"type": "date", "date": {"start": task['date_completed']}},
"Id": utils.map_number_value(task['id']),
}}
result = requests.post(f'{utils.base_url}pages', json=data, headers=utils.headers)
if result.status_code >= 300:
print(result, result.content)
return
page_id = result.json().get('id')
not_todoist.add_notes_to_page(page_id, task['notes'])
|
[
"[email protected]"
] | |
d000e6a1545f8e7f4b807ad2095f24f6984d908d
|
9b01f7d430f7ee87217618cfa4567f42635e8923
|
/22-06-2017/cloudformation/nginx-demo-1/ansible/.env/lib/python2.7/site-packages/ansible/executor/play_iterator.py
|
2c3559526d65be021933166f909601d279f584c3
|
[] |
no_license
|
awsusergroupsantiago/demos
|
ccb045545d2a407a39d865cf19800d2b6d284b8f
|
e7f0dc8d9a4e8f2547c33a5a294fd76bf3ac9c9c
|
refs/heads/master
| 2022-04-30T23:43:30.646556 | 2020-08-08T01:35:40 | 2020-08-08T01:35:40 | 95,129,959 | 2 | 0 | null | 2022-03-29T21:54:09 | 2017-06-22T15:29:25 |
Python
|
UTF-8
|
Python
| false | false | 27,214 |
py
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fnmatch
from ansible.compat.six import iteritems
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.six import cmp
from ansible.playbook.block import Block
from ansible.playbook.task import Task
from ansible.playbook.role_include import IncludeRole
from ansible.utils.boolean import boolean
__all__ = ['PlayIterator']
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class HostState:
def __init__(self, blocks):
self._blocks = blocks[:]
self.cur_block = 0
self.cur_regular_task = 0
self.cur_rescue_task = 0
self.cur_always_task = 0
self.cur_dep_chain = None
self.run_state = PlayIterator.ITERATING_SETUP
self.fail_state = PlayIterator.FAILED_NONE
self.pending_setup = False
self.tasks_child_state = None
self.rescue_child_state = None
self.always_child_state = None
self.did_rescue = False
self.did_start_at_task = False
def __repr__(self):
return "HostState(%r)" % self._blocks
def __str__(self):
def _run_state_to_string(n):
states = ["ITERATING_SETUP", "ITERATING_TASKS", "ITERATING_RESCUE", "ITERATING_ALWAYS", "ITERATING_COMPLETE"]
try:
return states[n]
except IndexError:
return "UNKNOWN STATE"
def _failed_state_to_string(n):
states = {1:"FAILED_SETUP", 2:"FAILED_TASKS", 4:"FAILED_RESCUE", 8:"FAILED_ALWAYS"}
if n == 0:
return "FAILED_NONE"
else:
ret = []
for i in (1, 2, 4, 8):
if n & i:
ret.append(states[i])
return "|".join(ret)
return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, run_state=%s, fail_state=%s, pending_setup=%s, tasks child state? (%s), rescue child state? (%s), always child state? (%s), did rescue? %s, did start at task? %s" % (
self.cur_block,
self.cur_regular_task,
self.cur_rescue_task,
self.cur_always_task,
_run_state_to_string(self.run_state),
_failed_state_to_string(self.fail_state),
self.pending_setup,
self.tasks_child_state,
self.rescue_child_state,
self.always_child_state,
self.did_rescue,
self.did_start_at_task,
)
def __eq__(self, other):
if not isinstance(other, HostState):
return False
for attr in (
'_blocks', 'cur_block', 'cur_regular_task', 'cur_rescue_task', 'cur_always_task',
'run_state', 'fail_state', 'pending_setup', 'cur_dep_chain',
'tasks_child_state', 'rescue_child_state', 'always_child_state'
):
if getattr(self, attr) != getattr(other, attr):
return False
return True
def get_current_block(self):
return self._blocks[self.cur_block]
def copy(self):
new_state = HostState(self._blocks)
new_state.cur_block = self.cur_block
new_state.cur_regular_task = self.cur_regular_task
new_state.cur_rescue_task = self.cur_rescue_task
new_state.cur_always_task = self.cur_always_task
new_state.run_state = self.run_state
new_state.fail_state = self.fail_state
new_state.pending_setup = self.pending_setup
new_state.did_rescue = self.did_rescue
new_state.did_start_at_task = self.did_start_at_task
if self.cur_dep_chain is not None:
new_state.cur_dep_chain = self.cur_dep_chain[:]
if self.tasks_child_state is not None:
new_state.tasks_child_state = self.tasks_child_state.copy()
if self.rescue_child_state is not None:
new_state.rescue_child_state = self.rescue_child_state.copy()
if self.always_child_state is not None:
new_state.always_child_state = self.always_child_state.copy()
return new_state
class PlayIterator:
# the primary running states for the play iteration
ITERATING_SETUP = 0
ITERATING_TASKS = 1
ITERATING_RESCUE = 2
ITERATING_ALWAYS = 3
ITERATING_COMPLETE = 4
# the failure states for the play iteration, which are powers
# of 2 as they may be or'ed together in certain circumstances
FAILED_NONE = 0
FAILED_SETUP = 1
FAILED_TASKS = 2
FAILED_RESCUE = 4
FAILED_ALWAYS = 8
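    # Worked example of or'ing these flags: a host that failed in both the
    # rescue and always portions carries fail_state = FAILED_RESCUE |
    # FAILED_ALWAYS (4 | 8 == 12), which _failed_state_to_string above renders
    # as "FAILED_RESCUE|FAILED_ALWAYS".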
def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False):
self._play = play
self._blocks = []
self._task_uuid_cache = dict()
# Default options to gather
gather_subset = C.DEFAULT_GATHER_SUBSET
gather_timeout = C.DEFAULT_GATHER_TIMEOUT
# Retrieve subset to gather
if self._play.gather_subset is not None:
gather_subset = self._play.gather_subset
# Retrieve timeout for gather
if self._play.gather_timeout is not None:
gather_timeout = self._play.gather_timeout
setup_block = Block(play=self._play)
setup_task = Task(block=setup_block)
setup_task.action = 'setup'
setup_task.tags = ['always']
setup_task.args = {
'gather_subset': gather_subset,
}
if gather_timeout:
setup_task.args['gather_timeout'] = gather_timeout
setup_task.set_loader(self._play._loader)
# short circuit fact gathering if the entire playbook is conditional
if self._play._included_conditional is not None:
setup_task.when = self._play._included_conditional[:]
setup_block.block = [setup_task]
setup_block = setup_block.filter_tagged_tasks(play_context, all_vars)
self._blocks.append(setup_block)
self.cache_block_tasks(setup_block)
for block in self._play.compile():
new_block = block.filter_tagged_tasks(play_context, all_vars)
if new_block.has_tasks():
self.cache_block_tasks(new_block)
self._blocks.append(new_block)
for handler_block in self._play.handlers:
self.cache_block_tasks(handler_block)
self._host_states = {}
start_at_matched = False
for host in inventory.get_hosts(self._play.hosts):
self._host_states[host.name] = HostState(blocks=self._blocks)
# if the host's name is in the variable manager's fact cache, then set
# its _gathered_facts flag to true for smart gathering tests later
if host.name in variable_manager._fact_cache and variable_manager._fact_cache.get(host.name).get('module_setup', False):
host._gathered_facts = True
# if we're looking to start at a specific task, iterate through
# the tasks for this host until we find the specified task
if play_context.start_at_task is not None and not start_at_done:
while True:
(s, task) = self.get_next_task_for_host(host, peek=True)
if s.run_state == self.ITERATING_COMPLETE:
break
if task.name == play_context.start_at_task or fnmatch.fnmatch(task.name, play_context.start_at_task) or \
task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
start_at_matched = True
break
else:
self.get_next_task_for_host(host)
# finally, reset the host's state to ITERATING_SETUP
if start_at_matched:
self._host_states[host.name].did_start_at_task = True
self._host_states[host.name].run_state = self.ITERATING_SETUP
if start_at_matched:
# we have our match, so clear the start_at_task field on the
# play context to flag that we've started at a task (and future
# plays won't try to advance)
play_context.start_at_task = None
def get_host_state(self, host):
# Since we're using the PlayIterator to carry forward failed hosts,
# in the event that a previous host was not in the current inventory
# we create a stub state for it now
if host.name not in self._host_states:
self._host_states[host.name] = HostState(blocks=[])
return self._host_states[host.name].copy()
def cache_block_tasks(self, block):
def _cache_portion(p):
for t in p:
if isinstance(t, Block):
self.cache_block_tasks(t)
elif t._uuid not in self._task_uuid_cache:
self._task_uuid_cache[t._uuid] = t
for portion in (block.block, block.rescue, block.always):
if portion is not None:
_cache_portion(portion)
def get_next_task_for_host(self, host, peek=False):
display.debug("getting the next task for host %s" % host.name)
s = self.get_host_state(host)
task = None
if s.run_state == self.ITERATING_COMPLETE:
display.debug("host %s is done iterating, returning" % host.name)
return (s, None)
old_s = s
(s, task) = self._get_next_task_from_state(s, host=host, peek=peek)
if not peek:
self._host_states[host.name] = s
display.debug("done getting next task for host %s" % host.name)
display.debug(" ^ task is: %s" % task)
display.debug(" ^ state is: %s" % s)
return (s, task)
def _get_next_task_from_state(self, state, host, peek, in_child=False):
task = None
# try and find the next task, given the current state.
while True:
# try to get the current block from the list of blocks, and
# if we run past the end of the list we know we're done with
# this block
try:
block = state._blocks[state.cur_block]
except IndexError:
state.run_state = self.ITERATING_COMPLETE
return (state, None)
if state.run_state == self.ITERATING_SETUP:
# First, we check to see if we were pending setup. If not, this is
# the first trip through ITERATING_SETUP, so we set the pending_setup
# flag and try to determine if we do in fact want to gather facts for
# the specified host.
if not state.pending_setup:
state.pending_setup = True
# Gather facts if the default is 'smart' and we have not yet
# done it for this host; or if 'explicit' and the play sets
# gather_facts to True; or if 'implicit' and the play does
# NOT explicitly set gather_facts to False.
gathering = C.DEFAULT_GATHERING
implied = self._play.gather_facts is None or boolean(self._play.gather_facts)
if (gathering == 'implicit' and implied) or \
(gathering == 'explicit' and boolean(self._play.gather_facts)) or \
(gathering == 'smart' and implied and not host._gathered_facts):
# The setup block is always self._blocks[0], as we inject it
# during the play compilation in __init__ above.
setup_block = self._blocks[0]
if setup_block.has_tasks() and len(setup_block.block) > 0:
task = setup_block.block[0]
if not peek:
# mark the host as having gathered facts, because we're
# returning the setup task to be executed
host.set_gathered_facts(True)
else:
# This is the second trip through ITERATING_SETUP, so we clear
# the flag and move onto the next block in the list while setting
# the run state to ITERATING_TASKS
state.pending_setup = False
state.run_state = self.ITERATING_TASKS
if not state.did_start_at_task:
state.cur_block += 1
state.cur_regular_task = 0
state.cur_rescue_task = 0
state.cur_always_task = 0
state.child_state = None
elif state.run_state == self.ITERATING_TASKS:
# clear the pending setup flag, since we're past that and it didn't fail
if state.pending_setup:
state.pending_setup = False
# First, we check for a child task state that is not failed, and if we
# have one recurse into it for the next task. If we're done with the child
# state, we clear it and drop back to geting the next task from the list.
if state.tasks_child_state:
(state.tasks_child_state, task) = self._get_next_task_from_state(state.tasks_child_state, host=host, peek=peek, in_child=True)
if self._check_failed_state(state.tasks_child_state):
# failed child state, so clear it and move into the rescue portion
state.tasks_child_state = None
self._set_failed_state(state)
else:
# get the next task recursively
if task is None or state.tasks_child_state.run_state == self.ITERATING_COMPLETE:
# we're done with the child state, so clear it and continue
# back to the top of the loop to get the next task
state.tasks_child_state = None
continue
else:
# First here, we check to see if we've failed anywhere down the chain
# of states we have, and if so we move onto the rescue portion. Otherwise,
# we check to see if we've moved past the end of the list of tasks. If so,
# we move into the always portion of the block, otherwise we get the next
# task from the list.
if self._check_failed_state(state):
state.run_state = self.ITERATING_RESCUE
elif state.cur_regular_task >= len(block.block):
state.run_state = self.ITERATING_ALWAYS
else:
task = block.block[state.cur_regular_task]
# if the current task is actually a child block, create a child
# state for us to recurse into on the next pass
if isinstance(task, Block) or state.tasks_child_state is not None:
state.tasks_child_state = HostState(blocks=[task])
state.tasks_child_state.run_state = self.ITERATING_TASKS
# since we've created the child state, clear the task
# so we can pick up the child state on the next pass
task = None
state.cur_regular_task += 1
elif state.run_state == self.ITERATING_RESCUE:
# The process here is identical to ITERATING_TASKS, except instead
# we move into the always portion of the block.
if state.rescue_child_state:
(state.rescue_child_state, task) = self._get_next_task_from_state(state.rescue_child_state, host=host, peek=peek, in_child=True)
if self._check_failed_state(state.rescue_child_state):
state.rescue_child_state = None
self._set_failed_state(state)
else:
if task is None or state.rescue_child_state.run_state == self.ITERATING_COMPLETE:
state.rescue_child_state = None
continue
else:
if state.fail_state & self.FAILED_RESCUE == self.FAILED_RESCUE:
state.run_state = self.ITERATING_ALWAYS
elif state.cur_rescue_task >= len(block.rescue):
if len(block.rescue) > 0:
state.fail_state = self.FAILED_NONE
state.run_state = self.ITERATING_ALWAYS
state.did_rescue = True
else:
task = block.rescue[state.cur_rescue_task]
if isinstance(task, Block) or state.rescue_child_state is not None:
state.rescue_child_state = HostState(blocks=[task])
state.rescue_child_state.run_state = self.ITERATING_TASKS
task = None
state.cur_rescue_task += 1
elif state.run_state == self.ITERATING_ALWAYS:
# And again, the process here is identical to ITERATING_TASKS, except
# instead we either move onto the next block in the list, or we set the
# run state to ITERATING_COMPLETE in the event of any errors, or when we
# have hit the end of the list of blocks.
if state.always_child_state:
(state.always_child_state, task) = self._get_next_task_from_state(state.always_child_state, host=host, peek=peek, in_child=True)
if self._check_failed_state(state.always_child_state):
state.always_child_state = None
self._set_failed_state(state)
else:
if task is None or state.always_child_state.run_state == self.ITERATING_COMPLETE:
state.always_child_state = None
continue
else:
if state.cur_always_task >= len(block.always):
if state.fail_state != self.FAILED_NONE:
state.run_state = self.ITERATING_COMPLETE
else:
state.cur_block += 1
state.cur_regular_task = 0
state.cur_rescue_task = 0
state.cur_always_task = 0
state.run_state = self.ITERATING_TASKS
state.tasks_child_state = None
state.rescue_child_state = None
state.always_child_state = None
state.did_rescue = False
# we're advancing blocks, so if this was an end-of-role block we
# mark the current role complete
if block._eor and host.name in block._role._had_task_run and not in_child:
block._role._completed[host.name] = True
else:
task = block.always[state.cur_always_task]
if isinstance(task, Block) or state.always_child_state is not None:
state.always_child_state = HostState(blocks=[task])
state.always_child_state.run_state = self.ITERATING_TASKS
task = None
state.cur_always_task += 1
elif state.run_state == self.ITERATING_COMPLETE:
return (state, None)
# if something above set the task, break out of the loop now
if task:
break
return (state, task)
def _set_failed_state(self, state):
if state.run_state == self.ITERATING_SETUP:
state.fail_state |= self.FAILED_SETUP
state.run_state = self.ITERATING_COMPLETE
elif state.run_state == self.ITERATING_TASKS:
if state.tasks_child_state is not None:
state.tasks_child_state = self._set_failed_state(state.tasks_child_state)
else:
state.fail_state |= self.FAILED_TASKS
if state._blocks[state.cur_block].rescue:
state.run_state = self.ITERATING_RESCUE
elif state._blocks[state.cur_block].always:
state.run_state = self.ITERATING_ALWAYS
else:
state.run_state = self.ITERATING_COMPLETE
elif state.run_state == self.ITERATING_RESCUE:
if state.rescue_child_state is not None:
state.rescue_child_state = self._set_failed_state(state.rescue_child_state)
else:
state.fail_state |= self.FAILED_RESCUE
if state._blocks[state.cur_block].always:
state.run_state = self.ITERATING_ALWAYS
else:
state.run_state = self.ITERATING_COMPLETE
elif state.run_state == self.ITERATING_ALWAYS:
if state.always_child_state is not None:
state.always_child_state = self._set_failed_state(state.always_child_state)
else:
state.fail_state |= self.FAILED_ALWAYS
state.run_state = self.ITERATING_COMPLETE
return state
def mark_host_failed(self, host):
s = self.get_host_state(host)
display.debug("marking host %s failed, current state: %s" % (host, s))
s = self._set_failed_state(s)
display.debug("^ failed state is now: %s" % s)
self._host_states[host.name] = s
self._play._removed_hosts.append(host.name)
def get_failed_hosts(self):
return dict((host, True) for (host, state) in iteritems(self._host_states) if self._check_failed_state(state))
def _check_failed_state(self, state):
if state is None:
return False
elif state.run_state == self.ITERATING_RESCUE and self._check_failed_state(state.rescue_child_state):
return True
elif state.run_state == self.ITERATING_ALWAYS and self._check_failed_state(state.always_child_state):
return True
elif state.fail_state != self.FAILED_NONE:
if state.run_state == self.ITERATING_RESCUE and state.fail_state&self.FAILED_RESCUE == 0:
return False
elif state.run_state == self.ITERATING_ALWAYS and state.fail_state&self.FAILED_ALWAYS == 0:
return False
else:
return not state.did_rescue
elif state.run_state == self.ITERATING_TASKS and self._check_failed_state(state.tasks_child_state):
cur_block = self._blocks[state.cur_block]
if len(cur_block.rescue) > 0 and state.fail_state & self.FAILED_RESCUE == 0:
return False
else:
return True
return False
def is_failed(self, host):
s = self.get_host_state(host)
return self._check_failed_state(s)
def get_original_task(self, host, task):
'''
Finds the task in the task list which matches the UUID of the given task.
The executor engine serializes/deserializes objects as they are passed through
the different processes, and not all data structures are preserved. This method
allows us to find the original task passed into the executor engine.
'''
if isinstance(task, Task):
the_uuid = task._uuid
else:
the_uuid = task
return self._task_uuid_cache.get(the_uuid, None)
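    # Hedged sketch (names are placeholders for objects strategy plugins hold):
    #   original = iterator.get_original_task(host, result._task)
    # i.e. a deserialized Task copy coming back from a worker process is mapped
    # back to the original in-memory object via its _uuid.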
def _insert_tasks_into_state(self, state, task_list):
# if we've failed at all, or if the task list is empty, just return the current state
if state.fail_state != self.FAILED_NONE and state.run_state not in (self.ITERATING_RESCUE, self.ITERATING_ALWAYS) or not task_list:
return state
if state.run_state == self.ITERATING_TASKS:
if state.tasks_child_state:
state.tasks_child_state = self._insert_tasks_into_state(state.tasks_child_state, task_list)
else:
target_block = state._blocks[state.cur_block].copy(exclude_parent=True)
before = target_block.block[:state.cur_regular_task]
after = target_block.block[state.cur_regular_task:]
target_block.block = before + task_list + after
state._blocks[state.cur_block] = target_block
elif state.run_state == self.ITERATING_RESCUE:
if state.rescue_child_state:
state.rescue_child_state = self._insert_tasks_into_state(state.rescue_child_state, task_list)
else:
target_block = state._blocks[state.cur_block].copy(exclude_parent=True)
before = target_block.rescue[:state.cur_rescue_task]
after = target_block.rescue[state.cur_rescue_task:]
target_block.rescue = before + task_list + after
state._blocks[state.cur_block] = target_block
elif state.run_state == self.ITERATING_ALWAYS:
if state.always_child_state:
state.always_child_state = self._insert_tasks_into_state(state.always_child_state, task_list)
else:
target_block = state._blocks[state.cur_block].copy(exclude_parent=True)
before = target_block.always[:state.cur_always_task]
after = target_block.always[state.cur_always_task:]
target_block.always = before + task_list + after
state._blocks[state.cur_block] = target_block
return state
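    # Worked example of the splice above: with cur_regular_task == 2 and
    # task_list == [t_new], a block [t0, t1, t2, t3] becomes
    # [t0, t1, t_new, t2, t3] - the inserted tasks run next, before the rest.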
def add_tasks(self, host, task_list):
for b in task_list:
self.cache_block_tasks(b)
self._host_states[host.name] = self._insert_tasks_into_state(self.get_host_state(host), task_list)
|
[
"[email protected]"
] | |
17c0f628eec50d0bb168010b4109176082c7f0e8
|
1ea0e2b4f064ba0de45a73c527ee89a36771e8fc
|
/src/sentry/south_migrations/0005_auto.py
|
da6dbb394a1b0bfafb965133f97ea71ddf694688
|
[
"BSD-2-Clause"
] |
permissive
|
atlassian/sentry
|
6775e59c317f20f96982e91c2b3c88c02ecbb56b
|
b937615079d7b24dc225a83b99b1b65da932fc66
|
refs/heads/master
| 2023-08-27T15:45:47.699173 | 2017-09-18T22:14:55 | 2017-09-18T22:14:55 | 103,999,066 | 1 | 5 |
BSD-3-Clause
| 2023-04-01T07:49:37 | 2017-09-18T22:38:18 |
Python
|
UTF-8
|
Python
| false | false | 6,423 |
py
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding index on 'GroupedMessage', fields ['status']
db.create_index('sentry_groupedmessage', ['status'])
def backwards(self, orm):
# Removing index on 'GroupedMessage', fields ['status']
db.delete_index('sentry_groupedmessage', ['status'])
models = {
'sentry.filtervalue': {
'Meta': {
'unique_together': "(('key', 'value'),)",
'object_name': 'FilterValue'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '255'
})
},
'sentry.groupedmessage': {
'Meta': {
'unique_together': "(('logger', 'view', 'checksum'),)",
'object_name': 'GroupedMessage'
},
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'class_name': (
'django.db.models.fields.CharField', [], {
'db_index': 'True',
'max_length': '128',
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "'root'",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'status': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '1'
}),
'traceback':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'view': (
'django.db.models.fields.CharField', [], {
'db_index': 'True',
'max_length': '255',
'null': 'True',
'blank': 'True'
}
)
},
'sentry.message': {
'Meta': {
'object_name': 'Message'
},
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'class_name': (
'django.db.models.fields.CharField', [], {
'db_index': 'True',
'max_length': '128',
'null': 'True',
'blank': 'True'
}
),
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'message_set'",
'null': 'True',
'to': "orm['sentry.GroupedMessage']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'level': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "'root'",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'server_name':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'db_index': 'True'
}),
'traceback':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'url': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'view': (
'django.db.models.fields.CharField', [], {
'db_index': 'True',
'max_length': '255',
'null': 'True',
'blank': 'True'
}
)
}
}
complete_apps = ['sentry']
|
[
"[email protected]"
] | |
441a838a11222598abb6d464a17d3eea057ef15f
|
aed3fa9b75944995f8441c28e3ef74cfe130a004
|
/test/conftest.py
|
1481114b302b275160a0f69af8d541512635c1af
|
[] |
no_license
|
kziolkowska/PyEMMA_IPython
|
ca95fadc5cfa7c7808592668abd6b2e080498838
|
7105ec66091df3ff9c1a5f0c8babbc0ec1b3f104
|
refs/heads/master
| 2020-12-24T22:21:03.405851 | 2015-02-26T09:07:18 | 2015-02-26T09:07:18 | 31,358,905 | 0 | 0 | null | 2015-02-26T09:02:18 | 2015-02-26T09:02:18 | null |
UTF-8
|
Python
| false | false | 291 |
py
|
'''
Created on Jun 20, 2014
@author: marscher
'''
def pytest_pycollect_makeitem(collector, name, obj):
"""
    this is a hook for pytest to ensure the dynamically generated
    testcases of 'TestNotebooks' are initialized.
"""
if name == 'TestNotebooks':
obj.setUpClass()
|
[
"[email protected]"
] | |
d1947ffbf45071eabaa38c2962280ba3fd7b0196
|
ff9fd1bae4ea538fcae66df96b4acc1abdef1556
|
/fabnet/mgmt/mgmt_db.py
|
84e7c574f548521ba85fe95e832c70da12c3d242
|
[] |
no_license
|
fabregas/fabnet
|
640b6cf9d96ef477f8db5038881bd448dfcbc2a1
|
6ad7f0791ca0fd08dcbbfc49f785ef634e5fcffa
|
refs/heads/master
| 2021-01-10T20:22:20.107342 | 2014-02-17T16:51:30 | 2014-02-17T16:51:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,142 |
py
|
#!/usr/bin/python
"""
Copyright (C) 2013 Konstantin Andrusenko
See the documentation for further information on copyrights,
or contact the author. All Rights Reserved.
@package fabnet.mgmt.mgmt_db
@author Konstantin Andrusenko
@date July 24, 2013
This module contains the implementation of MgmtDatabaseManager class
"""
import hashlib
from datetime import datetime
from pymongo import MongoClient
from pymongo.errors import ConnectionFailure
from fabnet.mgmt.exceptions import *
from fabnet.mgmt.constants import *
class MgmtDatabaseManager:
MGMT_DB_NAME = 'fabnet_mgmt_db'
def __init__(self, conn_str):
try:
self.__client = MongoClient(conn_str)
        except ConnectionFailure as err:
            raise MEDatabaseException('No database connection! Details: %s' % err)
self.__mgmt_db = self.__client[self.MGMT_DB_NAME]
self.__check_users()
def __check_users(self):
users_cnt = self.__mgmt_db[DBK_USERS].find({}).count()
if users_cnt:
return
self.create_user('admin', hashlib.sha1('admin').hexdigest(), [ROLE_UM])
def close(self):
self.__client.close()
def get_cluster_config(self):
config = self.__mgmt_db[DBK_CLUSTER_CONFIG].find_one({})
if not config:
return {}
return config
def set_cluster_config(self, config):
old_config = self.__mgmt_db[DBK_CLUSTER_CONFIG].find_one({})
if old_config:
old_config.update(config)
config = old_config
self.__mgmt_db[DBK_CLUSTER_CONFIG].update({}, config, upsert=True)
def get_user_info(self, username):
user = self.__mgmt_db[DBK_USERS].find_one({DBK_USERNAME: username})
return user
def __validate(self, value, c_type, minlen=None, val_name=None, possible_vals=None):
if not val_name:
val_name = value
if not isinstance(value, c_type):
raise MEInvalidArgException('"%s" should be an instance of %s (%s occured)'\
%(val_name, c_type, type(value)))
if minlen and len(value) < minlen:
raise MEInvalidArgException('len(%s) < %s raised'%(val_name, minlen))
if possible_vals:
if type(value) not in (list, tuple):
value = [value]
for item in value:
if item not in possible_vals:
raise MEInvalidArgException('"%s" does not supported! possible values: %s'\
%(item, possible_vals))
def create_user(self, username, pwd_hash, roles):
user = self.get_user_info(username)
if user:
raise MEAlreadyExistsException('User "%s" is already exists'%username)
self.__validate(username, str, minlen=3, val_name='user_name')
self.__validate(pwd_hash, str, minlen=1, val_name='password_hash')
self.__validate(roles, list, minlen=1, val_name='roles', possible_vals=ROLES_DESC.keys())
user = {DBK_USERNAME: username,
DBK_USER_PWD_HASH: pwd_hash,
DBK_ROLES: roles}
self.__mgmt_db[DBK_USERS].insert(user)
def remove_user(self, username):
self.__mgmt_db[DBK_USERS].remove({DBK_USERNAME: username})
def update_user_info(self, username, pwd_hash=None, roles=None):
user = self.__mgmt_db[DBK_USERS].find_one({DBK_USERNAME: username})
if not user:
raise MENotFoundException('User "%s" does not found!'%username)
if pwd_hash:
self.__validate(pwd_hash, str, minlen=1, val_name='password_hash')
user[DBK_USER_PWD_HASH] = pwd_hash
if roles:
self.__validate(roles, list, minlen=1, val_name='roles', possible_vals=ROLES_DESC.keys())
user[DBK_ROLES] = roles
self.__mgmt_db[DBK_USERS].update({DBK_USERNAME: username}, user)
def add_session(self, session_id, username):
self.__mgmt_db[DBK_SESSIONS].insert({DBK_ID: session_id, \
DBK_USERNAME: username, \
DBK_START_DT: datetime.now()})
def del_session(self, session_id):
self.__mgmt_db[DBK_SESSIONS].remove({DBK_ID: session_id})
def get_user_by_session(self, session_id):
session = self.__mgmt_db[DBK_SESSIONS].find_one({DBK_ID: session_id})
if not session:
return None
username = session[DBK_USERNAME]
user = self.get_user_info(username)
if not user:
return None
return user
def get_user_last_session(self, username):
sessions = self.__mgmt_db[DBK_SESSIONS].find({DBK_USERNAME: username}).sort([(DBK_START_DT, -1)])
for session in sessions:
return session
return None
def append_node(self, node_name, node_type, node_address):
self.__mgmt_db[DBK_NODES].insert({DBK_ID: node_name, \
DBK_NODETYPE: node_type, \
DBK_NODEADDR: node_address, \
DBK_INSTALLDATE: datetime.now()})
|
[
"[email protected]"
] | |
4d6735177428f2b7d045beca5a3d4f61592dca17
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_1/gdnzik001/question3.py
|
0e872bf0558184090e56ea1fc1a16f0480345517
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 820 |
py
|
#Zikho Godana GDNZIK001
#4 march 2014
#program to generate a spam message
first_name=input("Enter first name:\n")
last_name=input("Enter last name:\n")
money=eval(input("Enter sum of money in USD:\n"))
country=input("Enter country name:\n")
print("\nDearest ",first_name,"\nIt is with a heavy heart that I inform you of the death of my father,\nGeneral Fayk ",last_name,", your long lost relative from Mapsfostol.\nMy father left the sum of ",money,"USD for us, your distant cousins. \nUnfortunately, we cannot access the money as it is in a bank in ", country,".\nI desperately need your assistance to access this money.\nI will even pay you generously, 30% of the amount - ",(money*0.3),"USD,\nfor your help. Please get in touch with me at this email address asap.\nYours sincerely \nFrank ",last_name,sep="")
|
[
"[email protected]"
] | |
1408f27ecce4aa24ccc4ea574ef3aeb35d24887f
|
4b6046439b3411cbc30276e00ad712a3d4c95768
|
/pysaga/commons.py
|
b09fb685312fab7a85a2babab18b4eda9a5bcd58
|
[] |
no_license
|
Aluriak/pysaga
|
4c731a5d404d0ca0087997af4315e69ecf14fc37
|
54320e5b65a73e793ad18c306b9c004cf8cb0593
|
refs/heads/master
| 2020-03-30T00:09:21.380417 | 2018-09-27T13:21:22 | 2018-09-27T13:21:22 | 150,507,760 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 885 |
py
|
"""Various general definitions.
"""
import re
from . import utils
TEMPLATE_PATH = 'sagas-mp3/{saga_name}/Textes/'
REFWORDS = 'REF', 'LOL', 'TRI', 'JDM'
REG_CHAPTER = re.compile(r"^Chapitre ([0-9]+) - (.+)$")
REG_CHARACTER = re.compile(r"^([A-Z0-9ÉÈÊÀÄÂÔÛÏ.,!?' -]+) : ?")
REG_LINE = re.compile(r"^([A-Z0-9ÉÈÊÀÄÂÔÛÏ.,!?' -]+) : (.+)")
UNPARSABLES = {
TEMPLATE_PATH.format(saga_name='Reflets') + 'Fleau.html',
}
assert REG_CHAPTER.fullmatch('Chapitre 1 - Introduction à la quête')
SAGA_NAME = {
'Reflets': "Reflets d'Acide",
'Xantah': "La Légende de Xantah",
'Adoprixtoxis': "Adoprixtoxis",
}
SAGA_ALIASES = utils.reverse_multivalues_dict({
'Reflets': ('rda', 'reflets', 'reflet', 'reflets d\'acide', 'reflet d\'acide'),
'Xantah': ('xantah', 'xantha', 'xant'),
'Adoprixtoxis': ('adoprixtoxis', 'adop'),
}, unique_value=True)
|
[
"[email protected]"
] | |
3a99c35b78a898a0605d35c6f449c950a1532dc9
|
53e8c9feacc101f3bfefe013808a778c81a0aca7
|
/my_words/nounce.py
|
b536f3cbe57a331ec82d779d2f32f15d3e6b3e04
|
[] |
no_license
|
chouchouyu/my_words
|
0cc98c2b1e66b38c0311f871f390932215faecde
|
2227fcb28bd49f2daca8fcf0bfa8645a95d434c9
|
refs/heads/master
| 2020-11-24T10:42:17.802396 | 2019-12-02T00:35:41 | 2019-12-02T00:35:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,580 |
py
|
英语词源字典
nounc
announce, 宣布
【发音释义】, [ə'naʊns]vt, 宣布, 述说, 预示, 播报vi, 宣布参加竞选, 当播音员
【结构分析】, announce = an(=ad, 去)nounce(报告)→宣布
词源解析, nounce←拉丁语nuntiare(报告, 叙述)←拉丁语nuntius(信使)
enunciate(宣布), renounce(宣布放弃), denounce(谴责), pronounce(发音, 宣判)
【衍生词】, announcement(宣布), announcer(广播员)
announce, 通知
前缀an同ad, 词根nounce, 通知, 见pronounce, 通知,
denounce, 谴责
de, 向下, 强调, nounce, 通知, 呼喊, 词源同announce, pronounce, 即向下喊, 引申词义谴责,
mispronounce, 发音不当
mis, 坏的, 错的, 不好的, pronounce, 发音,
announce, 宣布, 通告, 发表
词根词缀, an加强意义nounc通知e
announcement, 通[布, 预]告, 告示, 声明
词根词缀, an加强意义nounc通知ement
denounce, 公开指责, 公然抨击, 谴责, 告发
词根词缀, de下降, 向下, 减少nounc通知e
announce, 宣佈
denounce, 聲討
pronounce, 發音
renounce, 放棄
# announce,宣佈,顯示,預告,當報幕員,宣佈參加競選
denounce - -0, 告發, 公然抨擊, 譴責, 向下宣佈譴責
denunciation, 記公開指責, 公開指責, 指控, 類, 苛責是十分譴責駁斥是十分否定贊頌優點, 痛罵指責, 反, 贊詞, 頌詞, 貢品, 頌詞, 贊美, 譴責, 告發
denunciatory, 譴責的, 指責的譴責
enunciate, 發音, 明確地敘述
noun 名詞
# pronounce,發音
# pronounced--0,明顯的,明確的,有人告訴我說,我在說法語時有明顯的英國口音
# pronouncement,聲明,財政部長在最近的公開發言中對經濟復甦持更樂觀的態度
renounce, 正式, 放棄, 如頭銜, 斷絕關系參, 公開指責, 宣言, 發音, 表達, 類背棄誓言廢除命令反, 要求, 聲稱擁有, 擁抱, 信奉
renunciation, 放棄, 廢棄, 棄權, 脫離關系
pronounce, 发音, 宣判, 宣布
词根词缀, pro前, 公开nounc通知e
pronounced, 讲出来的, 显著的, 断然的, 明确的
词根词缀, pro前, 公开nounc通知ed
renounce, 声明
词根词缀, re回, 向后nounc通知e→宣布收回
renouncement, 否认, 拒绝
词根词缀, re回, 向后nounc通知ement
pronounce, 发音, 读音, 宣布, 公布
pro, 向前, nounce, 发音, 词源同announce, nuncio, 引申诸相关词义,
pronouncement, 公布, 公告
来自pronounce, 宣布, 公布,
renounce, 声明放弃, 摒弃
re, 向后, 往回, nounc, 说话, 通知, 词源同announce, denounce, 即收回已经说过的话, 引申词义声明放弃, 摒弃,
|
[
"[email protected]"
] | |
b0ae0ec00f64dacd2002923982d4003e65fbdadf
|
d98fae9cd74992a2f81f8c267a71542475b27300
|
/img_core/img_mvp/woot/apps/img/cpmath/setup.py
|
e5f56b50cfa997c4ee985bf49748913c5f251ce1
|
[] |
no_license
|
NicholasPiano/img-desktop
|
f516c4da8041aabe3cd4a1af24fdbc42eda105fa
|
9a2eed6cc0d912b675d02d8b0b20f60a71a5b481
|
refs/heads/master
| 2021-01-23T13:17:49.601534 | 2015-10-07T11:28:25 | 2015-10-07T11:28:25 | 39,502,099 | 0 | 0 | null | 2015-10-07T11:28:26 | 2015-07-22T11:19:19 |
Python
|
UTF-8
|
Python
| false | false | 4,197 |
py
|
"""setup.py - setup to build C modules for cpmath
CellProfiler is distributed under the GNU General Public License,
but this file is licensed under the more permissive BSD license.
See the accompanying file LICENSE for details.
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2015 Broad Institute
All rights reserved.
Please see the AUTHORS file for credits.
Website: http://www.cellprofiler.org
"""
from distutils.core import setup,Extension
import glob
import os
import sys
is_win = sys.platform.startswith("win")
try:
from Cython.Distutils import build_ext
from numpy import get_include
except ImportError:
import site
site.addsitedir('../../site-packages')
from Cython.Distutils import build_ext
from numpy import get_include
def configuration():
if is_win:
extra_compile_args = None
extra_link_args = ['/MANIFEST']
else:
extra_compile_args = ['-O3']
extra_link_args = None
extensions = [Extension(name="_cpmorphology",
sources=["src/cpmorphology.c"],
include_dirs=['src']+[get_include()],
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args),
Extension(name="_cpmorphology2",
sources=["_cpmorphology2.pyx"],
include_dirs=[get_include()],
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args),
Extension(name="_watershed",
sources=["_watershed.pyx", "heap_watershed.pxi"],
include_dirs=['src']+[get_include()],
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args),
Extension(name="_propagate",
sources=["_propagate.pyx", "heap.pxi"],
include_dirs=['src']+[get_include()],
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args),
Extension(name="_filter",
sources=["_filter.pyx"],
include_dirs=['src']+[get_include()],
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args),
Extension(name="_lapjv",
sources=["_lapjv.pyx"],
include_dirs=['src']+[get_include()],
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args),
Extension(name="_convex_hull",
sources=["_convex_hull.pyx"],
include_dirs=['src']+[get_include()],
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args),
Extension(name="_fastemd",
sources=["_fastemd.pyx"],
include_dirs = [
"include", get_include(),
"../contrib/include/FastEMD"],
depends=["include/fastemd_hat.hpp",
"include/npy_helpers.hpp"] +
glob.glob("../contrib/include/FastEMD/*.hpp"),
extra_compile_args = extra_compile_args,
extra_link_args=extra_link_args,
language="c++")
]
dict = { "name":"cpmath",
"description":"algorithms for CellProfiler",
"maintainer":"Lee Kamentsky",
"maintainer_email":"[email protected]",
"cmdclass": {'build_ext': build_ext},
"ext_modules": extensions
}
return dict
if __name__ == '__main__':
if '/' in __file__:
os.chdir(os.path.dirname(__file__))
setup(**configuration())
|
[
"[email protected]"
] | |
eb229401f8c3da97149edefffe458e527e72e9ef
|
daee32bcc253773388d7bb9f171dc1f7f670b959
|
/qkeras/qtools/config_public.py
|
b91118303c2a1c2311a0f1ca59040b038d304a76
|
[
"Apache-2.0"
] |
permissive
|
isabella232/qkeras
|
888b9f2c14d48c5b309a9ba02b9b3b5683ca894f
|
6790f78597313eaf51af4f4dd8947700c8611ded
|
refs/heads/master
| 2023-03-09T14:00:48.789275 | 2020-09-25T06:29:37 | 2020-09-25T06:29:55 | 298,776,168 | 0 | 0 |
Apache-2.0
| 2021-02-23T10:21:01 | 2020-09-26T09:01:23 | null |
UTF-8
|
Python
| false | false | 1,541 |
py
|
# Lint as: python3
# Copyright 2019 Google LLC
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""configuration file for external usage."""
config_settings = {
"default_source_quantizer": "quantized_bits(8, 0, 1)",
"default_interm_quantizer": "quantized_bits(8, 0, 1)",
"horowitz": {
"fpm_add": [0.003125, 0],
"fpm_mul": [0.002994791667, 0.001041666667, 0],
"fp16_add": [0.4],
"fp16_mul": [1.1],
"fp32_add": [0.9],
"fp32_mul": [3.7],
"sram_rd": [9.02427321e-04, -2.68847858e-02, 2.08900804e-01, 0.0],
"dram_rd": [20.3125, 0]
},
"include_energy": {
"QActivation": ["outputs"],
"Activation": ["outputs"],
"QBatchNormalization": ["parameters"],
"BatchNormalization": ["parameters"],
"Add": ["op_cost"],
"Subtract": ["op_cost"],
"MaxPooling2D": ["op_cost"],
"default": ["inputs", "parameters", "op_cost"]
}
}
|
[
"[email protected]"
] | |
2f2fda6bb02e4e85836480bd1c827bc1e2f58350
|
22d68861e89f1090d3d248188e17fcca09bb3001
|
/hpcc/2020-07-24--competition-evo-arch/gen-comp-exp-phase1.py
|
ecb31c956ce5573dd85394d8430de0e2919c03ba
|
[
"MIT"
] |
permissive
|
amlalejini/Aagos
|
87077694689a8b63aff452a60e05b078583dec1a
|
bb82da3a177fe467a912c9f44f516b445b4eba3b
|
refs/heads/master
| 2021-05-24T14:25:21.107670 | 2021-05-13T22:12:47 | 2021-05-13T22:12:47 | 253,603,662 | 0 | 0 |
MIT
| 2020-04-06T20:05:15 | 2020-04-06T20:05:15 | null |
UTF-8
|
Python
| false | false | 10,835 |
py
|
'''
Generate slurm job submission script for 2020-05-18 -- environmental change rate experiment.
See 2020-05-18--env-chg-sweep/README.md for more details.
'''
import argparse, os, sys, errno, subprocess, csv
seed_offset = 970000
default_num_replicates = 100
job_time_request = "00:20:00"
job_memory_request = "2G"
job_name = "phase1"
nk_config = {
"environment_change": [
"-CHANGE_FREQUENCY 0"
]
}
gradient_config = {
"environment_change": [
"-CHANGE_FREQUENCY 0"
]
}
shared_config = {
"paired": [
"-BIT_FLIP_PROB 0.003",
"-BIT_FLIP_PROB 0.1"
]
}
base_resub_script = \
"""#!/bin/bash
########## Define Resources Needed with SBATCH Lines ##########
#SBATCH --time=<<TIME_REQUEST>> # limit of wall clock time - how long the job will run (same as -t)
#SBATCH --array=<<ARRAY_ID_RANGE>>
#SBATCH --mem=<<MEMORY_REQUEST>> # memory required per node - amount of memory (in bytes)
#SBATCH --job-name <<JOB_NAME>> # you can give your job a name for easier identification (same as -J)
#SBATCH --account=devolab
########## Command Lines to Run ##########
EXEC=Aagos
CONFIG_DIR=<<CONFIG_DIR>>
module load GCC/7.3.0-2.30
module load OpenMPI/3.1.1
module load Python/3.7.0
<<RESUBMISSION_LOGIC>>
mkdir -p ${RUN_DIR}
cd ${RUN_DIR}
cp ${CONFIG_DIR}/Aagos.cfg .
cp ${CONFIG_DIR}/${EXEC} .
./${EXEC} ${RUN_PARAMS} > run.log
rm Aagos.cfg
rm ${EXEC}
"""
base_run_logic = \
"""
if [[ ${SLURM_ARRAY_TASK_ID} -eq <<RESUB_ID>> ]] ; then
RUN_DIR=<<RUN_DIR>>
RUN_PARAMS=<<RUN_PARAMS>>
fi
"""
'''
This is functionally equivalent to the mkdir -p [fname] bash command
'''
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
def extract_settings(run_config_path):
content = None
with open(run_config_path, "r") as fp:
content = fp.read().strip().split("\n")
header = content[0].split(",")
header_lu = {header[i].strip():i for i in range(0, len(header))}
content = content[1:]
configs = [l for l in csv.reader(content, quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL, skipinitialspace=True)]
return {param[header_lu["parameter"]]:param[header_lu["value"]] for param in configs}
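# Hedged illustration: a run_config.csv containing the rows
#   parameter,value
#   TOTAL_GENS,50000
# would make extract_settings return {'TOTAL_GENS': '50000'} (values remain
# strings, which is why is_run_complete compares generations as strings).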
def is_run_complete(path):
# (1) Does the run directory exist?
print(f" Run dir? {os.path.exists(path)}")
if not os.path.exists(path): return False
# (2) If the run directory exists, did the run complete?
# Is there a run config file?
run_config_path = os.path.join(path, "output", "run_config.csv")
print(f" Run config? {os.path.exists(run_config_path)}")
if not os.path.exists(run_config_path): return False
# The run config file exists, extract parameters.
run_params = extract_settings(run_config_path)
final_gen = run_params["TOTAL_GENS"] # We'll look for this generation in the fitness.csv file
fitness_file_path = os.path.join(path, "output", "fitness.csv")
print(f" Fitness file? {os.path.exists(fitness_file_path)}")
if not os.path.exists(fitness_file_path): return False
fitness_contents = None
with open(fitness_file_path, "r") as fp:
fitness_contents = fp.read().strip().split("\n")
if len(fitness_contents) == 0: return False
header = fitness_contents[0].split(",")
header_lu = {header[i].strip():i for i in range(0, len(header))}
last_line = fitness_contents[-1].split(",")
print(f" len(header) == len(last_line)? {len(header) == len(last_line)}")
if len(header) != len(last_line): return False
final_fitness_update = last_line[header_lu["update"]]
print(f" {final_fitness_update} =?= {final_gen}")
if final_fitness_update != final_gen: return False
return True
def main():
parser = argparse.ArgumentParser(description="Run submission script.")
parser.add_argument("--data_dir", type=str, help="Where is the base output directory for each run?")
parser.add_argument("--config_dir", type=str, help="Where is the configuration directory for experiment?")
parser.add_argument("--replicates", type=int, default=default_num_replicates, help="How many replicates should we run of each condition?")
parser.add_argument("--query_condition_cnt", action="store_true", help="How many conditions?")
args = parser.parse_args()
data_dir = args.data_dir
config_dir = args.config_dir
num_replicates = args.replicates
# Find all environments
nk_env_dir = os.path.join(config_dir, "environments", "nk")
nk_environments = [os.path.join(nk_env_dir, d) for d in os.listdir(nk_env_dir) if ".env" in d]
nk_environments.sort(key=lambda x : int(x.split(".env")[0].split("_")[-1]))
print(f"Found {len(nk_environments)} nk environments.")
gradient_env_dir = os.path.join(config_dir, "environments", "gradient")
gradient_environments = [os.path.join(gradient_env_dir, d) for d in os.listdir(gradient_env_dir) if ".env" in d]
gradient_environments.sort(key=lambda x : int(x.split(".env")[0].split("_")[-1]))
print(f"Found {len(gradient_environments)} gradient environments.")
if len(gradient_environments) != num_replicates:
print("num_replicates =/= number gradient environments")
exit(-1)
if len(nk_environments) != num_replicates:
print("num_replicates =/= number gradient environments")
exit(-1)
# Compute all combinations of NK fitness model settings and gradient fitness settings
nk_combos = [f"{chg} {mut} -GRADIENT_MODEL 0" for chg in nk_config["environment_change"] for mut in shared_config["paired"] ]
gradient_combos = [f"{chg} {mut} -GRADIENT_MODEL 1" for chg in gradient_config["environment_change"] for mut in shared_config["paired"] ]
# Combine
combos = gradient_combos + nk_combos
if (args.query_condition_cnt):
print("Conditions", combos)
print(f"Number of conditions: {len(combos)}")
exit(0)
# Find complete/incomplete runs.
num_finished = 0
resubmissions = []
nk_run_pairings = {i:[] for i in range(len(nk_environments))}
grad_run_pairings = {i:[] for i in range(len(gradient_environments))}
for condition_id in range(0, len(combos)):
condition_params = combos[condition_id]
print(f"Processing condition: {condition_params}")
# Run N replicates of this condition.
gradient_env_id = 0
nk_env_id = 0
for i in range(1, num_replicates+1):
# Compute seed for this replicate.
seed = seed_offset + (condition_id * num_replicates) + i
run_name = f"SEED_{seed}"
run_dir = os.path.join(data_dir, run_name)
env = None
if "-GRADIENT_MODEL 0" in condition_params:
env = nk_environments[nk_env_id]
nk_run_pairings[nk_env_id].append({"seed": seed, "run_dir": run_dir, "condition": condition_params})
nk_env_id += 1
elif "-GRADIENT_MODEL 1" in condition_params:
env = gradient_environments[gradient_env_id]
grad_run_pairings[gradient_env_id].append({"seed": seed, "run_dir": run_dir, "condition": condition_params})
gradient_env_id += 1
else:
print("????")
exit(-1)
# Generate run parameters, use to name run.
run_params = condition_params + f" -SEED {seed} -LOAD_ENV_FILE {env}"
# (1) Does the run directory exist?
print(f" {run_params}")
run_complete = is_run_complete(run_dir)
print(f" finished? {run_complete}")
num_finished += int(run_complete)
if not run_complete: resubmissions.append({"run_dir": run_dir, "run_params": run_params})
print(f"Runs finished: {num_finished}")
print(f"Resubmissions: {len(resubmissions)}")
print("Generating run pairings...")
pairings_header = ["gradient_model", "env", "seed_0", "run_dir_0", "condition_0", "seed_1", "run_dir_1", "condition_1"]
pairings_content = [",".join(pairings_header)]
for env_id in grad_run_pairings:
if len(grad_run_pairings[env_id]) != 2:
print("Gradient run pairing is not 2!")
exit(-1)
info = {
"gradient_model": "1",
"seed_0": str(grad_run_pairings[env_id][0]["seed"]),
"run_dir_0": str(grad_run_pairings[env_id][0]["run_dir"]),
"seed_1": str(grad_run_pairings[env_id][1]["seed"]),
"run_dir_1": str(grad_run_pairings[env_id][1]["run_dir"]),
"env": gradient_environments[env_id],
"condition_0": str(grad_run_pairings[env_id][0]["condition"]),
"condition_1": str(grad_run_pairings[env_id][1]["condition"])
}
pairings_content.append(",".join([info[key] for key in pairings_header]))
for env_id in nk_run_pairings:
if len(nk_run_pairings[env_id]) != 2:
print("NK run pairing is not 2!")
exit(-1)
info = {
"gradient_model": "0",
"seed_0": str(nk_run_pairings[env_id][0]["seed"]),
"run_dir_0": str(nk_run_pairings[env_id][0]["run_dir"]),
"seed_1": str(nk_run_pairings[env_id][1]["seed"]),
"run_dir_1": str(nk_run_pairings[env_id][1]["run_dir"]),
"env": nk_environments[env_id],
"condition_0": str(nk_run_pairings[env_id][0]["condition"]),
"condition_1": str(nk_run_pairings[env_id][1]["condition"])
}
pairings_content.append(",".join([info[key] for key in pairings_header]))
with open("run_pairings.csv", "w") as fp:
fp.write("\n".join(pairings_content))
print("Generating resubmission script...")
if len(resubmissions) == 0: return
resub_logic = ""
array_id = 1
for resub in resubmissions:
run_params = resub["run_params"]
run_logic = base_run_logic
run_logic = run_logic.replace("<<RESUB_ID>>", str(array_id))
run_logic = run_logic.replace("<<RUN_DIR>>", resub["run_dir"])
run_logic = run_logic.replace("<<RUN_PARAMS>>", f"'{run_params}'")
resub_logic += run_logic
array_id += 1
script = base_resub_script
script = script.replace("<<TIME_REQUEST>>", job_time_request)
script = script.replace("<<ARRAY_ID_RANGE>>", f"1-{len(resubmissions)}")
script = script.replace("<<MEMORY_REQUEST>>", job_memory_request)
script = script.replace("<<JOB_NAME>>", job_name)
script = script.replace("<<CONFIG_DIR>>", config_dir)
script = script.replace("<<RESUBMISSION_LOGIC>>", resub_logic)
with open("phase1-sub.sb", "w") as fp:
fp.write(script)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
c2773af266019d171d5db388a8485e13acd96789
|
d2628b4562ec2f7815ae9a5af546a9fe56492ff3
|
/sitch/sitchlib/config_helper.py
|
c94b725e51c79583c32dc5ae100c90651a40921a
|
[
"Apache-2.0"
] |
permissive
|
mtigas/feed_builder
|
6b5927e48a0d65d4a83830a99eda75decfb30202
|
59c65f50f221e170c229444fd8f3c7b3b1dce6a1
|
refs/heads/master
| 2021-01-11T14:02:44.662709 | 2017-06-20T23:57:32 | 2017-06-20T23:57:32 | 94,944,694 | 0 | 0 | null | 2017-06-20T23:53:53 | 2017-06-20T23:53:53 | null |
UTF-8
|
Python
| false | false | 806 |
py
|
import os
class ConfigHelper:
def __init__(self):
self.ocid_key = os.getenv("OCID_KEY")
self.base_path = "/var/feed/"
self.iso_country = "US"
self.twilio_sid = ConfigHelper.get_from_env("TWILIO_SID")
self.twilio_token = ConfigHelper.get_from_env("TWILIO_TOKEN")
self.ocid_base = "/var/"
self.fcc_tempfile = "/var/fcc.tmp.zip"
self.fcc_enclosed_file = "fcc_lic_vw.csv"
self.fcc_destination_file = "/var/fcc.csv.gz"
self.target_radio = "GSM"
return
@classmethod
def get_from_env(cls, k):
retval = os.getenv(k)
if retval is None:
print "Required config variable not set: %s" % k
print "Unable to continue. Exiting."
raise KeyError
return retval
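# Hedged usage sketch (not part of the original file). The placeholder values
# below are assumptions for illustration, but the environment variable names
# are the ones the class already reads; TWILIO_SID/TWILIO_TOKEN must be set or
# get_from_env raises KeyError.
if __name__ == "__main__":
    os.environ.setdefault("TWILIO_SID", "example-sid")
    os.environ.setdefault("TWILIO_TOKEN", "example-token")
    config = ConfigHelper()
    print(config.base_path, config.target_radio)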
|
[
"[email protected]"
] | |
b9ce33769d4545ddcaa1e2b7122e021dec4a6681
|
7c9173875ba6e20a9fc705753a5c553891d01a79
|
/Python_Bilibili/同济子豪兄/zihaowordcloud/code/example6.py
|
1722efd0b58e1763c9a8e46f96ad47fc63ebc500
|
[] |
no_license
|
zhy0313/children-python
|
1df120930637b8bd320ab090ea784aab7b7cfed2
|
941e29d5f39092b02f8486a435e61c7ec2bdcdb6
|
refs/heads/master
| 2021-01-07T00:59:31.332746 | 2020-02-10T11:45:52 | 2020-02-10T11:45:52 | 241,533,568 | 0 | 1 | null | 2020-02-19T04:36:38 | 2020-02-19T04:36:37 | null |
UTF-8
|
Python
| false | false | 1,035 |
py
|
# Word cloud No. 6: central government document on the rural revitalization strategy (five-pointed star shape)
# Bilibili column: 同济子豪兄, 2019-5-23
# Import the word-cloud library wordcloud and the Chinese word-segmentation library jieba
import jieba
import wordcloud
# Import the imread function from the imageio library and use it to read a local image as the word cloud's shape mask
import imageio
mk = imageio.imread("wujiaoxing.png")
# Build and configure the word cloud object w; note the scale parameter, which improves image resolution
w = wordcloud.WordCloud(width=1000,
height=700,
background_color='white',
font_path='msyh.ttc',
mask=mk,
scale=15)
# Run Chinese word segmentation (jieba) on the text from an external file to obtain string
f = open('关于实施乡村振兴战略的意见.txt',encoding='utf-8')
txt = f.read()
txtlist = jieba.lcut(txt)
string = " ".join(txtlist)
# Pass the string variable to w's generate() method to feed the text into the word cloud
w.generate(string)
# Export the word cloud image to the current folder
w.to_file('output6-village.png')
|
[
"[email protected]"
] | |
a8639ef72bab2024a9a6515cc547b788964a64da
|
788bdd9e443a571bc8262323425317015303cf1d
|
/p1522.py
|
1aed2ee14ece3b5ccb7ddd473dcdf6620cf438ff
|
[] |
no_license
|
qyx2018/Xiaojiayu
|
e07dd5480babafb5f1940a2032ef30a92c7331f2
|
e7decd439ed95b5a6d3cf195444cabfedf0eff76
|
refs/heads/master
| 2020-03-11T06:21:27.609708 | 2018-04-17T01:33:40 | 2018-04-17T01:33:40 | 129,827,662 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 272 |
py
|
from tkinter import *
root = Tk()
s1 = Scale(root, from_ = 0, to = 42)
s1.pack()
s2 = Scale(root, from_ = 0, to = 200,orient = HORIZONTAL)
s2.pack()
def show():
print(s1.get(), s2.get())
Button(root, text = 'Position', command = show).pack()
mainloop()
|
[
"[email protected]"
] | |
28c527fbe2a4759a8226f709c73a8b020425f06e
|
f87c86c4dcb9192717a06ec41caa5c49b1fd0201
|
/adv/summer_verica.py
|
1dd6707c1ca4334defc697de0b3bec11834d4fce
|
[
"Apache-2.0"
] |
permissive
|
Caledor/dl
|
5377982f31f0c1890aff487d5eefc1ffb6f7115f
|
fc5e9b6855afb40c4c499a70dfa0e0503e8c8f05
|
refs/heads/master
| 2023-04-01T19:15:04.372285 | 2021-03-28T23:25:06 | 2021-03-28T23:25:06 | 277,363,765 | 0 | 0 |
Apache-2.0
| 2020-07-05T18:49:36 | 2020-07-05T18:49:36 | null |
UTF-8
|
Python
| false | false | 321 |
py
|
from core.advbase import *
class Summer_Verica(Adv):
def prerun(self):
self.s2.autocharge_init(self.s2_autocharge).on()
def s2_autocharge(self, t):
if self.s1.sp > self.s1.charged:
log("sp", "s2_autocharge", 1578)
self.s2.charge(1578)
variants = {None: Summer_Verica}
|
[
"[email protected]"
] | |
e7d4ce72e6fa92f7206ac7798833e5fd688d432e
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/5bYXQfpyoithnQisa_13.py
|
a15674c0b040fae09a272140048ddb56764fc393
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 920 |
py
|
def encode_morse(message):
char_to_dots = {
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.',
'G': '--.', 'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..',
'M': '--', 'N': '-.', 'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.',
'S': '...', 'T': '-', 'U': '..-', 'V': '...-', 'W': '.--', 'X': '-..-',
'Y': '-.--', 'Z': '--..', '0': '-----',
'1': '.----', '2': '..---', '3': '...--', '4': '....-', '5': '.....',
'6': '-....', '7': '--...', '8': '---..', '9': '----.',
'&': '.-...', "'": '.----.', '@': '.--.-.', ')': '-.--.-', '(': '-.--.',
':': '---...', ',': '--..--', '=': '-...-', '!': '-.-.--', '.': '.-.-.-',
'-': '-....-', '+': '.-.-.', '"': '.-..-.', '?': '..--..', '/': '-..-.',
' ': ' '
}
ans = ""
for i in message:
if i.isalpha():
a = i.upper()
else:
a = i
ans += char_to_dots[a]
ans += " "
return ans[:-1]
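# Added usage check (not part of the original file): "SOS" maps to three
# dots, three dashes, three dots, joined by single spaces.
if __name__ == "__main__":
    assert encode_morse("SOS") == "... --- ..."
    print(encode_morse("HELLO WORLD"))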
|
[
"[email protected]"
] | |
645619fc504e9d5a3b0b71b5a27481a3936dcb5c
|
85a9ffeccb64f6159adbd164ff98edf4ac315e33
|
/pysnmp/CISCO-H320-DIAL-CONTROL-MIB.py
|
03fbe47565d515a89302d026475c173ee9efa92c
|
[
"Apache-2.0"
] |
permissive
|
agustinhenze/mibs.snmplabs.com
|
5d7d5d4da84424c5f5a1ed2752f5043ae00019fb
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
refs/heads/master
| 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 |
Apache-2.0
| 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null |
UTF-8
|
Python
| false | false | 12,710 |
py
|
#
# PySNMP MIB module CISCO-H320-DIAL-CONTROL-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-H320-DIAL-CONTROL-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:42:22 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsIntersection")
cCallHistoryIndex, = mibBuilder.importSymbols("CISCO-DIAL-CONTROL-MIB", "cCallHistoryIndex")
ciscoExperiment, = mibBuilder.importSymbols("CISCO-SMI", "ciscoExperiment")
CvcVideoCoderRate, CvcGUid, CvcH320CallType = mibBuilder.importSymbols("CISCO-VOICE-COMMON-DIAL-CONTROL-MIB", "CvcVideoCoderRate", "CvcGUid", "CvcH320CallType")
callActiveSetupTime, callActiveIndex, AbsoluteCounter32 = mibBuilder.importSymbols("DIAL-CONTROL-MIB", "callActiveSetupTime", "callActiveIndex", "AbsoluteCounter32")
ObjectGroup, ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "ModuleCompliance", "NotificationGroup")
Unsigned32, MibIdentifier, iso, Integer32, TimeTicks, Counter64, Gauge32, NotificationType, ObjectIdentity, IpAddress, Bits, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "MibIdentifier", "iso", "Integer32", "TimeTicks", "Counter64", "Gauge32", "NotificationType", "ObjectIdentity", "IpAddress", "Bits", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
ciscoH320DialControlMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 10, 128))
ciscoH320DialControlMIB.setRevisions(('2006-02-23 00:00', '2005-09-28 00:00',))
if mibBuilder.loadTexts: ciscoH320DialControlMIB.setLastUpdated('200602230000Z')
if mibBuilder.loadTexts: ciscoH320DialControlMIB.setOrganization('Cisco Systems, Inc.')
ciscoH320DialControlMIBNotifs = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 128, 0))
ciscoH320DialControlMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 128, 1))
ciscoH320DialControlMIBConform = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 128, 2))
cvH320CallHistory = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 128, 1, 1))
cvH320CallActive = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 128, 1, 2))
cvH320CallHistoryTable = MibTable((1, 3, 6, 1, 4, 1, 9, 10, 128, 1, 1, 1), )
if mibBuilder.loadTexts: cvH320CallHistoryTable.setStatus('current')
cvH320CallHistoryEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 10, 128, 1, 1, 1, 1), ).setIndexNames((0, "CISCO-DIAL-CONTROL-MIB", "cCallHistoryIndex"))
if mibBuilder.loadTexts: cvH320CallHistoryEntry.setStatus('current')
cvH320CallHistoryConnectionId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 128, 1, 1, 1, 1, 1), CvcGUid()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvH320CallHistoryConnectionId.setStatus('current')
cvH320CallHistoryIncomingConnectionId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 128, 1, 1, 1, 1, 2), CvcGUid()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvH320CallHistoryIncomingConnectionId.setStatus('current')
cvH320CallHistoryH320CallType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 128, 1, 1, 1, 1, 3), CvcH320CallType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvH320CallHistoryH320CallType.setStatus('current')
cvH320CallHistoryUsedBandwidth = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 128, 1, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 10000000))).setUnits('kilobits per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: cvH320CallHistoryUsedBandwidth.setStatus('current')
cvH320CallHistoryTxVideoCodec = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 128, 1, 1, 1, 1, 5), CvcVideoCoderRate()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvH320CallHistoryTxVideoCodec.setStatus('current')
cvH320CallHistoryTxVideoPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 128, 1, 1, 1, 1, 6), AbsoluteCounter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvH320CallHistoryTxVideoPackets.setStatus('current')
cvH320CallHistoryTxVideoBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 128, 1, 1, 1, 1, 7), AbsoluteCounter32()).setUnits('bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: cvH320CallHistoryTxVideoBytes.setStatus('current')
cvH320CallHistoryRxVideoCodec = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 128, 1, 1, 1, 1, 8), CvcVideoCoderRate()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvH320CallHistoryRxVideoCodec.setStatus('current')
cvH320CallHistoryRxVideoPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 128, 1, 1, 1, 1, 9), AbsoluteCounter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvH320CallHistoryRxVideoPackets.setStatus('current')
cvH320CallHistoryRxVideoBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 128, 1, 1, 1, 1, 10), AbsoluteCounter32()).setUnits('bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: cvH320CallHistoryRxVideoBytes.setStatus('current')
cvH320CallActiveTable = MibTable((1, 3, 6, 1, 4, 1, 9, 10, 128, 1, 2, 1), )
if mibBuilder.loadTexts: cvH320CallActiveTable.setStatus('current')
cvH320CallActiveEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 10, 128, 1, 2, 1, 1), ).setIndexNames((0, "DIAL-CONTROL-MIB", "callActiveSetupTime"), (0, "DIAL-CONTROL-MIB", "callActiveIndex"))
if mibBuilder.loadTexts: cvH320CallActiveEntry.setStatus('current')
cvH320CallActiveConnectionId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 128, 1, 2, 1, 1, 1), CvcGUid()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvH320CallActiveConnectionId.setStatus('current')
cvH320CallActiveIncomingConnectionId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 128, 1, 2, 1, 1, 2), CvcGUid()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvH320CallActiveIncomingConnectionId.setStatus('current')
cvH320CallActiveH320CallType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 128, 1, 2, 1, 1, 3), CvcH320CallType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvH320CallActiveH320CallType.setStatus('current')
cvH320CallActiveUsedBandwidth = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 128, 1, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 10000000))).setUnits('kilobits per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: cvH320CallActiveUsedBandwidth.setStatus('current')
cvH320CallActiveTxVideoCodec = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 128, 1, 2, 1, 1, 5), CvcVideoCoderRate()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvH320CallActiveTxVideoCodec.setStatus('current')
cvH320CallActiveTxVideoPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 128, 1, 2, 1, 1, 6), AbsoluteCounter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvH320CallActiveTxVideoPackets.setStatus('current')
cvH320CallActiveTxVideoBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 128, 1, 2, 1, 1, 7), AbsoluteCounter32()).setUnits('bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: cvH320CallActiveTxVideoBytes.setStatus('current')
cvH320CallActiveRxVideoCodec = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 128, 1, 2, 1, 1, 8), CvcVideoCoderRate()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvH320CallActiveRxVideoCodec.setStatus('current')
cvH320CallActiveRxVideoPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 128, 1, 2, 1, 1, 9), AbsoluteCounter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvH320CallActiveRxVideoPackets.setStatus('current')
cvH320CallActiveRxVideoBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 128, 1, 2, 1, 1, 10), AbsoluteCounter32()).setUnits('bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: cvH320CallActiveRxVideoBytes.setStatus('current')
ciscoH320DialControlMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 128, 2, 1))
ciscoH320DialControlMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 128, 2, 1, 1))
ciscoH320DialControlMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 128, 2, 1, 2))
ciscoH320DialControlMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 10, 128, 2, 1, 1, 1)).setObjects(("CISCO-H320-DIAL-CONTROL-MIB", "cvH320CallHistoryGroup"), ("CISCO-H320-DIAL-CONTROL-MIB", "cvH320CallActiveGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoH320DialControlMIBCompliance = ciscoH320DialControlMIBCompliance.setStatus('current')
cvH320CallHistoryGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 128, 2, 1, 2, 1)).setObjects(("CISCO-H320-DIAL-CONTROL-MIB", "cvH320CallHistoryConnectionId"), ("CISCO-H320-DIAL-CONTROL-MIB", "cvH320CallHistoryIncomingConnectionId"), ("CISCO-H320-DIAL-CONTROL-MIB", "cvH320CallHistoryH320CallType"), ("CISCO-H320-DIAL-CONTROL-MIB", "cvH320CallHistoryUsedBandwidth"), ("CISCO-H320-DIAL-CONTROL-MIB", "cvH320CallHistoryTxVideoCodec"), ("CISCO-H320-DIAL-CONTROL-MIB", "cvH320CallHistoryTxVideoPackets"), ("CISCO-H320-DIAL-CONTROL-MIB", "cvH320CallHistoryTxVideoBytes"), ("CISCO-H320-DIAL-CONTROL-MIB", "cvH320CallHistoryRxVideoCodec"), ("CISCO-H320-DIAL-CONTROL-MIB", "cvH320CallHistoryRxVideoPackets"), ("CISCO-H320-DIAL-CONTROL-MIB", "cvH320CallHistoryRxVideoBytes"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cvH320CallHistoryGroup = cvH320CallHistoryGroup.setStatus('current')
cvH320CallActiveGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 128, 2, 1, 2, 2)).setObjects(("CISCO-H320-DIAL-CONTROL-MIB", "cvH320CallActiveConnectionId"), ("CISCO-H320-DIAL-CONTROL-MIB", "cvH320CallActiveIncomingConnectionId"), ("CISCO-H320-DIAL-CONTROL-MIB", "cvH320CallActiveH320CallType"), ("CISCO-H320-DIAL-CONTROL-MIB", "cvH320CallActiveUsedBandwidth"), ("CISCO-H320-DIAL-CONTROL-MIB", "cvH320CallActiveTxVideoCodec"), ("CISCO-H320-DIAL-CONTROL-MIB", "cvH320CallActiveTxVideoPackets"), ("CISCO-H320-DIAL-CONTROL-MIB", "cvH320CallActiveTxVideoBytes"), ("CISCO-H320-DIAL-CONTROL-MIB", "cvH320CallActiveRxVideoCodec"), ("CISCO-H320-DIAL-CONTROL-MIB", "cvH320CallActiveRxVideoPackets"), ("CISCO-H320-DIAL-CONTROL-MIB", "cvH320CallActiveRxVideoBytes"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cvH320CallActiveGroup = cvH320CallActiveGroup.setStatus('current')
mibBuilder.exportSymbols("CISCO-H320-DIAL-CONTROL-MIB", cvH320CallHistoryConnectionId=cvH320CallHistoryConnectionId, cvH320CallHistoryEntry=cvH320CallHistoryEntry, cvH320CallActiveIncomingConnectionId=cvH320CallActiveIncomingConnectionId, cvH320CallActiveRxVideoCodec=cvH320CallActiveRxVideoCodec, cvH320CallActiveRxVideoPackets=cvH320CallActiveRxVideoPackets, ciscoH320DialControlMIBNotifs=ciscoH320DialControlMIBNotifs, cvH320CallHistoryTable=cvH320CallHistoryTable, cvH320CallActiveH320CallType=cvH320CallActiveH320CallType, cvH320CallActiveTxVideoPackets=cvH320CallActiveTxVideoPackets, cvH320CallHistory=cvH320CallHistory, cvH320CallHistoryRxVideoPackets=cvH320CallHistoryRxVideoPackets, cvH320CallHistoryGroup=cvH320CallHistoryGroup, cvH320CallActiveEntry=cvH320CallActiveEntry, cvH320CallActiveGroup=cvH320CallActiveGroup, ciscoH320DialControlMIBCompliance=ciscoH320DialControlMIBCompliance, cvH320CallHistoryTxVideoBytes=cvH320CallHistoryTxVideoBytes, cvH320CallHistoryUsedBandwidth=cvH320CallHistoryUsedBandwidth, cvH320CallActiveRxVideoBytes=cvH320CallActiveRxVideoBytes, ciscoH320DialControlMIBConform=ciscoH320DialControlMIBConform, cvH320CallHistoryRxVideoBytes=cvH320CallHistoryRxVideoBytes, PYSNMP_MODULE_ID=ciscoH320DialControlMIB, cvH320CallHistoryTxVideoPackets=cvH320CallHistoryTxVideoPackets, cvH320CallHistoryRxVideoCodec=cvH320CallHistoryRxVideoCodec, cvH320CallActiveUsedBandwidth=cvH320CallActiveUsedBandwidth, cvH320CallActiveConnectionId=cvH320CallActiveConnectionId, cvH320CallActiveTxVideoCodec=cvH320CallActiveTxVideoCodec, cvH320CallActive=cvH320CallActive, cvH320CallActiveTable=cvH320CallActiveTable, ciscoH320DialControlMIB=ciscoH320DialControlMIB, cvH320CallActiveTxVideoBytes=cvH320CallActiveTxVideoBytes, ciscoH320DialControlMIBConformance=ciscoH320DialControlMIBConformance, ciscoH320DialControlMIBObjects=ciscoH320DialControlMIBObjects, ciscoH320DialControlMIBCompliances=ciscoH320DialControlMIBCompliances, cvH320CallHistoryH320CallType=cvH320CallHistoryH320CallType, ciscoH320DialControlMIBGroups=ciscoH320DialControlMIBGroups, cvH320CallHistoryTxVideoCodec=cvH320CallHistoryTxVideoCodec, cvH320CallHistoryIncomingConnectionId=cvH320CallHistoryIncomingConnectionId)
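# Hedged usage sketch (not part of the generated module): pysmi-generated MIB
# modules like this one are not run standalone (mibBuilder is supplied by the
# loader); a pysnmp MIB builder would typically load it by module name, e.g.:
#
#     from pysnmp.smi import builder
#     mb = builder.MibBuilder()
#     mb.addMibSources(builder.DirMibSource('.'))  # directory path is an assumption
#     mb.loadModules('CISCO-H320-DIAL-CONTROL-MIB')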
|
[
"[email protected]"
] | |
d4380185dc4b6ddf9ad0d4f353b0a695e968d5d4
|
90419da201cd4948a27d3612f0b482c68026c96f
|
/sdk/python/pulumi_azure_nextgen/consumption/v20180131/outputs.py
|
c66a207304ca209166f50facce58d6ca57713144
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
test-wiz-sec/pulumi-azure-nextgen
|
cd4bee5d70cb0d332c04f16bb54e17d016d2adaf
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
refs/heads/master
| 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 |
Apache-2.0
| 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null |
UTF-8
|
Python
| false | false | 7,747 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'BudgetTimePeriodResponse',
'CurrentSpendResponse',
'FiltersResponse',
'NotificationResponse',
]
@pulumi.output_type
class BudgetTimePeriodResponse(dict):
"""
The start and end date for a budget.
"""
def __init__(__self__, *,
start_date: str,
end_date: Optional[str] = None):
"""
The start and end date for a budget.
:param str start_date: The start date for the budget.
:param str end_date: The end date for the budget. If not provided, we default this to 10 years from the start date.
"""
pulumi.set(__self__, "start_date", start_date)
if end_date is not None:
pulumi.set(__self__, "end_date", end_date)
@property
@pulumi.getter(name="startDate")
def start_date(self) -> str:
"""
The start date for the budget.
"""
return pulumi.get(self, "start_date")
@property
@pulumi.getter(name="endDate")
def end_date(self) -> Optional[str]:
"""
The end date for the budget. If not provided, we default this to 10 years from the start date.
"""
return pulumi.get(self, "end_date")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class CurrentSpendResponse(dict):
"""
The current amount of cost which is being tracked for a budget.
"""
def __init__(__self__, *,
amount: float,
unit: str):
"""
The current amount of cost which is being tracked for a budget.
:param float amount: The total amount of cost which is being tracked by the budget.
:param str unit: The unit of measure for the budget amount.
"""
pulumi.set(__self__, "amount", amount)
pulumi.set(__self__, "unit", unit)
@property
@pulumi.getter
def amount(self) -> float:
"""
The total amount of cost which is being tracked by the budget.
"""
return pulumi.get(self, "amount")
@property
@pulumi.getter
def unit(self) -> str:
"""
The unit of measure for the budget amount.
"""
return pulumi.get(self, "unit")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class FiltersResponse(dict):
"""
May be used to filter budgets by resource group, resource, or meter.
"""
def __init__(__self__, *,
meters: Optional[Sequence[str]] = None,
resource_groups: Optional[Sequence[str]] = None,
resources: Optional[Sequence[str]] = None):
"""
May be used to filter budgets by resource group, resource, or meter.
:param Sequence[str] meters: The list of filters on meters, mandatory for budgets of usage category.
:param Sequence[str] resource_groups: The list of filters on resource groups, allowed at subscription level only.
:param Sequence[str] resources: The list of filters on resources.
"""
if meters is not None:
pulumi.set(__self__, "meters", meters)
if resource_groups is not None:
pulumi.set(__self__, "resource_groups", resource_groups)
if resources is not None:
pulumi.set(__self__, "resources", resources)
@property
@pulumi.getter
def meters(self) -> Optional[Sequence[str]]:
"""
The list of filters on meters, mandatory for budgets of usage category.
"""
return pulumi.get(self, "meters")
@property
@pulumi.getter(name="resourceGroups")
def resource_groups(self) -> Optional[Sequence[str]]:
"""
The list of filters on resource groups, allowed at subscription level only.
"""
return pulumi.get(self, "resource_groups")
@property
@pulumi.getter
def resources(self) -> Optional[Sequence[str]]:
"""
The list of filters on resources.
"""
return pulumi.get(self, "resources")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class NotificationResponse(dict):
"""
The notification associated with a budget.
"""
def __init__(__self__, *,
contact_emails: Sequence[str],
enabled: bool,
operator: str,
threshold: float,
contact_groups: Optional[Sequence[str]] = None,
contact_roles: Optional[Sequence[str]] = None):
"""
The notification associated with a budget.
:param Sequence[str] contact_emails: Email addresses to send the budget notification to when the threshold is exceeded.
:param bool enabled: Whether the notification is enabled.
:param str operator: The comparison operator.
:param float threshold: Threshold value associated with a notification. A notification is sent when the cost exceeds the threshold. The value is always a percentage and must be between 0 and 1000.
:param Sequence[str] contact_groups: Action groups to send the budget notification to when the threshold is exceeded.
:param Sequence[str] contact_roles: Contact roles to send the budget notification to when the threshold is exceeded.
"""
pulumi.set(__self__, "contact_emails", contact_emails)
pulumi.set(__self__, "enabled", enabled)
pulumi.set(__self__, "operator", operator)
pulumi.set(__self__, "threshold", threshold)
if contact_groups is not None:
pulumi.set(__self__, "contact_groups", contact_groups)
if contact_roles is not None:
pulumi.set(__self__, "contact_roles", contact_roles)
@property
@pulumi.getter(name="contactEmails")
def contact_emails(self) -> Sequence[str]:
"""
Email addresses to send the budget notification to when the threshold is exceeded.
"""
return pulumi.get(self, "contact_emails")
@property
@pulumi.getter
def enabled(self) -> bool:
"""
Whether the notification is enabled.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def operator(self) -> str:
"""
The comparison operator.
"""
return pulumi.get(self, "operator")
@property
@pulumi.getter
def threshold(self) -> float:
"""
Threshold value associated with a notification. A notification is sent when the cost exceeds the threshold. The value is always a percentage and must be between 0 and 1000.
"""
return pulumi.get(self, "threshold")
@property
@pulumi.getter(name="contactGroups")
def contact_groups(self) -> Optional[Sequence[str]]:
"""
Action groups to send the budget notification to when the threshold is exceeded.
"""
return pulumi.get(self, "contact_groups")
@property
@pulumi.getter(name="contactRoles")
def contact_roles(self) -> Optional[Sequence[str]]:
"""
Contact roles to send the budget notification to when the threshold is exceeded.
"""
return pulumi.get(self, "contact_roles")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
|
[
"[email protected]"
] | |
7c0ef510fdaeb63ff3582c9253c1ae34e03d982b
|
66fe6eb64afeb7313a4c7685a8748455325b6726
|
/918-maximum-sum-circular-subarray.py
|
0150e754bb0889e9f423b95cd511f16e0fef3402
|
[] |
no_license
|
anantkaushik/leetcode
|
b54eb27b17ed95b02ab426392208c346f2d87aaa
|
06f0a6dbff2e2062fa4568efa5f01ad982d6ac94
|
refs/heads/master
| 2022-03-07T18:21:35.881943 | 2022-02-23T12:27:24 | 2022-02-23T12:27:24 | 120,501,367 | 40 | 13 | null | 2019-10-11T11:07:22 | 2018-02-06T18:05:51 |
Python
|
UTF-8
|
Python
| false | false | 1,949 |
py
|
"""
Problem Link: https://leetcode.com/problems/maximum-sum-circular-subarray/
Given a circular integer array nums of length n, return the maximum possible
sum of a non-empty subarray of nums.
A circular array means the end of the array connects to the beginning of the array.
Formally, the next element of nums[i] is nums[(i + 1) % n] and the previous
element of nums[i] is nums[(i - 1 + n) % n].
A subarray may only include each element of the fixed buffer nums at most once.
Formally, for a subarray nums[i], nums[i + 1], ..., nums[j], there does not
exist i <= k1, k2 <= j with k1 % n == k2 % n.
Example 1:
Input: nums = [1,-2,3,-2]
Output: 3
Explanation: Subarray [3] has maximum sum 3
Example 2:
Input: nums = [5,-3,5]
Output: 10
Explanation: Subarray [5,5] has maximum sum 5 + 5 = 10
Example 3:
Input: nums = [3,-1,2,-1]
Output: 4
Explanation: Subarray [2,-1,3] has maximum sum 2 + (-1) + 3 = 4
Example 4:
Input: nums = [3,-2,2,-3]
Output: 3
Explanation: Subarray [3] and [3,-2,2] both have maximum sum 3
Example 5:
Input: nums = [-2,-3,-1]
Output: -1
Explanation: Subarray [-1] has maximum sum -1
Constraints:
n == nums.length
1 <= n <= 3 * 10^4
-3 * 10^4 <= nums[i] <= 3 * 10^4
Solution:
There are two cases:
1. When max sub array is in the middle.
2. When max sub array = prefix + suffix
= total sum - middle sub array (min sub array)
Edge case: If all items are negative.
"""
from typing import List
class Solution:
def maxSubarraySumCircular(self, nums: List[int]) -> int:
cur_max_sum = cur_min_sum = total = 0
max_sum = min_sum = nums[0]
for num in nums:
total += num
cur_max_sum = max(cur_max_sum + num, num)
max_sum = max(max_sum, cur_max_sum)
cur_min_sum = min(cur_min_sum + num, num)
min_sum = min(min_sum, cur_min_sum)
return max(max_sum, total - min_sum) if max_sum > 0 else max_sum
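# Added worked check (not part of the original solution), tracing Example 2:
# for nums = [5, -3, 5], Kadane's pass gives max_sum = 7, the minimum middle
# subarray is [-3] (min_sum = -3) and total = 7, so the circular answer is
# total - min_sum = 10.
if __name__ == "__main__":
    assert Solution().maxSubarraySumCircular([5, -3, 5]) == 10
    assert Solution().maxSubarraySumCircular([-2, -3, -1]) == -1  # all-negative edge case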
|
[
"[email protected]"
] | |
63d843377be4bde15c3f3fabfa13e644d1cd9f48
|
4111ca5a73a22174f189361bef654c3f91c3b7ed
|
/Lintcode/Ladder_11_15_A/362. Sliding Window Maximum.py
|
352cc5b57065b3b2edbc26517191dbbcd1fa9897
|
[
"MIT"
] |
permissive
|
ctc316/algorithm-python
|
58b541b654509ecf4e9eb8deebfcbdf785699cc4
|
ac4580d55e05e93e407c6156c9bb801808027d60
|
refs/heads/master
| 2020-03-16T06:09:50.130146 | 2019-08-02T02:50:49 | 2019-08-02T02:50:49 | 132,548,222 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,128 |
py
|
from queue import Queue
from collections import deque
class MaxQueue:
def __init__(self, capacity):
self.capacity = capacity
self.que = Queue()
self.max_deque = deque()
def put(self, val):
if self.que.qsize() >= self.capacity:
self.pop()
self.que.put(val)
while len(self.max_deque) > 0 and val > self.max_deque[-1]:
self.max_deque.pop()
self.max_deque.append(val)
def pop(self):
val = self.que.get()
if val == self.max_deque[0]:
self.max_deque.popleft()
def getMax(self):
return self.max_deque[0]
class Solution:
"""
@param nums: A list of integers.
@param k: An integer
@return: The maximum number inside the window at each moving.
"""
def maxSlidingWindow(self, nums, k):
if k == 0 or len(nums) < k:
return []
mq = MaxQueue(k)
res = []
for i in range(k - 1):
mq.put(nums[i])
for i in range(k - 1, len(nums)):
mq.put(nums[i])
res.append(mq.getMax())
return res
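# Added usage check (not part of the original file): sliding a window of size 3
# over the classic example; the windows are [1,3,-1] [3,-1,-3] [-1,-3,5]
# [-3,5,3] [5,3,6] [3,6,7], whose maxima are collected below.
if __name__ == "__main__":
    assert Solution().maxSlidingWindow([1, 3, -1, -3, 5, 3, 6, 7], 3) == [3, 3, 5, 5, 6, 7]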
|
[
"[email protected]"
] | |
a84ac7c91ffa178cf474ff9a25b396af980e9159
|
0a0efc02319e01b9393ac56e4bf144267510c148
|
/submission/Softimage/Main/SoftimageToDeadline.py
|
713f9a423a194b0596a7f491b05834492fb69944
|
[] |
no_license
|
applemin/Deadline_Development
|
791c37d44002ea6010ce45a0798ae126201f63d4
|
75ccfeaf31bc259ceb89a38e65f8f4cdc4cdb0a0
|
refs/heads/master
| 2022-04-14T01:34:19.623922 | 2020-02-19T04:16:04 | 2020-02-19T04:16:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 27,764 |
py
|
from __future__ import with_statement # with_statement is needed for python 2.5 compatibility
import json
import os
import traceback
import win32com.client
from win32com.client import constants
import SoftimageToDeadlineFunctions
import SoftimageToDeadlineLogic
#Get handles to the xsi application
Application = win32com.client.Dispatch( 'XSI.Application' )
XSIUtils = win32com.client.Dispatch( 'XSI.Utils' )
XSIUIToolkit = win32com.client.Dispatch( 'XSI.UIToolkit' )
##-------------------------------------------------------------------------------------
## SoftimageToDeadline.py
## Thinkbox Software Inc, 2016
##
## This main script submits a Softimage scene to Deadline to be rendered.
##-------------------------------------------------------------------------------------
#Main Function Called by the proxy script
def Main():
SubmitToDeadline()
def SubmitToDeadline():
#check the version
if SoftimageToDeadlineFunctions.GetIntVersion() < 8:
Application.LogMessage("Only Softimage versions 2010 and later are supported by Deadline")
XSIUIToolkit.MsgBox("Only Softimage versions 2010 and later are supported by Deadline")
return
#save the scene first
if SoftimageToDeadlineFunctions.IsSaveRequired():
Application.LogMessage( "A save is required, saving scene..." )
try:
Application.ExecuteCommand("SaveScene",None)
except:
# Exception thrown when canceled, so just return
return
progressBar = SoftimageToDeadlineFunctions.CreateProgressBar( "Launching Submission Script", 6, False )
progressBar.Increment()
progressBar.StatusText = "Loading scene data..."
#Get the active scene and scene filename
scene = Application.ActiveProject.ActiveScene
#check how many we are going to need
passCount = 0
for currPass in scene.Passes:
passCount += 1
passesListBoxItems = [0] * (passCount * 2)
listCount = 0
for currPass in scene.Passes:
passesListBoxItems[listCount] = currPass.Name
passesListBoxItems[listCount+1] = currPass.Name
listCount += 2
passesListBoxItems.sort()
#Figure out what passes should be check by default (if any)
initPassesListValue = ";".join([currPass.name for currPass in scene.Passes if currPass.Selected])
progressBar.Increment()
progressBar.StatusText = "Getting maximum priority..."
maximumPriorityArray = SoftimageToDeadlineFunctions.GetDeadlineArray(["-getmaximumpriority",])
if(len(maximumPriorityArray) >1 and maximumPriorityArray[1] == "Error"):
Application.LogMessage("There was an error getting the maximum priority from Deadline: " + maximumPriorityArray[0])
return
try:
maximumPriority = int(maximumPriorityArray[0])
except:
maximumPriority = 100
progressBar.Increment()
progressBar.StatusText = "Loading groups..."
groupComboItems = SoftimageToDeadlineFunctions.GetDeadlineArray(["-groups",])
if(len(groupComboItems) >1 and groupComboItems[1] == "Error"):
Application.LogMessage("There was an error getting the groups from Deadline: " + groupComboItems[0])
return
progressBar.Increment()
progressBar.StatusText = "Loading pools..."
poolsComboItems = SoftimageToDeadlineFunctions.GetDeadlineArray(["-pools",])
if(len(poolsComboItems)>1 and poolsComboItems[1] == "Error"):
Application.LogMessage("There was an error getting the pools from Deadline: " + poolComboItems[0])
return
secondaryPoolsComboItems = []
secondaryPoolsComboItems.append("")
secondaryPoolsComboItems.append("")
for currPool in poolsComboItems:
secondaryPoolsComboItems.append(currPool)
progressBar.Increment()
progressBar.StatusText = "Loading initial settings..."
Application.LogMessage(str(groupComboItems))
fxTrees = Application.ActiveSceneRoot.Properties.Filter("FxTree")
index=0
size=0
for tree in fxTrees:
for op in tree.FXOperators:
if(op.Type == "FileOutputOp"):
size+=1
fxTreeOutputNodeCollection = [0]*size
for tree in fxTrees:
for op in tree.FXOperators:
if(op.Type == "FileOutputOp"):
fxTreeOutputNodeCollection[index]=(tree.Name + "." + op.Name)
index+=1
fxTreeComboItems=["",""]
if(len(fxTreeOutputNodeCollection) > 0):
size = len(fxTreeOutputNodeCollection)*2
fxTreeComboItems =[0]*size
index=0
for fxTreeOutputNode in fxTreeOutputNodeCollection:
fxTreeComboItems[index] = fxTreeOutputNode
fxTreeComboItems[index+1] = fxTreeOutputNode
index+=2
#Check if any passes use redshift as the renderer.
isRedshift = False
for currPass in scene.Passes:
if SoftimageToDeadlineFunctions.IsRedshift(currPass):
isRedshift = True
break
buildValue = "None"
if(XSIUtils.Is64BitOS()):
buildValue = "64bit"
else:
buildValue = "32bit"
# (key, value) => (variable, (UI name, default value, type, save))
settingsDict = {
"defaultJobName" : ["JobNameTextBox", Application.ActiveProject.ActiveScene, 'str', False],
"defaultComment" : ["CommentTextBox", "", 'str', True],
"defaultDepartment" : ["DepartmentTextBox", "", 'str', True],
"defaultPool" : ["PoolComboBox", SoftimageToDeadlineFunctions.GetDefaultItem(poolsComboItems,"none"), 'str', True],
"defaultSecondaryPool" : ["SecondaryPoolComboBox", SoftimageToDeadlineFunctions.GetDefaultItem(secondaryPoolsComboItems,""), 'str', True],
"defaultGroup" : ["GroupComboBox", SoftimageToDeadlineFunctions.GetDefaultItem(groupComboItems,"none"), 'str', True],
"defaultPriority" : ["PriorityNumeric", 50, 'int', True],
"defaultConcurrentTasks" : ["ConcurrentTasksNumeric", 1, 'int', True],
"defaultMachineLimit" : ["MachineLimitNumeric", 0, 'int', True],
"defaultSlaveTimeout" : ["SlaveTimeoutNumeric", 0, 'int', True],
"defaultAutoTimeout" : ["AutoTimeout", 0, 'bool', True],
"defaultLimitGroups" : ["LimitGroupsTextBox", "", 'str', True],
"defaultDependencies" : ["DependenciesTextBox", "", 'str', True],
"defaultMachineList" : ["MachineListTextBox", "", 'str', True],
"defaultIsBlacklist" : ["IsBlacklist", False, 'bool', True],
"defaultSuspended" : ["SubmitSuspended", False, 'bool', True],
"defaultOnComplete" : ["OnCompleteComboBox", "Nothing", 'str', False],
"defaultChunkSize" : ["ChunkSizeNumeric", 1, 'int', True],
"defaultWorkgroup" : ["WorkgroupFolder", SoftimageToDeadlineFunctions.GetWorkgroupPath(), 'str', False],
"defaultBuild" : ["BuildComboBox", buildValue, 'str', True],
"defaultThreads" : ["ThreadsNumeric", 0, 'int', True],
"defaultSubmitScene" : ["SubmitXsiSceneCheckBox", False, 'bool', True],
"defaultBatch" : ["BatchBox", True, 'bool', True],
"defaultLocalRendering" : ["LocalRenderingBox", False, 'bool', True],
"defaultSkipBatchLicense" : ["SkipBatchLicenseBox", False, 'bool', True],
"defaultRedshiftGPUsPerTask" : ["RedshiftGPUsPerTaskNumeric", 0, 'int', False],
"defaultRedshiftGPUsSelectDevices" : ["RedshiftGPUsSelectDevicesBox", "", 'str', False],
"defaultOverridePasses" : ["OverridePasses", False, 'bool', True],
"defaultRenderAll" : ["RenderAllPasses", False, 'bool', True],
"defaultPassesList" : ["PassesListToRender", initPassesListValue, 'str', True],
"defaultRegion" : ["RegionRenderingCheckBox", False, 'bool', True],
"defaultRegionX" : ["RegionXNumeric", 2, 'int', True],
"defaultRegionY" : ["RegionYNumeric", 2, 'int', True],
"defaultRegionError" : ["RegionErrorOnMissingCheckBox", True, 'bool', True],
"defaultRegionLeft" : ["RegionLeft", 0, 'int', True],
"defaultRegionTop" : ["RegionTop", 0, 'int', True],
"defaultRegionRight" : ["RegionRight", 0, 'int', True],
"defaultRegionBottom" : ["RegionBottom", 0, 'int', True],
"defaultRegionSingleJob" : ["RegionSingleJobCheckBox", 0, 'bool', True],
"defaultRegionAssemblyJob" : ["RegionAssemblyJobCheckBox", 1, 'bool', True],
"defaultRegionCleanupJob" : ["RegionCleanupJobCheckBox", 0, 'bool', True],
"defaultFXTree" : ["FxTreeCheckBox", False, 'bool', True],
"defaultFXValue" : ["FxTreeComboBox", "", 'str', True],
"defaultFXOffset" : ["FxTreeFrameOffsetNumeric", 0, 'int', True]
}
opSet = Application.ActiveSceneRoot.Properties("SubmitSoftimageToDeadline")
# Persist submission settings within current scene
if opSet:
for key in settingsDict:
# if this setting is to be updated
shouldUpdate = settingsDict[key][3]
if shouldUpdate:
value = SoftimageToDeadlineFunctions.GetOpSetValue( opSet, settingsDict[key][0], settingsDict[key][1] )
settingType = settingsDict[key][2]
if settingType == 'int':
value = int(value)
elif settingType == 'bool':
value = bool(value)
settingsDict[key][1] = value
opSet = None
Application.ExecuteCommand("DeleteObj",[str(Application.ActiveSceneRoot) + ".SubmitSoftimageToDeadline"])
# For the very first submission, attempt to load in stickySettings from last scene
else:
try:
settingsPath = SoftimageToDeadlineFunctions.GetDeadlineLine(["-GetCurrentUserHomeDirectory"], False)
with open(os.path.join(settingsPath, "settings", "softimageSticky.json"), 'r') as stickySettingsFile:
stickySettings = json.load(stickySettingsFile)
for key in stickySettings:
settingsDict[key][1] = stickySettings[key]
# If file does not yet exist, catch the exception (or some other exception that may occur)
except Exception as e:
Application.LogMessage( str(e) )
if settingsDict['defaultPriority'][1] > maximumPriority:
settingsDict['defaultPriority'][1] = (maximumPriority / 2)
opSet = Application.ActiveSceneRoot.AddProperty("CustomProperty",False,"SubmitSoftimageToDeadline")
opSet.AddParameter3("JobNameTextBox", constants.siString, settingsDict['defaultJobName'][1], False, False)
opSet.AddParameter3("CommentTextBox", constants.siString, settingsDict['defaultComment'][1], False, False)
opSet.AddParameter3("DepartmentTextBox", constants.siString, settingsDict['defaultDepartment'][1], False, False)
#Render info parameters
opSet.AddParameter3("PoolComboBox",constants.siString, SoftimageToDeadlineFunctions.GetDefaultItem(poolsComboItems, settingsDict['defaultPool'][1]), False, False)
opSet.AddParameter3("SecondaryPoolComboBox",constants.siString, SoftimageToDeadlineFunctions.GetDefaultItem(secondaryPoolsComboItems, settingsDict['defaultSecondaryPool'][1]), False, False)
opSet.AddParameter3("GroupComboBox",constants.siString,SoftimageToDeadlineFunctions.GetDefaultItem(groupComboItems, settingsDict['defaultGroup'][1]), False, False)
opSet.AddParameter3("PriorityNumeric", constants.siInt2, settingsDict['defaultPriority'][1], 0, maximumPriority, False, False)
opSet.AddParameter3("ConcurrentTasksNumeric", constants.siInt2, settingsDict['defaultConcurrentTasks'][1], 1, 16, False, False)
opSet.AddParameter3("MachineLimitNumeric", constants.siInt2, settingsDict['defaultMachineLimit'][1], 0, 1000, False, False)
opSet.AddParameter3("SlaveTimeoutNumeric", constants.siInt2, settingsDict['defaultSlaveTimeout'][1], 0, 5000, False, False)
opSet.AddParameter3("AutoTimeout", constants.siBool, settingsDict['defaultAutoTimeout'][1], False, False, False)
opSet.AddParameter3("LimitGroupsTextBox", constants.siString, settingsDict['defaultLimitGroups'][1], False, False)
opSet.AddParameter3("DependenciesTextBox", constants.siString, settingsDict['defaultDependencies'][1], False, False)
opSet.AddParameter3("MachineListTextBox", constants.siString, settingsDict['defaultMachineList'][1], False, False)
opSet.AddParameter3("IsBlacklist", constants.siBool, settingsDict['defaultIsBlacklist'][1], False, False, False)
opSet.AddParameter3("SubmitSuspended",constants.siBool, settingsDict['defaultSuspended'][1], False, False, False)
opSet.AddParameter3("OnCompleteComboBox", constants.siString, settingsDict['defaultOnComplete'][1], False, False)
#XSI Info Params
frameStr = SoftimageToDeadlineFunctions.GetFrameRange(scene.ActivePass)
if not SoftimageToDeadlineFunctions.GetCreateMovie(scene.ActivePass):
frameStr = frameStr + str(SoftimageToDeadlineFunctions.GetFrameStep(scene.ActivePass))
singleFrame = SoftimageToDeadlineFunctions.GetCurrentFrame()
opSet.AddParameter3("Frames",constants.siString,frameStr, False, False)
opSet.AddParameter3("ChunkSizeNumeric", constants.siInt2, settingsDict['defaultChunkSize'][1], 1, 1000, False, False)
opSet.AddParameter3("WorkgroupFolder", constants.siString, settingsDict['defaultWorkgroup'][1], False, False)
opSet.AddParameter3("BuildComboBox", constants.siString, settingsDict['defaultBuild'][1], False, False)
opSet.AddParameter3("ThreadsNumeric", constants.siInt2, settingsDict['defaultThreads'][1], 0, 256, False, False)
opSet.AddParameter3("SubmitXsiSceneCheckBox", constants.siBool, settingsDict['defaultSubmitScene'][1], False, False, False)
opSet.AddParameter3("BatchBox",constants.siBool, settingsDict['defaultBatch'][1], False, False, False)
opSet.AddParameter3("LocalRenderingBox",constants.siBool, settingsDict['defaultLocalRendering'][1], False, False, False)
opSet.AddParameter3("SkipBatchLicenseBox",constants.siBool, settingsDict['defaultSkipBatchLicense'][1], False, False, False)
opSet.AddParameter3("RedshiftGPUsPerTaskNumeric", constants.siInt2, settingsDict['defaultRedshiftGPUsPerTask'][1], 0, 16, False, False)
opSet.AddParameter3("RedshiftGPUsSelectDevicesBox", constants.siString, settingsDict['defaultRedshiftGPUsSelectDevices'][1], False, False)
#XSI PASSES
opSet.AddParameter3("OverridePasses",constants.siBool, settingsDict['defaultOverridePasses'][1], False, False, False)
opSet.AddParameter3("RenderAllPasses",constants.siBool, settingsDict['defaultRenderAll'][1], False, False, False)
opSet.AddParameter3("PassesListToRender", constants.siString, "", False, False)
opSet.Parameters("PassesListToRender").Value = settingsDict['defaultPassesList'][1]
#Region Rendering Parameters
opSet.AddParameter3("RegionRenderingCheckBox",constants.siBool, settingsDict['defaultRegion'][1], False, False, False)
opSet.AddParameter3("RegionXNumeric", constants.siInt4, settingsDict['defaultRegionX'][1], 1, 20, False, False)
opSet.AddParameter3("RegionYNumeric", constants.siInt4, settingsDict['defaultRegionY'][1], 1, 20, False, False)
opSet.AddParameter3("RegionSingleJobCheckBox", constants.siBool, settingsDict['defaultRegionSingleJob'][1], False, False, False)
opSet.AddParameter3("RegionSingleFrameBox", constants.siInt4, singleFrame, -100000, 100000, False, False)
opSet.AddParameter3("RegionAssemblyJobCheckBox", constants.siBool, settingsDict['defaultRegionAssemblyJob'][1], False, False, False)
opSet.AddParameter3("RegionCleanupJobCheckBox", constants.siBool, settingsDict['defaultRegionCleanupJob'][1], False, False, False)
opSet.AddParameter3("RegionErrorOnMissingCheckBox",constants.siBool, settingsDict['defaultRegionError'][1], False, False, False)
opSet.AddParameter3("RegionLeft", constants.siInt4, settingsDict['defaultRegionLeft'][1], 0, 1000000, False, False)
opSet.AddParameter3("RegionTop", constants.siInt4, settingsDict['defaultRegionTop'][1], 0, 1000000, False, False)
opSet.AddParameter3("RegionRight", constants.siInt4, settingsDict['defaultRegionRight'][1], 0, 1000000, False, False)
opSet.AddParameter3("RegionBottom", constants.siInt4, settingsDict['defaultRegionBottom'][1], 0, 1000000, False, False)
opSet.AddParameter3("RegionSingleLeft", constants.siString, "", False, False)
opSet.AddParameter3("RegionSingleTop", constants.siString, "", False, False)
opSet.AddParameter3("RegionSingleRight", constants.siString, "", False, False)
opSet.AddParameter3("RegionSingleBottom", constants.siString, "", False, False)
opSet.AddParameter3("RegionSinglePrefix", constants.siString, "", False, False)
opSet.AddParameter3("RegionSingleTiles", constants.siInt4, settingsDict['defaultRegionLeft'][1], 0, 1000000, False, False)
opSet.AddParameter3("FxTreeCheckBox", constants.siBool, settingsDict['defaultFXTree'][1], False, False, False)
opSet.AddParameter3("FxTreeComboBox", constants.siString, settingsDict['defaultFXValue'][1], False, False)
opSet.AddParameter3("FxTreeFrameOffsetNumeric", constants.siInt4, settingsDict['defaultFXOffset'][1], -10, 10, False, False)
opSet.AddParameter3("integrationSettingsPath",constants.siString, None, False, False)
opSet.AddParameter3("extraKVPIndex",constants.siString, None, False, False)
opSet.AddParameter3("batchMode",constants.siString, None, False, False)
#script filename
scriptFilename = SoftimageToDeadlineFunctions.GetRepositoryPath("submission/Softimage/Main")
opSet.AddParameter3("ScriptFilename", constants.siString, scriptFilename, False, False)
#Run the sanity check script if it exists, this can be used to change some initial values
sanityCheckFile = os.path.join( scriptFilename, "CustomSanityChecks.py" )
if os.path.isfile( sanityCheckFile ):
Application.LogMessage( "Running sanity check script: " + sanityCheckFile )
try:
import CustomSanityChecks
sanityResult = CustomSanityChecks.RunSanityCheck( opSet )
if not sanityResult:
Application.LogMessage( "Sanity check returned false, exiting" )
progressBar.Visible = False
return
except:
Application.LogMessage( "Could not run CustomSanityChecks.py script: " + traceback.format_exc() )
#Set up the layout of the dialog
oPPGLayout = opSet.PPGLayout
##################
oPPGLayout.Clear()
oPPGLayout.AddTab("Submission Options")
#Job Info
oPPGLayout.AddGroup("Job Description", True)
oPPGLayout.AddRow()
oPPGLayout.AddItem("JobNameTextBox","Job Name", constants.siControlString)
oPPGLayout.AddButton("SetJobNameButton", "< Scene")
oPPGLayout.EndRow()
oPPGLayout.AddItem("CommentTextBox","Comment", constants.siControlString)
oPPGLayout.AddItem("DepartmentTextBox","Department",constants.siControlString)
oPPGLayout.EndGroup()
#Render Info
oPPGLayout.AddGroup("Job Scheduling", True)
oPPGLayout.AddEnumControl("PoolComboBox", poolsComboItems, "Pool", constants.siControlCombo)
oPPGLayout.AddEnumControl("SecondaryPoolComboBox", secondaryPoolsComboItems, "Secondary Pool", constants.siControlCombo)
oPPGLayout.AddEnumControl("GroupComboBox",groupComboItems,"Group",constants.siControlCombo)
oPPGLayout.AddItem("PriorityNumeric", "Priority", constants.siControlNumber)
oPPGLayout.AddItem("ConcurrentTasksNumeric", "Concurrent Tasks", constants.siControlNumber)
oPPGLayout.AddItem("MachineLimitNumeric", "Machine Limit", constants.siControlNumber)
oPPGLayout.AddItem("SlaveTimeoutNumeric", "Task Timeout", constants.siControlNumber)
oPPGLayout.AddItem("AutoTimeout", "Enable Auto Timeout", constants.siControlBoolean)
oPPGLayout.AddRow()
limitsButton = oPPGLayout.AddButton("LimitGroupsButton", "Limits")
limitsButton.SetAttribute( constants.siUICX, 140 )
limitsTextBox = oPPGLayout.AddItem("LimitGroupsTextBox", " ", constants.siControlString)
limitsTextBox.SetAttribute( constants.siUINoLabel, True )
oPPGLayout.EndRow()
oPPGLayout.AddRow()
dependenciesButton = oPPGLayout.AddButton("DependenciesButton", "Dependencies")
dependenciesButton.SetAttribute( constants.siUICX, 140 )
dependenciesTextBox = oPPGLayout.AddItem ("DependenciesTextBox", " ", constants.siControlString)
dependenciesTextBox.SetAttribute( constants.siUINoLabel, True )
oPPGLayout.EndRow()
oPPGLayout.AddRow()
machineListButton = oPPGLayout.AddButton("MachineListButton", "Machine List")
machineListButton.SetAttribute( constants.siUICX, 140 )
machineListTextBox = oPPGLayout.AddItem ("MachineListTextBox", " ", constants.siControlString)
machineListTextBox.SetAttribute( constants.siUINoLabel, True )
oPPGLayout.EndRow()
oPPGLayout.AddItem ("IsBlacklist", "Machine List is a Blacklist", constants.siControlBoolean)
oPPGLayout.AddEnumControl("OnCompleteComboBox",( "Nothing", "Nothing", "Archive", "Archive", "Delete", "Delete" ),"On Complete",constants.siControlCombo)
oPPGLayout.AddItem ("SubmitSuspended", "Submit Job As Suspended", constants.siControlBoolean)
oPPGLayout.EndGroup()
#XSI INFO
oPPGLayout.AddGroup("Softimage Options", True)
oPPGLayout.AddItem("Frames", "Frame List", constants.siControlString)
oPPGLayout.AddItem("OverridePasses", "Ignore Per Pass Frame List (Use Frame List)", constants.siControlBoolean)
oPPGLayout.AddItem("ChunkSizeNumeric", "Group Size", constants.siControlNumber)
oPPGLayout.AddItem("WorkgroupFolder", "Workgroup", constants.siControlFolder)
oPPGLayout.AddRow()
oPPGLayout.AddEnumControl("BuildComboBox", ( "None", "None", "32bit", "32bit", "64bit", "64bit" ), "Force Build", constants.siControlCombo)
oPPGLayout.AddItem("SubmitXsiSceneCheckBox", "Submit Softimage Scene File", constants.siControlBoolean)
oPPGLayout.EndRow()
oPPGLayout.AddRow()
oPPGLayout.AddItem("ThreadsNumeric", "Threads", constants.siControlNumber)
oPPGLayout.AddItem("BatchBox","Use Softimate Batch Plugin", constants.siControlBoolean)
oPPGLayout.EndRow()
oPPGLayout.AddItem("LocalRenderingBox","Enable Local Rendering", constants.siControlBoolean)
oPPGLayout.AddItem("SkipBatchLicenseBox","Skip Batch Licensing Check (non-MentalRay renders only)", constants.siControlBoolean)
#Only show this option if at least one of the passes has Redshift as the current renderer.
if isRedshift:
oPPGLayout.AddItem("RedshiftGPUsPerTaskNumeric", "GPUs Per Task (Redshift only)", constants.siControlNumber)
oPPGLayout.AddItem("RedshiftGPUsSelectDevicesBox", "Select GPU Devices (Redshift only)", constants.siControlString)
oPPGLayout.EndGroup()
#Buttons (Job Options Tab)
oPPGLayout.AddRow()
oPPGLayout.AddButton("IntegrationButton", "Pipeline Tools")
oPPGLayout.AddButton("SubmitButton", "Submit To Deadline")
oPPGLayout.AddButton("CloseButton", "Close Dialog")
oPPGLayout.AddButton("ResetButton", "Close Dialog And Reset Options")
oPPGLayout.EndRow()
oPPGLayout.AddTab("Passes To Render")
#XSI Passes
oPPGLayout.AddGroup("Passes To Render (current pass is used if none are selected)", True)
oPPGLayout.AddItem("RenderAllPasses", "Render All " + str(passCount) + " Passes", constants.siControlBoolean)
passesList = oPPGLayout.AddItem("PassesListToRender", "Select Passes", constants.siControlListBox)
passesList.SetAttribute( constants.siUICY, 424 )
passesList.SetAttribute( constants.siUIMultiSelectionListBox, True )
passesList.UIItems = passesListBoxItems
oPPGLayout.EndGroup()
#Buttons (Passes Tab)
oPPGLayout.AddRow()
oPPGLayout.AddButton("IntegrationButton", "Pipeline Tools")
oPPGLayout.AddButton("SubmitButton", "Submit To Deadline")
oPPGLayout.AddButton("CloseButton", "Close Dialog")
oPPGLayout.AddButton("ResetButton", "Close Dialog And Reset Options")
oPPGLayout.EndRow()
oPPGLayout.AddTab("Tile Rendering")
# Region Rendering
oPPGLayout.AddGroup("Tile Rendering", True)
oPPGLayout.AddItem("RegionRenderingCheckBox", "Enable Tile Rendering", constants.siControlBoolean)
oPPGLayout.AddItem("RegionXNumeric", "Tiles in X", constants.siControlNumber)
oPPGLayout.AddItem("RegionYNumeric", "Tiles in Y", constants.siControlNumber)
oPPGLayout.EndGroup()
oPPGLayout.AddGroup("Single Job Tile Rendering", True)
oPPGLayout.AddItem("RegionSingleJobCheckBox", "Submit All Tiles As A Single Job", constants.siControlBoolean)
oPPGLayout.AddItem("RegionSingleFrameBox", "Single Job Frame", constants.siControlNumber)
oPPGLayout.AddItem("RegionAssemblyJobCheckBox", "Submit Dependent Assembly Job", constants.siControlBoolean)
oPPGLayout.AddItem("RegionCleanupJobCheckBox", "Cleanup Tile Files After Assembly Job Completes", constants.siControlBoolean)
oPPGLayout.AddItem("RegionErrorOnMissingCheckBox", "Error On Missing Tiles", constants.siControlBoolean)
oPPGLayout.EndGroup()
#Buttons (Tile Rendering Tab)
oPPGLayout.AddRow()
oPPGLayout.AddButton("IntegrationButton", "Pipeline Tools")
oPPGLayout.AddButton("SubmitButton", "Submit To Deadline")
oPPGLayout.AddButton("CloseButton", "Close Dialog")
oPPGLayout.AddButton("ResetButton", "Close Dialog And Reset Options")
oPPGLayout.EndRow()
oPPGLayout.AddTab("FxTree Rendering")
#FxTree Rendering
oPPGLayout.AddGroup("FxTree Rendering", True)
oPPGLayout.AddItem("FxTreeCheckBox", "Submit An FxTree Render Job (ignores Passes and Tile Rendering options)", constants.siControlBoolean)
oPPGLayout.AddEnumControl("FxTreeComboBox", fxTreeComboItems, "FxTree Output", constants.siControlCombo)
oPPGLayout.AddItem("FxTreeFrameOffsetNumeric", "Frame Offset", constants.siControlNumber)
oPPGLayout.EndGroup()
#Buttons (FxTree Tab)
oPPGLayout.AddRow()
oPPGLayout.AddButton("IntegrationButton", "Pipeline Tools")
oPPGLayout.AddButton("SubmitButton", "Submit To Deadline")
oPPGLayout.AddButton("CloseButton", "Close Dialog")
oPPGLayout.AddButton("ResetButton", "Close Dialog And Reset Options")
oPPGLayout.EndRow()
#Use get script file name to get the full path then just change the script
script = os.path.join( scriptFilename, "SoftimageToDeadlineLogic.py" )
Application.LogMessage("Script file: " + script)
if(os.path.exists(script)):
textStream = open(script,"r")
logic = textStream.read()
oPPGLayout.Logic = logic
textStream.close()
else:
Application.LogMessage("Script Logic File Not Found")
progressBar.Increment()
progressBar.StatusText = "Finished"
progressBar.Visible = False
#oPPGLayout.Language = "Python"
oPPGLayout.Language = "pythonscript"
oView = Application.Desktop.ActiveLayout.CreateView( "Property Panel", "DeadlineProperties" )
oView.BeginEdit()
oView.Move(10, 10)
if isRedshift:
oView.Resize(580, 735)
else:
oView.Resize(580, 695)
oView.SetAttributeValue("targetcontent", opSet.FullName)
oView.EndEdit()
#Read in the button logic from another script
#######################################################################################################
## Uncomment the following line when debugging this script from within Softimage's script editor.
#######################################################################################################
#SubmitToDeadline()
|
[
"[email protected]"
] | |
ab6d7110652b2747b17988bb0bd3149367742ad8
|
2ebd0bd42a0925fa9cbc913f9db0509d37aa0123
|
/src/main.py
|
b34001c23d38c7833135a9f3c5213ad1884b1f18
|
[] |
no_license
|
oknuutti/reposenet
|
0965c12731990453cd56b40e3ec629460c860864
|
e20050a3dc5f90e5c86782d92eaf03538f83540d
|
refs/heads/master
| 2020-05-24T08:31:43.963222 | 2019-05-19T20:57:21 | 2019-05-19T20:57:21 | 187,186,148 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 19,140 |
py
|
import argparse
import random
import shutil
import os
import time
import csv
import sys
import numpy as np
from PIL import Image
import torch
from torch.backends import cudnn
from torch.utils.data import DataLoader
import torchvision
from posenet import PoseNet, PoseDataset
# random seed used
RND_SEED = 10
# for my own convenience
DEFAULT_DATA_DIR = 'd:\\projects\\densepose\\data\\cambridge\\StMarysChurch'
DEFAULT_CACHE_DIR = 'd:\\projects\\densepose\\data\\models'
DEFAULT_OUTPUT_DIR = os.path.join(os.path.dirname(__file__), '..', 'output')
# Basic structure inspired by https://github.com/pytorch/examples/blob/master/imagenet/main.py
model_names = sorted(name for name in torchvision.models.__dict__
if name.islower() and not name.startswith("__")
and callable(torchvision.models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch PoseNet Training')
parser.add_argument('--data', '-d', metavar='DIR', default=DEFAULT_DATA_DIR,
help='path to dataset')
parser.add_argument('--cache', metavar='DIR', default=DEFAULT_CACHE_DIR,
help='path to cache dir')
parser.add_argument('--output', metavar='DIR', default=DEFAULT_OUTPUT_DIR,
help='path to output dir')
parser.add_argument('--arch', '-a', metavar='ARCH', default='googlenet',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: googlenet)')
parser.add_argument('-n', '--features', default=2048, type=int, metavar='N',
help='number of localization features (default: 2048)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=1500, type=int, metavar='N',
help='number of total epochs to run (default: 1500)')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=8, type=int,
metavar='N', help='mini-batch size (default: 8)')
parser.add_argument('--lr', '--learning-rate', default=1e-4, type=float,
metavar='LR', help='initial learning rate (default: 1e-4)')
parser.add_argument('--optimizer', '-o', default='adam', type=str, metavar='OPT',
help='optimizer, only [adam] currently available', choices=('adam',))
parser.add_argument('--weight-decay', '--wd', default=0.0005, type=float,
                    metavar='W', help='weight decay (default: 0.0005)')
parser.add_argument('--dropout', '--do', default=0, type=float,
metavar='R', help='dropout ratio (default: 0)')
parser.add_argument('--loss', default='L1', type=str,
metavar='L', help='loss metric [L1|MSE] (default: L1)')
parser.add_argument('--beta', default=250, type=float,
metavar='B', help='fixed orientation loss weight, set to zero '
'to learn sx and sq instead (default: 250)')
parser.add_argument('--sx', default=0.0, type=float,
metavar='SX', help='initial position loss weight (default: 0.0)')
parser.add_argument('--sq', default=-6.0, type=float,
metavar='SQ', help='initial orientation loss weight (default: -6.0)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--test-freq', '--tf', default=1, type=int,
metavar='N', help='test frequency (default: 1)')
parser.add_argument('--save-freq', '--sf', default=10, type=int,
metavar='N', help='save frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', default=True, action='store_true',
                    help='use pre-trained model (note: default=True, so this flag is effectively always on)')
parser.add_argument('--split-opt-params', default=False, action='store_true',
help='use different optimization params for bias, weight and loss function params')
parser.add_argument('--excl-bn', default=False, action='store_true',
help='exclude batch norm params from optimization')
parser.add_argument('--adv-tr-eps', default=0, type=float, metavar='eps',
help='use adversarial training with given epsilon')
parser.add_argument('--save-adv-img', default=None, type=str, metavar='FILE',
help='save first adversarial training image to given file')
parser.add_argument('--center-crop', default=False, action='store_true',
help='use center crop instead of random crop for training')
parser.add_argument('--early-stopping', default=0, type=int, metavar='N',
help='stop training, if loss on validation set does not decrease for this many epochs')
parser.add_argument('--name', '--pid', default='', type=str, metavar='NAME',
help='experiment name for out file names')
def main():
global args
args = parser.parse_args()
os.makedirs(args.output, exist_ok=True)
# if don't call torch.cuda.current_device(), fails later with
# "RuntimeError: cuda runtime error (30) : unknown error at ..\aten\src\THC\THCGeneral.cpp:87"
torch.cuda.current_device()
use_cuda = torch.cuda.is_available() and True
device = torch.device("cuda:0" if use_cuda else "cpu")
# try to get consistent results across runs
# => currently still fails, however, makes runs a bit more consistent
_set_random_seed()
# create model
model = PoseNet(arch=args.arch, num_features=args.features, dropout=args.dropout,
pretrained=True, cache_dir=args.cache, loss=args.loss, excl_bn_affine=args.excl_bn,
beta=args.beta, sx=args.sx, sq=args.sq)
# create optimizer
# - currently only Adam supported
if args.optimizer == 'adam':
eps = 0.1
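        # note: 0.1 is far larger than Adam's default eps (1e-8); a large epsilon damps
        # the per-parameter adaptive scaling, pushing updates closer to plain SGD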
if args.split_opt_params:
new_biases, new_weights, biases, weights, others = model.params_to_optimize(split=True, excl_batch_norm=args.excl_bn)
optimizer = torch.optim.Adam([
{'params': new_biases, 'lr': args.lr * 2, 'weight_decay': 0.0, 'eps': eps},
{'params': new_weights, 'lr': args.lr, 'weight_decay': args.weight_decay, 'eps': eps},
{'params': biases, 'lr': args.lr * 2, 'weight_decay': 0.0, 'eps': eps},
{'params': weights, 'lr': args.lr, 'weight_decay': args.weight_decay, 'eps': eps},
{'params': others, 'lr': 0, 'weight_decay': 0, 'eps': eps},
])
else:
params = model.params_to_optimize(excl_batch_norm=args.excl_bn)
optimizer = torch.optim.Adam(params, lr=args.lr, weight_decay=args.weight_decay, eps=eps)
else:
assert False, 'Invalid optimizer: %s' % args.optimizer
# optionally resume from a checkpoint
best_loss = float('inf')
best_epoch = -1
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_epoch = checkpoint['best_epoch']
best_loss = checkpoint['best_loss']
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
quit()
# define overall training dataset, set output normalization, load model to gpu
all_tr_data = PoseDataset(args.data, 'dataset_train.txt', random_crop=not args.center_crop)
model.set_target_transform(all_tr_data.target_mean, all_tr_data.target_std)
model.to(device)
# split overall training data to training and validation sets
# validation set is used for early stopping, or possibly in future for hyper parameter optimization
    n_tr = round(len(all_tr_data) * 0.75)
    lengths = [n_tr, len(all_tr_data) - n_tr]  # rounding both parts independently could miss the total by one
tr_data, val_data = torch.utils.data.random_split(all_tr_data, lengths)
# define data loaders
train_loader = DataLoader(tr_data, batch_size=args.batch_size, num_workers=args.workers,
shuffle=True, pin_memory=True, worker_init_fn=_worker_init_fn)
val_loader = DataLoader(val_data, batch_size=args.batch_size, num_workers=args.workers,
shuffle=False, pin_memory=True, worker_init_fn=_worker_init_fn)
test_loader = DataLoader(PoseDataset(args.data, 'dataset_test.txt', random_crop=False),
batch_size=args.batch_size, num_workers=args.workers,
shuffle=False, pin_memory=True, worker_init_fn=_worker_init_fn)
# evaluate model only
if args.evaluate:
validate(test_loader, model)
return
# training loop
for epoch in range(args.start_epoch, args.epochs):
# train for one epoch
lss, pos, ori = process(train_loader, model, optimizer, epoch, device, adv_tr_eps=args.adv_tr_eps)
stats = np.zeros(16)
stats[:6] = [epoch, lss.avg, pos.avg, pos.median, ori.avg, ori.median]
# evaluate on validation set
if (epoch+1) % args.test_freq == 0:
lss, pos, ori = validate(val_loader, model, device)
stats[6:11] = [lss.avg, pos.avg, pos.median, ori.avg, ori.median]
# remember best loss and save checkpoint
is_best = lss.avg < best_loss
best_epoch = epoch if is_best else best_epoch
best_loss = lss.avg if is_best else best_loss
# save best model
if is_best:
_save_checkpoint({
'epoch': epoch + 1,
'best_epoch': best_epoch,
'best_loss': best_loss,
'arch': args.arch,
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
}, True)
else:
is_best = False
# maybe save a checkpoint even if not best model
if (epoch+1) % args.save_freq == 0 and not is_best:
_save_checkpoint({
'epoch': epoch + 1,
'best_epoch': best_epoch,
'best_loss': best_loss,
'arch': args.arch,
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
}, False)
# evaluate on test set if best yet result on validation set
if is_best:
lss, pos, ori = validate(test_loader, model, device)
stats[11:] = [lss.avg, pos.avg, pos.median, ori.avg, ori.median]
# add row to log file
_save_log(stats, epoch == 0)
# early stopping
if args.early_stopping > 0 and epoch - best_epoch >= args.early_stopping:
print('=====\nEARLY STOPPING CRITERION MET (%d epochs since best validation loss)' % args.early_stopping)
break
print('=====\n')
if epoch+1 == args.epochs:
print('MAX EPOCHS (%d) REACHED' % args.epochs)
print('BEST VALIDATION LOSS: %.3f' % best_loss)
def process(loader, model, optimizer, epoch, device, validate_only=False, adv_tr_eps=0):
data_time = Meter()
batch_time = Meter()
losses = Meter()
positions = Meter(median=True)
orientations = Meter(median=True)
if validate_only:
# switch to evaluate mode
model.eval()
else:
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(loader):
input = input.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
# measure elapsed data loading time
data_time.update(time.time() - end)
end = time.time()
if adv_tr_eps > 0:
input.requires_grad = True
# compute output
output = model(input)
loss = model.cost(output, target)
# measure accuracy and record loss
with torch.no_grad():
output = output[0] if isinstance(output, (list, tuple)) else output
pos, orient = accuracy(output, target)
positions.update(pos)
orientations.update(orient)
losses.update(loss.data)
# compute gradient and optimize params
if not validate_only:
optimizer.zero_grad()
loss.backward()
if adv_tr_eps > 0:
# adversarial training sample
alt_input = input + adv_tr_eps * input.grad.data.sign()
if args.save_adv_img and i == 0:
# maybe save that img
_save_adv_img(input, input.grad.data, alt_input)
alt_output = model(alt_input)
alt_loss = model.cost(alt_output, target)
alt_loss.backward()
optimizer.step()
# measure elapsed processing time
batch_time.update(time.time() - end)
end = time.time()
if (i+1) % args.print_freq == 0 or i+1 == len(loader):
print((('Test [{1}/{2}]' if validate_only else 'Epoch: [{0}][{1}/{2}]\t') +
' Load: {data_time.pop_recent:.3f} ({data_time.avg:.3f})\t'
' Proc: {batch_time.pop_recent:.3f} ({batch_time.avg:.3f})\t'
' Loss: {loss.pop_recent:.4f} ({loss.avg:.4f})\t'
' Pos: {pos.pop_recent:.3f} ({pos.median:.3f})\t'
' Ori: {orient.pop_recent:.3f} ({orient.median:.3f})'
' CF: ({cost_sx:.3f}, {cost_sq:.3f})').format(
epoch, i+1, len(loader), batch_time=batch_time,
data_time=data_time, loss=losses, pos=positions, orient=orientations,
cost_sx=float(model.cost_fn.sx.data), cost_sq=float(model.cost_fn.sq.data)))
return losses, positions, orientations
def validate(test_loader, model, device):
with torch.no_grad():
result = process(test_loader, model, None, None, device, validate_only=True)
return result
def accuracy(output, target):
""" Computes position and orientation accuracy """
err_pos = torch.sum((output[:, :3] - target[:, :3])**2, dim=1)**(1/2)
err_orient = _angle_between_q(output[:, 3:], target[:, 3:])
return err_pos, err_orient
def _angle_between_q(q1, q2):
# from https://github.com/hazirbas/poselstm-pytorch/blob/master/models/posenet_model.py
abs_distance = torch.clamp(torch.abs(torch.sum(q2.mul(q1), dim=1)), 0, 1)
ori_err = 2 * 180 / np.pi * torch.acos(abs_distance)
return ori_err
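# Quick sanity check (hypothetical values, not part of the training loop):
#   q1 = torch.tensor([[0., 0., 0., 1.]])            # unit quaternion
#   q2 = torch.tensor([[0., 0., 0.7071, 0.7071]])    # 90-degree rotation about one axis
#   _angle_between_q(q1, q2)  # -> ~tensor([90.]), since 2 * degrees(acos(0.7071)) = 90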
def _set_random_seed(seed=RND_SEED): #, fanatic=False):
    # doesn't work even if fanatic & use_cuda
# if fanatic:
# # if not disabled, still some variation between runs, however, makes training painfully slow
# cudnn.enabled = False # ~double time
# if use_cuda:
# torch.cuda.manual_seed(seed)
# torch.cuda.manual_seed_all(seed)
cudnn.deterministic = True # 7% slower
cudnn.benchmark = False # also uses extra mem if True
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
def _worker_init_fn(id):
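    # note: every worker reseeds numpy with the same constant, which aids reproducibility
    # but makes parallel data-loading workers draw identical random augmentation streams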
np.random.seed(RND_SEED)
def _filename_pid(filename):
    # insert the experiment name before the (first) extension, e.g. stats.csv -> stats_<name>.csv
    if len(args.name) > 0:
        dot = filename.find('.')
        filename = (filename[:dot] + '_' + args.name + filename[dot:]) if dot >= 0 \
            else filename + '_' + args.name
    return os.path.join(args.output, filename)
def _save_adv_img(orig_img, grad, adv_img):
orig_img = _scale_img(orig_img[0, :, :, :].detach().cpu().numpy().transpose((1, 2, 0)))
grad_img = _scale_img(grad.data[0, :, :, :].detach().cpu().numpy().transpose((1, 2, 0)))
adv_img = _scale_img(adv_img[0, :, :, :].detach().cpu().numpy().transpose((1, 2, 0)))
img = np.hstack((orig_img, grad_img, adv_img))
result = Image.fromarray(img)
result.save(_filename_pid(args.save_adv_img))
def _scale_img(arr):
a, b = np.min(arr), np.max(arr)
return (255*(arr - a) / (b - a)).astype('uint8')
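# Quick illustration (values made up): _scale_img(np.array([-1.0, 0.0, 1.0]))
# min-max scales to [0, 255] and truncates: array([  0, 127, 255], dtype=uint8)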
def _save_log(stats, write_header, filename='stats.csv'):
with open(_filename_pid(filename), 'a', newline='') as fh:
w = csv.writer(fh, delimiter='\t', quotechar='"', quoting=csv.QUOTE_MINIMAL)
# maybe write header
if write_header:
w.writerow([' '.join(sys.argv)])
w.writerow(['epoch', 'tr_loss', 'tr_err_v_avg', 'tr_err_v_med', 'tr_err_q_avg', 'tr_err_q_med',
'val_loss', 'val_err_v_avg', 'val_err_v_med', 'val_err_q_avg', 'val_err_q_med',
'tst_loss', 'tst_err_v_avg', 'tst_err_v_med', 'tst_err_q_avg', 'tst_err_q_med'])
# write stats one epoch at a time
w.writerow(stats)
def _save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, _filename_pid(filename))
if is_best:
shutil.copyfile(_filename_pid(filename), _filename_pid('model_best.pth.tar'))
class Meter(object):
""" Stores current values and calculates stats """
def __init__(self, median=False):
self.default_median = median
self.reset()
@property
def pop_recent(self):
if self.default_median:
val = np.median(self.recent_values)
else:
val = np.mean(self.recent_values)
self.recent_values.clear()
return val
@property
def sum(self):
return np.sum(self.values)
@property
def count(self):
return len(self.values)
@property
def avg(self):
return np.mean(self.values)
@property
def median(self):
return np.median(self.values)
def reset(self):
self.recent_values = []
self.values = []
def update(self, val):
if torch.is_tensor(val):
val = val.detach().cpu().numpy()
if isinstance(val, (list, tuple)):
val = np.array(val)
if not isinstance(val, np.ndarray):
val = np.array([val])
self.recent_values.extend(val)
self.values.extend(val)
if __name__ == '__main__':
main()
|
[
"none@none"
] |
none@none
|
146995e0e0ef52d28990b4198ac8a86ec561fe8e
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02883/s186596180.py
|
a67dc653be0d152e0a20b9113874949cc3e810b6
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 559 |
py
|
import sys
import os,io
input=io.BytesIO(os.read(0,os.fstat(0).st_size)).readline
n,k=map(int,input().split())
a=list(map(int,input().split()))
f=list(map(int,input().split()))
a.sort()
f.sort()
f.reverse()
if sum(a)<=k:
print(0)
sys.exit()
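# Binary search on the answer: l and r bracket the smallest achievable value of
# max_i(a[i] * f[i]) after decreasing the a[i] by at most k in total; pairing
# sorted-ascending a with sorted-descending f minimizes that maximum.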
pointer=0
l=0
r=0
for i in range(n):
r=max(r,a[i]*f[i])
while l+1<r:
try1=(l+r)//2
required=0
for i in range(n):
required+=(max(0,a[i]-try1//f[i]))
if required>k:
l=try1
else:
r=try1
required=0
for i in range(n):
required+=(max(0,a[i]-l//f[i]))
if required>k:
print(r)
else:
print(l)
|
[
"[email protected]"
] | |
06ecff7cf3f3c22f2ea456bbf36099ddd67e66dd
|
9413df76327c871563d481d82e076b7869d9daf7
|
/polyaxon/layers/recurrent.py
|
a5b95dadaaac054c344140caa51e354eb5b848af
|
[
"MIT"
] |
permissive
|
cryptopascal/polyaxon
|
f03774e7407d8ee6cf1fcb2feae4785adb8f09cf
|
496837aff2da3fb0013c96b2b624aee81dfb7eba
|
refs/heads/master
| 2021-01-01T06:09:53.571485 | 2017-07-13T14:59:52 | 2017-07-13T14:59:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 38,502 |
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import abc
import logging
from collections import OrderedDict
import six
from six.moves import xrange
import numpy as np
import tensorflow as tf
import tensorflow.contrib.rnn as rnn
from tensorflow.python.ops import array_ops
from tensorflow.python.util import nest
from polyaxon.layers.core import Dropout
from polyaxon.layers.normalizations import BatchNormalization
from polyaxon.libs import getters
from polyaxon.libs.template_module import BaseLayer
from polyaxon.libs.utils import get_shape, get_variable_scope, track
from polyaxon.variables import variable
@six.add_metaclass(abc.ABCMeta)
class CoreRNN(BaseLayer):
@property
def w(self):
return self._w
@property
def b(self):
return self._b
@staticmethod
def _set_dropout(cell, mode, dropout):
"""Apply dropout to the outputs and inputs of `cell`."""
if not dropout:
return cell
if type(dropout) in [tuple, list]:
in_keep_prob, out_keep_prob = (1 - d for d in dropout)
elif isinstance(dropout, float):
in_keep_prob, out_keep_prob = 1 - dropout, 1 - dropout
else:
            raise Exception('Invalid dropout type (must be a float or a 2-element tuple/list of floats)')
return DropoutWrapper(mode, cell, in_keep_prob, out_keep_prob)
@staticmethod
def _stack_layers(cell_fn, mode, num_layers, state_is_tuple=True):
"""Stask multiple layers of the incoming cell."""
if num_layers and num_layers > 1:
return MultiRNNCell(mode, [cell_fn() for _ in xrange(num_layers)], state_is_tuple)
return cell_fn()
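    # Illustrative use (hypothetical values): three stacked GRU cells built from one factory:
    #   cell = CoreRNN._stack_layers(lambda: GRUCell(mode, num_units=64), mode, num_layers=3)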
def _declare_dependencies(self):
        raise NotImplementedError
def _build(self, incoming, *args, **kwargs):
"""
Args:
incoming: `Tensor`. 3-D Tensor [samples, timesteps, input dim].
"""
self._declare_dependencies()
sequence_length = None
if self.dynamic:
sequence_length = retrieve_seq_length_op(
incoming if isinstance(incoming, tf.Tensor) else tf.stack(incoming))
input_shape = get_shape(incoming)
inference = incoming
# If a tensor given, convert it to a per timestep list
        if not isinstance(inference, (list, np.ndarray)):
            ndim = len(input_shape)
            assert ndim >= 3, 'Input dim should be at least 3.'
            axes = [1, 0] + list(xrange(2, ndim))
            inference = tf.transpose(inference, axes)
            inference = tf.unstack(value=inference)
if self.dynamic:
outputs, state = tf.nn.dynamic_rnn(
cell=self._cell, inputs=inference, dtype=tf.float32,
initial_state=self.initial_state, sequence_length=sequence_length,
scope=self.module_name)
else:
outputs, state = rnn.static_rnn(
cell=self._cell, inputs=inference, dtype=tf.float32,
initial_state=self.initial_state, sequence_length=sequence_length,
scope=self.module_name)
for v in [self._cell.w, self._cell.b]:
if hasattr(v, '__len__'):
for var in v:
track(var, tf.GraphKeys.LAYER_VARIABLES, self.module_name)
else:
track(v, tf.GraphKeys.LAYER_VARIABLES, self.module_name)
track(outputs[-1], tf.GraphKeys.ACTIVATIONS, self.module_name)
if self.dynamic:
if self.return_seq:
o = outputs
else:
outputs = tf.transpose(tf.stack(outputs), [1, 0, 2])
o = advanced_indexing_op(outputs, sequence_length)
else:
o = outputs if self.return_seq else outputs[-1]
track(o, tf.GraphKeys.LAYER_TENSOR, self.module_name)
return (o, state) if self.return_state else o
class SimpleRNN(CoreRNN):
"""Simple RNN (Simple Recurrent Layer.)
Output:
if `return_seq`: 3-D Tensor [samples, timesteps, output dim].
else: 2-D Tensor [samples, output dim].
Args:
mode: `str`, Specifies if this training, evaluation or prediction. See `Modes`.
num_units: `int`, number of units for this layer.
activation: `str` (name) or `function` (returning a `Tensor`). Default: 'sigmoid'.
dropout: `tuple` of `float`: (1 - input_keep_prob, 1 - output_keep_prob). The
input and output keep probability.
num_layers: `int` how many times to stack the cell.
bias: `bool`. If True, a bias is used.
weights_init: `str` (name) or `Tensor`. Weights initialization.
return_seq: `bool`. If True, returns the full sequence instead of
last sequence output only.
return_state: `bool`. If True, returns a tuple with output and
states: (output, states).
initial_state: `Tensor`. An initial state for the RNN. This must be
a tensor of appropriate type and shape [batch_size x cell.state_size].
dynamic: `bool`. If True, dynamic computation is performed. It will not
compute RNN steps above the sequence length. Note that because TF
requires to feed sequences of same length, 0 is used as a mask.
So a sequence padded with 0 at the end must be provided. When
computation is performed, it will stop when it meets a step with
a value of 0.
trainable: `bool`. If True, weights will be trainable.
restore: `bool`. If True, this layer weights will be restored when loading a model.
name: `str`. A name for this layer (optional).
"""
def __init__(self, mode, num_units, activation='sigmoid', dropout=None, num_layers=1,
bias=True, weights_init=None, return_seq=False, return_state=False,
initial_state=None, dynamic=False, trainable=True,
restore=True, name='SimpleRNN'):
super(SimpleRNN, self).__init__(mode, name)
self.num_units = num_units
self.activation = activation
self.bias = bias
self.weights_init = weights_init
self.return_seq = return_seq
self.return_state = return_state
self.initial_state = initial_state
self.dynamic = dynamic
self.dropout = dropout
self.num_layers = num_layers
self.trainable = trainable
self.restore = restore
def _cell_fn(self):
cell = BasicRNNCell(
self.mode, num_units=self.num_units, activation=self.activation,
bias=self.bias, weights_init=self.weights_init,
trainable=self.trainable, restore=self.restore)
return self._set_dropout(cell, self.mode, self.dropout)
def _declare_dependencies(self):
self._cell = self._stack_layers(self._cell_fn, self.mode, self.num_layers)
class LSTM(CoreRNN):
"""LSTM (Long Short Term Memory Recurrent Layer).
Output:
if `return_seq`: 3-D Tensor [samples, timesteps, output dim].
else: 2-D Tensor [samples, output dim].
Args:
mode: `str`, Specifies if this training, evaluation or prediction. See `Modes`.
num_units: `int`, number of units for this layer.
activation: `str` (name) or `function` (returning a `Tensor`). Default: 'tanh'.
inner_activation: `str` (name) or `function` (returning a `Tensor`).
LSTM inner activation. Default: 'sigmoid'.
dropout: `tuple` of `float`: (1 - input_keep_prob, 1 - output_keep_prob). The
input and output keep probability.
num_layers: `int` how many times to stack the cell.
bias: `bool`. If True, a bias is used.
weights_init: `str` (name) or `Tensor`. Weights initialization.
forget_bias: `float`. Bias of the forget gate. Default: 1.0.
return_seq: `bool`. If True, returns the full sequence instead of
last sequence output only.
return_state: `bool`. If True, returns a tuple with output and
states: (output, states).
initial_state: `Tensor`. An initial state for the RNN. This must be
a tensor of appropriate type and shape [batch_size x cell.state_size].
dynamic: `bool`. If True, dynamic computation is performed. It will not
compute RNN steps above the sequence length. Note that because TF
requires to feed sequences of same length, 0 is used as a mask.
So a sequence padded with 0 at the end must be provided. When
computation is performed, it will stop when it meets a step with
a value of 0.
trainable: `bool`. If True, weights will be trainable.
restore: `bool`. If True, this layer weights will be restored when loading a model.
name: `str`. A name for this layer (optional).
References:
Long Short Term Memory, Sepp Hochreiter & Jurgen Schmidhuber,
Neural Computation 9(8): 1735-1780, 1997.
Links:
[http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf]
(http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf)
"""
def __init__(self, mode, num_units, activation='tanh', inner_activation='sigmoid', dropout=None,
num_layers=1, bias=True, weights_init=None, forget_bias=1.0, return_seq=False,
return_state=False, initial_state=None, dynamic=False, trainable=True,
restore=True, name='LSTM'):
super(LSTM, self).__init__(mode, name)
self.num_units = num_units
self.activation = activation
self.inner_activation = inner_activation
self.bias = bias
self.weights_init = weights_init
self.forget_bias = forget_bias
self.return_seq = return_seq
self.return_state = return_state
self.initial_state = initial_state
self.dynamic = dynamic
self.dropout = dropout
self.num_layers = num_layers
self.trainable = trainable
self.restore = restore
def _cell_fn(self):
cell = BasicLSTMCell(
self.mode, num_units=self.num_units, activation=self.activation,
inner_activation=self.inner_activation, forget_bias=self.forget_bias,
bias=self.bias, weights_init=self.weights_init, trainable=self.trainable,
restore=self.restore)
return self._set_dropout(cell, self.mode, self.dropout)
def _declare_dependencies(self):
self._cell = self._stack_layers(self._cell_fn, self.mode, self.num_layers)
class GRU(CoreRNN):
"""GRU (Gated Recurrent Unit Layer).
Output:
if `return_seq`: 3-D Tensor [samples, timesteps, output dim].
else: 2-D Tensor [samples, output dim].
Args:
mode: `str`, Specifies if this training, evaluation or prediction. See `Modes`.
num_units: `int`, number of units for this layer.
activation: `str` (name) or `function` (returning a `Tensor`). Default: 'tanh'.
inner_activation: `str` (name) or `function` (returning a `Tensor`).
GRU inner activation. Default: 'sigmoid'.
dropout: `tuple` of `float`: (1 - input_keep_prob, 1 - output_keep_prob). The
input and output keep probability.
num_layers: `int` how many times to stack the cell.
bias: `bool`. If True, a bias is used.
weights_init: `str` (name) or `Tensor`. Weights initialization.
return_seq: `bool`. If True, returns the full sequence instead of
last sequence output only.
return_state: `bool`. If True, returns a tuple with output and
states: (output, states).
initial_state: `Tensor`. An initial state for the RNN. This must be
a tensor of appropriate type and shape [batch_size x cell.state_size].
dynamic: `bool`. If True, dynamic computation is performed. It will not
compute RNN steps above the sequence length. Note that because TF
requires to feed sequences of same length, 0 is used as a mask.
So a sequence padded with 0 at the end must be provided. When
computation is performed, it will stop when it meets a step with
a value of 0.
trainable: `bool`. If True, weights will be trainable.
restore: `bool`. If True, this layer weights will be restored when loading a model.
name: `str`. A name for this layer (optional).
References:
Learning Phrase Representations using RNN Encoder–Decoder for
Statistical Machine Translation, K. Cho et al., 2014.
Links:
[http://arxiv.org/abs/1406.1078](http://arxiv.org/abs/1406.1078)
"""
def __init__(self, mode, num_units, activation='tanh', inner_activation='sigmoid',
dropout=None, num_layers=1, bias=True, weights_init=None, return_seq=False,
return_state=False, initial_state=None, dynamic=False,
trainable=True, restore=True, name='GRU'):
super(GRU, self).__init__(mode, name)
self.num_units = num_units
self.activation = activation
self.inner_activation = inner_activation
self.bias = bias
self.weights_init = weights_init
self.return_seq = return_seq
self.return_state = return_state
self.initial_state = initial_state
self.dynamic = dynamic
self.dropout = dropout
self.num_layers = num_layers
self.trainable = trainable
self.restore = restore
def _cell_fn(self):
cell = GRUCell(
self.mode, num_units=self.num_units, activation=self.activation,
inner_activation=self.inner_activation, bias=self.bias, weights_init=self.weights_init,
trainable=self.trainable, restore=self.restore)
return self._set_dropout(cell, self.mode, self.dropout)
def _declare_dependencies(self):
self._cell = self._stack_layers(self._cell_fn, self.mode, self.num_layers)
class BidirectionalRNN(BaseLayer):
"""Bidirectional RNN.
Build a bidirectional recurrent neural network, it requires 2 RNN Cells
to process sequence in forward and backward order. Any RNN Cell can be
used i.e. SimpleRNN, LSTM, GRU... with its own parameters. But the two
cells number of units must match.
Output:
if `return_seq`: 3-D Tensor [samples, timesteps, output dim].
else: 2-D Tensor Layer [samples, output dim].
Args:
mode: `str`, Specifies if this training, evaluation or prediction. See `Modes`.
rnncell_fw: `RNNCell`. The RNN Cell to use for foward computation.
rnncell_bw: `RNNCell`. The RNN Cell to use for backward computation.
return_seq: `bool`. If True, returns the full sequence instead of
last sequence output only.
return_states: `bool`. If True, returns a tuple with output and
states: (output, states).
initial_state_fw: `Tensor`. An initial state for the forward RNN.
This must be a tensor of appropriate type and shape [batch_size
x cell.state_size].
initial_state_bw: `Tensor`. An initial state for the backward RNN.
This must be a tensor of appropriate type and shape [batch_size
x cell.state_size].
dynamic: `bool`. If True, dynamic computation is performed. It will not
compute RNN steps above the sequence length. Note that because TF
requires to feed sequences of same length, 0 is used as a mask.
So a sequence padded with 0 at the end must be provided. When
computation is performed, it will stop when it meets a step with
a value of 0.
name: `str`. A name for this layer (optional).
"""
def __init__(self, mode, rnncell_fw, rnncell_bw, return_seq=False, return_states=False,
initial_state_fw=None, initial_state_bw=None, dynamic=False, name='BiRNN'):
super(BidirectionalRNN, self).__init__(mode, name)
self.rnncell_fw = rnncell_fw
self.rnncell_bw = rnncell_bw
self.return_seq = return_seq
self.return_states = return_states
self.initial_state_fw = initial_state_fw
self.initial_state_bw = initial_state_bw
self.dynamic = dynamic
def _build(self, incoming, *args, **kwargs):
"""
Args:
incoming: `Tensor`. 3-D Tensor Layer [samples, timesteps, input dim].
"""
assert (self.rnncell_fw.output_size ==
self.rnncell_bw.output_size), "RNN Cells number of units must match!"
input_shape = get_shape(incoming)
# TODO: DropoutWrapper
inference = incoming
# If a tensor given, convert it to a per timestep list
        if not isinstance(inference, (list, np.ndarray)):
            ndim = len(input_shape)
            assert ndim >= 3, 'Input dim should be at least 3.'
            axes = [1, 0] + list(xrange(2, ndim))
            inference = tf.transpose(inference, axes)  # (axes,) would wrap the permutation in an extra tuple
            inference = tf.unstack(inference)
sequence_length = None
if self.dynamic:
sequence_length = retrieve_seq_length_op(
incoming if isinstance(incoming, tf.Tensor) else tf.stack(incoming))
            # bidirectional_dynamic_rnn returns a 2-tuple (outputs, (state_fw, state_bw))
            outputs, (states_fw, states_bw) = tf.nn.bidirectional_dynamic_rnn(
cell_fw=self.rnncell_fw, cell_bw=self.rnncell_bw, inputs=inference,
initial_state_fw=self.initial_state_fw,
initial_state_bw=self.initial_state_bw,
sequence_length=sequence_length,
dtype=tf.float32)
else:
outputs, states_fw, states_bw = rnn.static_bidirectional_rnn(
cell_fw=self.rnncell_fw, cell_bw=self.rnncell_bw, inputs=inference,
initial_state_fw=self.initial_state_fw,
initial_state_bw=self.initial_state_bw,
dtype=tf.float32)
for v in [self.rnncell_fw.w, self.rnncell_fw.b, self.rnncell_bw.w, self.rnncell_bw.b]:
if hasattr(v, '__len__'):
for var in v:
track(var, tf.GraphKeys.LAYER_VARIABLES, self.module_name)
else:
track(v, tf.GraphKeys.LAYER_VARIABLES, self.module_name)
tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, outputs[-1])
if self.dynamic:
if self.return_seq:
o = outputs
else:
outputs = tf.transpose(tf.stack(outputs), [1, 0, 2])
o = advanced_indexing_op(outputs, sequence_length)
else:
o = outputs if self.return_seq else outputs[-1]
track(o, tf.GraphKeys.LAYER_TENSOR, self.module_name)
return (o, states_fw, states_bw) if self.return_states else o
@six.add_metaclass(abc.ABCMeta)
class CoreRNNCell(BaseLayer, rnn.RNNCell):
@property
def w(self):
return self._w
@property
def b(self):
return self._b
def _build(self, incoming, state, *args, **kwargs):
"""Subclasses should implement their logic here."""
raise NotImplementedError
class BasicRNNCell(CoreRNNCell):
"""The most basic RNN cell with custom params.
Args:
mode: `str`, Specifies if this training, evaluation or prediction. See `Modes`.
num_units: `int`, number of units for this layer.
activation: `str` (name) or `function` (returning a `Tensor`). Default: 'tanh'.
bias: `bool`. If True, a bias is used.
weights_init: `str` (name) or `Tensor`. Weights initialization.
trainable: `bool`. If True, weights will be trainable.
restore: `bool`. If True, this layer weights will be restored when loading a model.
name: `str`. A name for this layer (optional).
"""
def __init__(self, mode, num_units, activation='tanh', bias=True, weights_init=None,
trainable=True, restore=True, name='BasicRNNCell'):
super(BasicRNNCell, self).__init__(mode, name)
self.num_units = num_units
self.activation = activation
self.bias = bias
self.weights_init = weights_init
self.trainable = trainable
self.restore = restore
@property
    def state_size(self):
        return self.num_units  # __init__ stores the attribute without a leading underscore
    @property
    def output_size(self):
        return self.num_units
def _build(self, inputs, state, *args, **kwargs):
"""Most basic RNN: output = new_state = activation(W * input + U * state + B)."""
weights_init = getters.get_initializer(self.weights_init)
output = getters.get_activation(self.activation)(
_linear([inputs, state], self.num_units, True, 0.,
weights_init, self.trainable, self.restore))
# Retrieve RNN Variables
with get_variable_scope(name='Linear', reuse=True):
self._w = tf.get_variable(name='w')
self._b = tf.get_variable(name='b')
return output, output
class GRUCell(CoreRNNCell):
"""Gated Recurrent Unit cell with custom params.
Args:
mode: `str`, Specifies if this training, evaluation or prediction. See `Modes`.
num_units: `int`, number of units for this layer.
activation: `str` (name) or `function` (returning a `Tensor`). Default: 'tanh'.
inner_activation: `str` (name) or `function` (returning a `Tensor`).
GRU inner activation. Default: 'sigmoid'.
bias: `bool`. If True, a bias is used.
weights_init: `str` (name) or `Tensor`. Weights initialization.
trainable: `bool`. If True, weights will be trainable.
restore: `bool`. If True, this layer weights will be restored when loading a model.
name: `str`. A name for this layer (optional).
"""
def __init__(self, mode, num_units, activation='tanh', inner_activation='sigmoid', bias=True,
weights_init=None, trainable=True, restore=True, name='GRUCell'):
super(GRUCell, self).__init__(mode, name)
self._num_units = num_units
self.activation = activation
self.inner_activation = inner_activation
self.bias = bias
self.weights_init = weights_init
self.trainable = trainable
self.restore = restore
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def _build(self, incoming, state, *args, **kwargs):
"""Gated recurrent unit (GRU) with nunits cells."""
with get_variable_scope('Gates'): # Reset gate and update gate.
weights_init = getters.get_initializer(self.weights_init)
# We start with bias of 1.0 to not reset and not update.
r, u = array_ops.split(
axis=1, num_or_size_splits=2,
value=_linear([incoming, state], 2 * self._num_units, True, 1.0,
weights_init, self.trainable, self.restore))
inner_activation = getters.get_activation(self.inner_activation)
r, u = inner_activation(r), inner_activation(u)
with get_variable_scope('Candidate'):
activation = getters.get_activation(self.activation)
c = activation(
_linear([incoming, r * state], self._num_units, True, 0.,
weights_init, self.trainable, self.restore))
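        # GRU update (Cho et al., 2014): with reset gate r, update gate u and candidate c,
        # the new state computed below is h' = u * h + (1 - u) * c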
new_h = u * state + (1 - u) * c
self._w, self._b = list(), list()
# Retrieve RNN Variables
with get_variable_scope(scope='Gates/Linear', reuse=True):
            self._w.append(tf.get_variable('w'))
            self._b.append(tf.get_variable('b'))
        with get_variable_scope(scope='Candidate/Linear', reuse=True):
            self._w.append(tf.get_variable('w'))
            self._b.append(tf.get_variable('b'))
return new_h, new_h
class BasicLSTMCell(CoreRNNCell):
"""Basic LSTM recurrent network cell with custo\m params.
The implementation is based on: http://arxiv.org/abs/1409.2329.
We add forget_bias (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
It does not allow cell clipping, a projection layer, and does not
use peep-hole connections: it is the basic baseline.
For advanced models, please use the full LSTMCell that follows.
Args:
mode: `str`, Specifies if this training, evaluation or prediction. See `Modes`.
num_units: `int`, number of units for this layer.
forget_bias: `float`. Bias of the forget gate. Default: 1.0.
state_is_tuple: If True, accepted and returned states are n-tuples, where
`n = len(cells)`. If False, the states are all
concatenated along the column axis. This latter behavior will soon be
deprecated.
activation: `str` (name) or `function` (returning a `Tensor`). Default: 'tanh'.
        inner_activation: `str` (name) or `function` (returning a `Tensor`).
            LSTM inner activation. Default: 'sigmoid'.
bias: `bool`. If True, a bias is used.
weights_init: `str` (name) or `Tensor`. Weights initialization.
batch_norm: `bool`. If True, use batch normalization for this cell.
trainable: `bool`. If True, weights will be trainable.
restore: `bool`. If True, this layer weights will be restored when loading a model.
name: `str`. A name for this layer (optional).
"""
def __init__(self, mode, num_units, forget_bias=1.0, state_is_tuple=True, activation='tanh',
inner_activation='sigmoid', bias=True, weights_init=None,
batch_norm=False, trainable=True, restore=True, name='BasicLSTMCell'):
super(BasicLSTMCell, self).__init__(mode, name)
if not state_is_tuple:
logging.warning(
'{}: Using a concatenated state is slower and will soon be '
'deprecated. Use state_is_tuple=True.'.format(self))
self._num_units = num_units
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
self.batch_norm = batch_norm
self.activation = activation
self.inner_activation = inner_activation
self.bias = bias
self.weights_init = getters.get_initializer(weights_init)
self.trainable = trainable
self.restore = restore
def _declare_dependencies(self):
self._batch_norm_i = BatchNormalization(self.mode, gamma=0.1, trainable=self.trainable,
restore=self.restore)
self._batch_norm_j = BatchNormalization(self.mode, gamma=0.1, trainable=self.trainable,
restore=self.restore)
self._batch_norm_f = BatchNormalization(self.mode, gamma=0.1, trainable=self.trainable,
restore=self.restore)
self._batch_norm_o = BatchNormalization(self.mode, gamma=0.1, trainable=self.trainable,
restore=self.restore)
self._batch_norm_c = None
if self.batch_norm:
self._batch_norm_c = BatchNormalization(self.mode, gamma=0.1, trainable=self.trainable,
restore=self.restore)
@property
def state_size(self):
return (rnn.LSTMStateTuple(self._num_units, self._num_units)
if self._state_is_tuple else 2 * self._num_units)
@property
def output_size(self):
return self._num_units
def _build(self, incoming, state, *args, **kwargs):
"""Long short-term memory cell (LSTM)."""
self._declare_dependencies()
activation = getters.get_activation(self.activation)
inner_activation = getters.get_activation(self.inner_activation)
# Parameters of gates are concatenated into one multiply for efficiency.
if self._state_is_tuple:
c, h = state
else:
c, h = array_ops.split(axis=1, num_or_size_splits=2, value=state)
concat = _linear(
[incoming, h], 4 * self._num_units, True, 0., self.weights_init,
self.trainable, self.restore)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
i, j, f, o = array_ops.split(axis=1, num_or_size_splits=4, value=concat)
# apply batch normalization to inner state and gates
if self.batch_norm:
i = self._batch_norm_i(i)
j = self._batch_norm_j(j)
f = self._batch_norm_f(f)
o = self._batch_norm_o(o)
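        # standard LSTM cell update (with the default activations):
        #   c' = c * sigma(f + forget_bias) + sigma(i) * tanh(j);  h' = tanh(c') * sigma(o)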
new_c = (c * inner_activation(f + self._forget_bias) + inner_activation(i) * activation(j))
        # hidden-to-hidden batch normalization
if self.batch_norm:
batch_norm_new_c = self._batch_norm_c(new_c)
new_h = activation(batch_norm_new_c) * inner_activation(o)
else:
new_h = activation(new_c) * inner_activation(o)
if self._state_is_tuple:
new_state = rnn.LSTMStateTuple(new_c, new_h)
else:
new_state = tf.concat(values=[new_c, new_h], axis=1)
# Retrieve RNN Variables
with get_variable_scope(scope='Linear', reuse=True):
self._w = tf.get_variable('w')
self._b = tf.get_variable('b')
return new_h, new_state
class DropoutWrapper(CoreRNNCell):
"""Operator adding dropout to inputs and outputs of the given cell.
Creates a cell with added input and/or output dropout.
Dropout is never used on the state.
Args:
mode: `str`, Specifies if this training, evaluation or prediction. See `Modes`.
cell: an RNNCell, a projection to output_size is added to it.
input_keep_prob: unit Tensor or float between 0 and 1, input keep probability;
if it is float and 1, no input dropout will be added.
output_keep_prob: unit Tensor or float between 0 and 1, output keep
probability; if it is float and 1, no output dropout will be added.
seed: (optional) integer, the randomness seed.
Raises:
TypeError: if cell is not an RNNCell.
ValueError: if keep_prob is not between 0 and 1.
"""
def __init__(self, mode, cell, input_keep_prob=1.0, output_keep_prob=1.0, seed=None,
name='DropoutWrapper'):
super(DropoutWrapper, self).__init__(mode, name)
if not isinstance(cell, CoreRNNCell):
raise TypeError("The parameter cell is not a RNNCell.")
if isinstance(input_keep_prob, float) and not (0.0 <= input_keep_prob <= 1.0):
raise ValueError(
'Parameter input_keep_prob must be between 0 and 1: {}'.format(input_keep_prob))
if isinstance(output_keep_prob, float) and not (0.0 <= output_keep_prob <= 1.0):
raise ValueError(
'Parameter output_keep_prob must be between 0 and 1: {}'.format(output_keep_prob))
self._cell = cell
self._input_keep_prob = input_keep_prob
self._output_keep_prob = output_keep_prob
self._seed = seed
def _declare_dependencies(self):
self._inputs_dropout = None
if not isinstance(self._input_keep_prob, float) or self._input_keep_prob < 1:
self._inputs_dropout = Dropout(self.mode, self._input_keep_prob, seed=self._seed)
self._outputs_dropout = None
        if not isinstance(self._output_keep_prob, float) or self._output_keep_prob < 1:
self._outputs_dropout = Dropout(self.mode, self._output_keep_prob, seed=self._seed)
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
@property
def w(self):
return self._cell.w
@property
def b(self):
return self._cell.b
def _build(self, incoming, state, *args, **kwargs):
"""Run the cell with the declared dropouts."""
self._declare_dependencies()
if self._inputs_dropout:
incoming = self._inputs_dropout(incoming)
output, new_state = self._cell(incoming, state)
if self._outputs_dropout:
            output = self._outputs_dropout(output)  # keep_prob and seed were configured at construction time
return output, new_state
class MultiRNNCell(CoreRNNCell):
"""RNN cell composed sequentially of multiple simple cells.
Create a RNN cell composed sequentially of a number of RNNCells.
Args:
mode: `str`, Specifies if this training, evaluation or prediction. See `Modes`.
cells: list of RNNCells that will be composed in this order.
state_is_tuple: If True, accepted and returned states are n-tuples, where
`n = len(cells)`. If False, the states are all
concatenated along the column axis. This latter behavior will soon be
deprecated.
Raises:
ValueError: if cells is empty (not allowed), or at least one of the cells
returns a state tuple but the flag `state_is_tuple` is `False`.
"""
def __init__(self, mode, cells, state_is_tuple=True, name='MultiRNNCell'):
super(MultiRNNCell, self).__init__(mode, name)
if not cells:
raise ValueError("Must specify at least one cell for MultiRNNCell.")
for cell in cells:
if not isinstance(cell, CoreRNNCell):
raise TypeError("The parameter cells: one cell is not a RNNCell.")
self._cells = cells
self._state_is_tuple = state_is_tuple
if not state_is_tuple:
if any(nest.is_sequence(c.state_size) for c in self._cells):
raise ValueError("Some cells return tuples of states, but the flag "
"state_is_tuple is not set. State sizes are: %s"
% str([c.state_size for c in self._cells]))
@property
def w(self):
return [cell.w for cell in self._cells]
@property
def b(self):
return [cell.b for cell in self._cells]
@property
def state_size(self):
if self._state_is_tuple:
return tuple(cell.state_size for cell in self._cells)
else:
return sum([cell.state_size for cell in self._cells])
@property
def output_size(self):
return self._cells[-1].output_size
def _build(self, incoming, state, *args, **kwargs):
"""Run this multi-layer cell on inputs, starting from state."""
cur_state_pos = 0
cur_inp = incoming
new_states = []
for i, cell in enumerate(self._cells):
with get_variable_scope("cell_{}".format(i)):
if self._state_is_tuple:
if not nest.is_sequence(state):
                        raise ValueError(
                            "Expected state to be a tuple of length {}, but received: {}".format(
                                len(self.state_size), state))
cur_state = state[i]
else:
cur_state = array_ops.slice(state, [0, cur_state_pos], [-1, cell.state_size])
cur_state_pos += cell.state_size
cur_inp, new_state = cell(cur_inp, cur_state)
new_states.append(new_state)
new_states = (tuple(new_states) if self._state_is_tuple else
array_ops.concat(values=new_states, axis=1))
return cur_inp, new_states
def _linear(args, output_size, bias, bias_start=0.0, weights_init=None,
trainable=True, restore=True, scope=None):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
Args:
args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of W[i].
bias: boolean, whether to add a bias term or not.
bias_start: starting value to initialize the bias; 0 by default.
scope: VariableScope for the created subgraph; defaults to "Linear".
Returns:
A 2D Tensor with shape [batch x output_size] equal to
sum_i(args[i] * W[i]), where W[i]s are newly created matrices.
Raises:
ValueError: if some of the arguments has unspecified or wrong shape.
"""
if args is None or (nest.is_sequence(args) and not args):
raise ValueError('`args` must be specified')
if not nest.is_sequence(args):
args = [args]
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape().as_list() for a in args]
for shape in shapes:
if len(shape) != 2:
raise ValueError('Linear is expecting 2D arguments: %s' % str(shapes))
if not shape[1]:
raise ValueError('Linear expects shape[1] of arguments: %s' % str(shapes))
else:
total_arg_size += shape[1]
# Now the computation.
with get_variable_scope(scope or 'Linear'):
_w = variable(name='w', shape=[total_arg_size, output_size], initializer=weights_init,
trainable=trainable, restore=restore)
if len(args) == 1:
res = tf.matmul(a=args[0], b=_w)
else:
res = tf.matmul(a=array_ops.concat(values=args, axis=1), b=_w)
if not bias:
return res
_b = variable(name='b', shape=[output_size],
initializer=tf.constant_initializer(bias_start),
trainable=trainable, restore=restore)
return res + _b
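# Illustrative call (hypothetical shapes): with args = [x, h] of shapes [batch, 10] and
# [batch, 20], _linear(args, 32, True) concatenates to [batch, 30], creates w: [30, 32]
# and b: [32], and returns the [batch, 32] affine map x_h @ w + b.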
def retrieve_seq_length_op(data):
"""An op to compute the length of a sequence. 0 are masked. """
with tf.name_scope('GetLength'):
used = tf.sign(x=tf.reduce_max(tf.abs(data), axis=2))
length = tf.reduce_sum(input_tensor=used, axis=1)
length = tf.cast(x=length, dtype=tf.int32)
return length
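# e.g. a zero-padded batch [[[1], [2], [0]], [[3], [0], [0]]] yields lengths [2, 1]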
def advanced_indexing_op(input, index):
"""Advanced Indexing for Sequences. """
batch_size = get_shape(input)[0]
max_length = int(input.get_shape()[1])
dim_size = int(input.get_shape()[2])
index = tf.range(0, batch_size) * max_length + (index - 1)
flat = tf.reshape(input, [-1, dim_size])
relevant = tf.gather(flat, index)
return relevant
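# e.g. with input of shape [batch, max_length, dim] and index holding each sequence's
# length, this gathers input[b, index[b] - 1, :], i.e. the last valid timestep per sample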
RNN_LAYERS = OrderedDict([
('GRU', GRU),
('LSTM', LSTM),
('BasicLSTMCell', BasicLSTMCell),
('BasicRNNCell', BasicRNNCell),
('BidirectionalRNN', BidirectionalRNN),
('GRUCell', GRUCell),
('SimpleRNN', SimpleRNN),
])
|
[
"[email protected]"
] | |
8cb6515893f4750b2e3a962fcd4192b44c3107ae
|
913fb9ec1e709a5140676ba7b2371b1976afca72
|
/endoClip/miRandaTest/parseAlignment.py
|
c08d09bcc7dc0a4a54c27a157d1eb854ad369bb4
|
[] |
no_license
|
cgreer/ResearchScripts
|
171cfe9555ea06fdeb91084c12d07d1b45a2335c
|
1107803bb1459d6b6e1dfb1a89679d2b6fd49062
|
refs/heads/master
| 2016-09-05T10:43:19.090247 | 2012-04-12T21:38:11 | 2012-04-12T21:38:11 | 1,673,080 | 0 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,998 |
py
|
import bioLibCG
import cgNexusFlat
import cgDL
from cgAutoCast import autocast
import string
from cgNexus import Nexus
@autocast
def updateAdjustedMismatches(fN, fF, guValue = .5, otherValue = 1.0):
NX = Nexus(fN, fF)
NX.load(['sigMask', 'adjustedNumMismatches'])
while NX.nextID():
mask = NX.sigMask
numGU = mask.count('G')
numGapAndMM = mask.count('X')
NX.adjustedNumMismatches = (numGU * guValue) + (numGapAndMM * otherValue)
NX.save()
@autocast
def updateAdjustedMismatchesFlat(fN, fOut, guValue = .5, otherValue = 1.0):
f = open(fN, 'r')
fOut = open(fOut, 'w')
for line in f:
ls = line.strip().split('\t')
mask = ls[18]
numGU = mask.count('G')
numGapAndMM = mask.count('X')
adjustedNumMismatches = (numGU * guValue) + (numGapAndMM * otherValue)
ls.append(str(adjustedNumMismatches))
fOut.write('\t'.join(ls) + '\n')
f.close()
fOut.close()
#TAG::lowercase,count from end::
def countLowerEnd(theString, fromEnd = False):
if fromEnd:
theString = theString[::-1]
numLower = 0
for c in theString:
if c in string.lowercase:
numLower += 1
else:
return numLower
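# e.g. countLowerEnd('acgTTT') -> 3, countLowerEnd('TTTacg', fromEnd=True) -> 3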
#TAG::collapse,repeat,string::
def collapseRuns(theString):
prevChar = None
newString = []
for c in theString:
if c == prevChar:
continue
else:
newString.append(c)
prevChar = c
return ''.join(newString)
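# e.g. collapseRuns('AAT--GG') -> 'AT-G'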
#TAG::read multiple file lines,read file,file::
def lineFileParser(inFile, linesPerEntry = 4):
'''retrieve X lines at a time'''
f = open(inFile, 'r')
allLines = []
while True:
newEntry = [f.readline() for i in range(linesPerEntry)]
for l in newEntry:
if l == '':
return allLines
allLines.append(newEntry)
def parseRandaOutput(inFile, smallFN, degFN, oFN):
    '''miRanda output is pre-grepped so that every alignment spans exactly 10 lines'''
allAlignments = lineFileParser(inFile, 10)
smallID_length = getIDLength(smallFN)
degID_length = getIDLength(degFN)
outF = open(oFN, 'w')
for i, alignment in enumerate(allAlignments):
oldScore, qRange, rRange, query, matchInfo, reference, sID, dID = parseAlignmentBasic(alignment)
complexResult = parseAlignmentComplex(query, reference)
numMM, numM, numGU, numQGaps, numRGaps, significantTargetMask, numExtensionsQ, numExtensionsR = complexResult
#check for alignments with N in them
if all([x == 0 for x in complexResult]): continue
sLen = smallID_length[int(sID)]
dLen = degID_length[int(dID)]
pString = [i, sID, dID, qRange[0], qRange[1], rRange[0], rRange[1], sLen, dLen, query, reference, numM, numMM, numGU, numQGaps, numRGaps, numExtensionsQ, numExtensionsR, significantTargetMask]
pString = '\t'.join([str(x) for x in pString])
outF.write(pString + '\n')
outF.close()
#TAG::aligning,parse alignment,miRanda::
def parseAlignmentBasic(alignment):
#parse raw data
info, n1, query, matchInfo, reference, n3, n4, n5, idInfo, n6 = alignment
oldScore = float(info.split()[2])
qRange = (int(info.split()[3][2:]), int(info.split()[5]))
rRange = (int(info.split()[6][2:]), int(info.split()[8]))
query = query.split()[2]
matchInfo = matchInfo.strip()
reference = reference.split()[2]
sID = idInfo.split()[0][1:]
dID = idInfo.split()[1]
#calculate read qRange and rRange
    '''qRange is odd: the lower bound is correct but the upper bound is too high by one
    ((2, 14) should be (2, 13)). Also, qRange must have the lower-case (unaligned)
    letters added to it, whereas rRange already includes them'''
qRange = qRange[0] - countLowerEnd(query, fromEnd = True), qRange[1] + countLowerEnd(query) - 1 #should always be (1,N)
output = [oldScore, qRange, rRange, query, matchInfo, reference, sID, dID]
if any([x == '' for x in output]):
print output
raise NameError("missing some parsing info")
return output
def parseAlignmentComplex(query, reference):
'''Get num missmatches and other parsed data from alignment'''
allLetters = ['A', 'T', 'C', 'G']
queryGapPairs = ['-%s' % x for x in allLetters]
referenceGapPairs = ['%s-' % x for x in allLetters]
#TAG::genomic letter combinations,combinations,letters::
guPairs = ['GT', 'TG']
compPairs = ['AT', 'TA', 'CG', 'GC']
misPairs = ['TT', 'TC', 'AG', 'AC', 'AA', 'CC', 'CT', 'CA', 'GG', 'GA']
if len(reference) != len(query):
raise NameError("ALIGNMENTS ARE DIFFERENT SIZE!")
#calculate # gaps// ALLOW TARGET GAPS???
collQ = collapseRuns(query)
collR = collapseRuns(reference)
numQGaps = collQ.count('-')
numRGaps = collR.count('-')
numExtensionsQ = query.count('-') - numQGaps
numExtensionsR = reference.count('-') - numRGaps
#calc match/mismatch (rev to get from small 5-->3)
query = query.upper()[::-1]
reference = reference.upper()[::-1]
matchPairs = ['%s%s' % (x,y) for x,y in zip(query, reference)]
significantTargetMask = [] #Mask is entire small string masked
numM = numMM = numGU = 0
gapShift = 0
for i,pair in enumerate(matchPairs):
if pair in queryGapPairs:
gapShift -= 1
numMM += 1
significantTargetMask.append('X')
elif pair in guPairs:
numGU += 1
significantTargetMask.append('G')
elif pair in compPairs:
numM += 1
significantTargetMask.append('N')
elif pair in misPairs:
numMM += 1
significantTargetMask.append('X')
elif pair in referenceGapPairs:
numMM += 1
significantTargetMask.append('X')
elif 'N' in pair:
return [0,0,0,0,0,0,0,0] #dont take N alignments
else:
print query
print reference
print pair
raise NameError("COMBINATION NOT ACCOUNTED FOR!!!")
significantTargetMask = ''.join(significantTargetMask)
    significantTargetMask = significantTargetMask[::-1] # undo the earlier reversal so the mask matches miRanda's original orientation
return [numMM, numM, numGU, numQGaps, numRGaps, significantTargetMask, numExtensionsQ, numExtensionsR]
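# e.g. a fully complementary query/reference pair yields a mask of all 'N' with
# numMM == numGU == 0; each G:U wobble adds a 'G' and each mismatch or gap an 'X'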
def getIDLength(fN):
allSeqs = lineFileParser(fN, 3)
id_length = {}
for small in allSeqs:
id, seq, blank = small
id = int(id.strip().split('>')[-1])
id_length[id] = len(seq.strip())
return id_length
def parseRandaInclusiveCheck(inFile, smallFN):
'''miRanda Output will be grepped to make it every alignment is 10lines'''
id_length = getIDLength(smallFN)
allAlignments = lineFileParser(inFile, 10)
for i, alignment in enumerate(allAlignments):
checkInclusiveSmallLength(alignment, id_length)
def checkInclusiveSmallLength(alignment, id_length):
'''hacked script to check if miRanda always shows the full length small
RNA at the QUERY part of the alignment...needed for alignment calculations'''
#parse raw data
info, n1, query, matchInfo, reference, n3, n4, n5, idInfo, n6 = alignment
oldScore = float(info.split()[2])
qRange = (int(info.split()[3][2:]), int(info.split()[5]))
rRange = (int(info.split()[6][2:]), int(info.split()[8]))
query = query.split()[2]
matchInfo = matchInfo.strip()
reference = reference.split()[2]
sID = idInfo.split()[0][1:]
dID = idInfo.split()[1]
query = list(query)
dashCount = query.count('-')
if len(query) - dashCount != id_length[int(sID)]:
print sID, dID
raise NameError("MISMATCH")
if __name__ == "__main__":
import sys
if sys.argv[1] == "help":
bioLibCG.gd(sys.argv[0])
else:
bioLibCG.submitArgs(globals()[sys.argv[1]], sys.argv[1:])
|
[
"[email protected]"
] | |
c2c2302c19cd27a8c388c3076648b346e94027f1
|
4f98acd943807cfa593c61b4332548fd3cd6fb72
|
/CodeChef/2016 Nov/1ALEXTASK.py
|
18583b9192db4d160449ec59b1d559bba8f3e51b
|
[] |
no_license
|
mrkimkim/Competitive-Programming-Summary
|
9086986782bef615db2002a3be3f3d8317c80345
|
fa75693818c8ec65f623d98b11c1fe0123bc7200
|
refs/heads/master
| 2021-01-20T05:25:10.793615 | 2017-11-12T10:33:14 | 2017-11-12T10:33:14 | 89,775,003 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 397 |
py
|
def lcm(a1, a2):
a, b = a1, a2
while a % b != 0:
a %= b
a, b = b, a
return a1 * a2 / b
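# The loop above is a variant of Euclid's algorithm: on exit b holds gcd(a1, a2),
# so a1 * a2 / b is the least common multiple, e.g. lcm(4, 6) -> 12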
T = int(raw_input())
for tt in xrange(T):
N = int(raw_input())
A = [int(x) for x in raw_input().split(' ')]
LCM = []
for i in range(N):
for j in range(i+1, N):
LCM.append(lcm(A[i], A[j]))
LCM.sort()
print LCM[0]
|
[
"="
] |
=
|
f2e706a3ce884256f22aef75a2af9a5311ac1719
|
5004c5592ead17f55acacce36d880bb3b12a1255
|
/graphql/client.py
|
33eb2cdc977f484ead2333602eac2a2dc6967621
|
[] |
no_license
|
xav-b/stacks
|
f856c9efd5b5c3fb5165d31fb9b928cca14038f4
|
2faa0ca759509174920eca88c6553f1b55a14f09
|
refs/heads/master
| 2021-10-08T05:07:44.878158 | 2018-12-08T02:29:46 | 2018-12-08T02:29:55 | 64,149,347 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 294 |
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
import requests
HEADERS = {'Content-Type': 'application/json'}
QUERY = {'query': '{ hello }'}
res = requests.post('http://localhost:4000/graphql', headers=HEADERS, json=QUERY)
if res.status_code == 200:
print(res.json())
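    # a successful response follows the standard GraphQL shape, assuming the server
    # defines a `hello` resolver: {'data': {'hello': ...}}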
|
[
"[email protected]"
] | |
7dd3116a2797092f761d2f59ec02bc3ba7272e5a
|
9d862dd68f8b4ea4e7de9397fef8592824c77449
|
/app/top/api/rest/WlbWmsSkuCreateRequest.py
|
6598145c2d04de7ebb441ec797e5897dc69e60ee
|
[] |
no_license
|
hi-noikiy/tmall-sku-outer_id
|
ffaca630dfb288ca33d962b8a050932d1047b9c8
|
1bcf29386a513bcb210bf5d91016e0dcb1ebc1ad
|
refs/heads/master
| 2021-05-09T18:20:27.150316 | 2017-03-08T06:43:57 | 2017-03-08T06:43:57 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,200 |
py
|
'''
Created by auto_sdk on 2016.05.25
'''
from app.top.api.base import RestApi
class WlbWmsSkuCreateRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.advent_lifecycle = None
self.approval_number = None
self.bar_code = None
self.brand = None
self.brand_name = None
self.category = None
self.category_name = None
self.color = None
self.cost_price = None
self.extend_fields = None
self.gross_weight = None
self.height = None
self.is_area_sale = None
self.is_batch_mgt = None
self.is_danger = None
self.is_hygroscopic = None
self.is_shelflife = None
self.is_sn_mgt = None
self.item_code = None
self.item_id = None
self.item_price = None
self.length = None
self.lifecycle = None
self.lockup_lifecycle = None
self.name = None
self.net_weight = None
self.origin_address = None
self.pcs = None
self.reject_lifecycle = None
self.size = None
self.specification = None
self.store_code = None
self.tag_price = None
self.title = None
self.type = None
self.use_yn = None
self.volume = None
self.width = None
def getapiname(self):
return 'taobao.wlb.wms.sku.create'
|
[
"[email protected]"
] | |
d77bce91c76a6901bd11109520eb8a28a3dd9eae
|
7000895fad6f4c23084122ef27b3292d5e57df9f
|
/tests/core/p2p/test_p2pprotocol.py
|
efc60f785338aa0ad31408c367a16083daa57d7b
|
[
"MIT"
] |
permissive
|
jack3343/xrd-core
|
1302cefe2a231895a53fcef73e558cdbc1196884
|
48a6d890d62485c627060b017eadf85602268caf
|
refs/heads/master
| 2022-12-15T07:36:16.618507 | 2020-08-27T09:21:36 | 2020-08-27T09:21:36 | 290,652,706 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,265 |
py
|
# coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
from collections import namedtuple
from unittest import TestCase
from mock import Mock, patch, MagicMock
from pyxrdlib.pyxrdlib import hstr2bin, bin2hstr
from xrd.core.misc import logger
from xrd.core.p2p import p2pPeerManager
from xrd.core.p2p.p2pfactory import P2PFactory, p2p_msg_priority
from xrd.core.p2p.p2pprotocol import P2PProtocol
from xrd.core.xrdnode import xrdNode
from xrd.generated import xrdlegacy_pb2
from tests.misc.helper import replacement_getTime
logger.initialize_default()
Host = namedtuple('Host', ['host', 'port'])
# FIXME: These tests will soon be removed
class TestP2PProtocol(TestCase):
def __init__(self, *args, **kwargs):
super(TestP2PProtocol, self).__init__(*args, **kwargs)
def setUp(self):
with patch('xrd.core.misc.ntp.getTime', new=replacement_getTime):
self.channel = P2PProtocol()
self.channel._observable = Mock()
self.channel.factory = Mock(autospec=P2PFactory)
self.channel.factory.p2p_msg_priority = p2p_msg_priority
self.channel.factory._xrd_node = Mock(autospec=xrdNode)
self.channel.factory._xrd_node.peer_manager = Mock(autospec=p2pPeerManager)
self.channel.factory._xrd_node.peer_manager.is_banned = MagicMock(return_value=False)
self.channel.factory._xrd_node.peer_manager.trusted_addresses = MagicMock(return_value=[])
sample_peer_1 = Host('127.0.0.2', '9000')
sample_host = Host('127.0.0.1', '9000')
attrs = {'getPeer.return_value': sample_peer_1, 'getHost.return_value': sample_host}
self.channel.transport = Mock(**attrs)
def tearDown(self):
del self.channel
def test_addr_remote_works(self):
"""
This is more to ensure that the transport is mocked correctly.
"""
self.assertEqual('127.0.0.2:9000', self.channel.peer.full_address)
def test_addr_local_works(self):
"""
This is more to ensure that the transport is mocked correctly.
"""
self.assertEqual('127.0.0.1:9000', self.channel.host.full_address)
@patch('xrd.core.misc.ntp.getTime')
def test_connectionMade_behavior(self, getTime):
"""
When connectionMade, the Managers (Observers) must be informed once, and peer list, version request messages
must be in the outgoing message queue.
ntp.getTime() is patched everywhere, not just in p2pprotocol, because OutgoingMessage uses it too.
"""
self.channel.factory.get_connected_peer_addrs.return_value = {'127.0.0.2:9000'}
getTime.return_value = 1525078652.9991353
self.channel.connectionMade()
# Managers were notified
self.channel.p2pchain_manager.new_channel.assert_called_once_with(self.channel)
self.channel.peer_manager.new_channel.assert_called_once_with(self.channel)
self.channel.tx_manager.new_channel.assert_called_once_with(self.channel)
# send_peer_list and send_version_request messages should be in the outgoing queue.
self.assertEqual(self.channel.outgoing_queue.unfinished_tasks, 2)
def test_connectionLost_behavior(self):
"""
When connectionLost, peer_manager (an Observer) is notified. (why not other Observers?)
"""
self.channel.connectionLost()
self.channel.peer_manager.remove_channel.assert_called_once_with(self.channel)
@patch('xrd.core.misc.ntp.getTime')
def test_dataReceived_normal_message(self, getTime):
"""
Ensure that dataReceived works as expected with a normal message.
"""
getTime.return_value = 1525078652.9991353
data = b'\x00\x00\x00\x80\x08\x01"|\n\x0e66.175.217.203\n\x0e138.195.214.85\n\r35.177.72.178\n\x0e173.249.22.240\n\x0c2.238.131.20\n\r77.64.144.198\n\r34.208.138.15\n\x0f144.202.107.148\x00\x00\x00\x00' # noqa
self.channel.dataReceived(data)
# Twisted transport should have received acknowledgement message to send out
acknowledgement_bytes = b'\x00\x00\x00\x08\x08\x13\xaa\x01\x03\x08\x84\x01'
self.channel.transport.write.assert_called_once_with(acknowledgement_bytes)
@patch('xrd.core.misc.ntp.getTime')
@patch('xrd.core.p2p.p2pprotocol.logger', autospec=True)
@patch('xrd.core.p2p.p2pprotocol.config.dev', autospec=True)
def test_dataReceived_too_big(self, config_dev, logger, getTime):
"""
Normally the buffer size upper limit is 10MB. But we're going to patch it smaller here.
"""
config_dev.max_bytes_out = 10
config_dev.trust_min_msgcount = 10
getTime.return_value = 1525078652.9991353
acknowledgement_bytes = b'\x00\x00\x00\x08\x08\x13\xaa\x01\x03\x08\x88\x01'
self.channel._buffer = 10 * acknowledgement_bytes
self.channel.dataReceived(acknowledgement_bytes)
self.channel.transport.loseConnection.assert_called()
@patch('xrd.core.misc.ntp.getTime')
def test_dataReceived_spam_ban_peer(self, getTime):
getTime.return_value = 1525078652.9991353
self.channel.rate_limit = 2
acknowledgement_bytes = b'\x00\x00\x00\x08\x08\x13\xaa\x01\x03\x08\x88\x01'
self.channel._buffer = 10 * acknowledgement_bytes
self.channel.dataReceived(acknowledgement_bytes)
self.channel.peer_manager.ban_channel.assert_called_with(self.channel)
@patch('xrd.core.misc.ntp.getTime')
def test_send_version_request(self, getTime):
getTime.return_value = 1525078652.9991353
version_request = b'\x00\x00\x00\x02\x1a\x00'
self.channel.send_version_request()
self.channel.transport.write.assert_called_with(version_request)
@patch('xrd.core.misc.ntp.getTime')
def test_send_sync(self, getTime):
getTime.return_value = 1525078652.9991353
self.channel.send_sync(synced=True)
synced = b'\x00\x00\x00\r\x08\x10\x92\x01\x08\n\x06Synced'
self.channel.transport.write.assert_called_with(synced)
self.channel.send_sync(synced=False)
unsynced = b'\x00\x00\x00\x05\x08\x10\x92\x01\x00'
self.channel.transport.write.assert_called_with(unsynced)
@patch('xrd.core.misc.ntp.getTime')
def test_send_fetch_block(self, getTime):
getTime.return_value = 1525078652.9991353
block_request = b'\x00\x00\x00\x06\x08\x06B\x02\x08\x01'
self.channel.send_fetch_block(1)
self.channel.transport.write.assert_called_with(block_request)
@patch('xrd.core.misc.ntp.getTime')
def test_get_headerhash_list(self, getTime):
getTime.return_value = 1525078652.9991353
get_headerhash_request = b'\x00\x00\x00\x05\x08\x12\xa2\x01\x00'
self.channel.send_get_headerhash_list(1)
self.channel.transport.write.assert_called_with(get_headerhash_request)
def test_parse_buffer_works(self):
self.channel._buffer = bytes(hstr2bin('000000191a170a0776657273696f6e120c67656e657369735f68617368' +
'000000191a170a0776657273696f6e120c67656e657369735f68617368'))
messages = self.channel._parse_buffer([0])
self.assertEqual(2, len(list(messages)))
@patch('xrd.core.p2p.p2pprotocol.logger', autospec=True)
def test_parse_buffer_invalid_data(self, logger):
self.channel._buffer = bytes(hstr2bin('0000000000000000000000000000000000000000000000000000000000' +
'1111111111111111111111111111111111111111111111111111111111'))
messages = self.channel._parse_buffer([0])
messages_list = list(messages)
self.assertEqual(0, len(messages_list))
logger.warning.assert_called_with("Problem parsing message. Banning+Dropping connection")
def test_wrap_message_works(self):
veData = xrdlegacy_pb2.VEData(version="version", genesis_prev_hash=b'genesis_hash')
msg = xrdlegacy_pb2.LegacyMessage(func_name=xrdlegacy_pb2.LegacyMessage.VE, veData=veData)
self.assertEqual('000000191a170a0776657273696f6e120c67656e657369735f68617368',
bin2hstr(P2PProtocol._wrap_message(msg)))
|
[
"[email protected]"
] | |
2a93997c42a613a172bca0bdc009d9f471283b9a
|
3449e5511dc8da19fc841af767dbe8d216e26ffb
|
/manage.py
|
580284fcc718e6d888b0f90ffd61253c58d00251
|
[] |
no_license
|
erikwestra/mm-server
|
8ba2af0ee7acd372949589b6f8d429099a38ea58
|
bead1ad439541211e33fdc60264a869f18a99ae9
|
refs/heads/master
| 2021-01-10T21:14:23.636707 | 2015-05-27T21:22:54 | 2015-05-27T21:22:54 | 28,573,174 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 251 |
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mmServer.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"[email protected]"
] | |
80a9d22fbfed25b61183a249814b1f3d269d6cc3
|
70cf356dc87130ce9b70672af0646b70e51d7eb7
|
/sphinxext/autorun.py
|
8373340e2ff6fb3d8ad47b09f758fb0a4bd88d75
|
[] |
no_license
|
yarikoptic/pydagogue
|
d5ecff8302b743b11c97649d47ff2530c666646d
|
e77f65392afc8d5773a2bff91d53d8787c316474
|
refs/heads/master
| 2021-01-15T20:44:11.500452 | 2014-09-23T21:20:21 | 2014-09-23T21:20:21 | 2,068,505 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,579 |
py
|
# -*- coding: utf-8 -*-
"""
Based on:
sphinxcontrib.autorun
~~~~~~~~~~~~~~~~~~~~~~
Run the code and insert stdout after the code block.
:copyright: Copyright 2007-2009 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
Here's the LICENSE referred to:
If not otherwise noted, the extensions in this package are licensed
under the following license.
Copyright (c) 2009 by the contributors (see AUTHORS file).
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
from subprocess import Popen,PIPE
import re
from docutils import nodes
from sphinx.util.compat import Directive
from docutils.parsers.rst.directives import flag, unchanged
from sphinx.errors import SphinxError
class RunBlockError(SphinxError):
pass
# category = 'runblock error'
class AutoRun(object):
here = os.path.abspath(__file__)
pycon = os.path.join(os.path.dirname(here), 'pycon.py')
config = dict(
pycon = 'python ' + pycon,
pycon_prefix_chars = 4,
pycon_show_source = False,
console = 'bash',
console_prefix_chars = 1,
bash = 'bash',
bash_prefix_chars = 0,
bash_prompt_prefix = '$ ',
bash_output_encoding = 'utf8',
)
@classmethod
def builder_init(cls,app):
cls.config.update(app.builder.config.autorun_languages)
def subst_vars(in_str, vars):
""" Do jinja-like variable substitution """
out_str = in_str
for key, value in vars.items():
out_str = re.sub('{{ ' + key + ' }}', value, out_str)
return out_str
class VarsMixin(object):
default_env_vars_name = 'runblock_vars'
@property
def env_vars_name(self):
return self.options.get('env_vars_name', self.default_env_vars_name)
def _get_env_vars(self):
env = self.state.document.settings.env
return getattr(env, self.env_vars_name, {})
def _set_env_vars(self, env_vars):
env = self.state.document.settings.env
return setattr(env, self.env_vars_name, env_vars)
def add_var(self, name, value):
vars = self._get_env_vars()
vars[name] = value
self._set_env_vars(vars)
class _Params(object):
pass
class LangMixin(VarsMixin):
default_cwd = '/'
default_exe_pre = ''
default_exe_post = ''
def run_prepare(self):
p = _Params()
env = self.state.document.settings.env
config = AutoRun.config
try:
language = self.arguments[0]
except IndexError:
language = 'bash'
if language not in config:
raise RunBlockError('Unknown language %s' % language)
# Get configuration values for the language
args = config[language].split()
p.language = language
p.input_encoding = config.get(language+'_input_encoding','ascii')
p.output_encoding = config.get(language+'_output_encoding','ascii')
p.prefix_chars = config.get(language+'_prefix_chars', 0)
p.show_source = config.get(language+'_show_source', True)
p.prompt_prefix = config.get(language+'_prompt_prefix', '')
# Build the code text
_, p.cwd = env.relfn2path(self.options.get('cwd', self.default_cwd))
proc = Popen(args,
bufsize=1,
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
cwd=p.cwd)
# Remove prefix
p.codelines = (line[p.prefix_chars:] for line in self.content)
# Make executable code
p.exe_code = u'\n'.join(p.codelines).encode(p.input_encoding)
        # Prepend and append extra code lines
exe_pre = self.options.get('exe_pre', self.default_exe_pre)
exe_post = self.options.get('exe_post', self.default_exe_post)
exe_code = '\n'.join((exe_pre, p.exe_code, exe_post))
# Do env substitution
exe_code = subst_vars(exe_code, self._get_env_vars())
# Run the code
stdout, stderr = proc.communicate(exe_code)
# Process output
if stdout:
p.out = ''.join(stdout).decode(p.output_encoding)
elif stderr:
p.out = ''.join(stderr).decode(p.output_encoding)
else:
p.out = ''
p.returncode = proc.returncode
return p
class RunBlock(Directive, LangMixin):
has_content = True
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = True
option_spec = {
'linenos': flag,
'hide': flag,
'cwd': unchanged,
'env_vars_name': unchanged,
'exe_pre': unchanged,
'exe_post': unchanged
}
def run(self):
params = self.run_prepare()
# Get the original code with prefixes
if params.show_source:
code = params.prompt_prefix + (
u'\n' + params.prompt_prefix).join(self.content)
else:
code = ''
code_out = u'\n'.join((code, params.out))
# Do env substitution
code_out = subst_vars(code_out, self._get_env_vars())
# Make nodes
if 'hide' in self.options:
return [nodes.comment(code_out, code_out)]
literal = nodes.literal_block(code_out, code_out)
literal['language'] = params.language
literal['linenos'] = 'linenos' in self.options
return [literal]
SPLITTER_RE = re.compile(r'.. \|(.*)\| replace:: (.*)')
def prefixes_match(prefixes, line):
match = SPLITTER_RE.match(line)
if match is None:
return False
return match.groups()[0] in prefixes
def add_links(links, link_fname):
# Write into links file
link_lines = []
if os.path.exists(link_fname):
with open(link_fname, 'rt') as fobj:
link_lines = fobj.readlines()
link_lines = [line for line in link_lines
if not prefixes_match(links, line)]
for name, value in links.items():
link_prefix = '.. |{0}|'.format(name)
link_line = '{0} replace:: ``{1}``\n'.format(link_prefix, value)
link_lines.append(link_line)
with open(link_fname, 'wt') as fobj:
fobj.write(''.join(link_lines))
class LinksMixin(object):
default_links_file = '/object_names.inc'
def add_links(self, links):
env = self.state.document.settings.env
links_file = self.options.get('links_file', self.default_links_file)
_, link_fname = env.relfn2path(links_file)
# Write links
add_links(links, link_fname)
class AddVars(Directive, LangMixin, VarsMixin, LinksMixin):
has_content = True
required_arguments = 1
optional_arguments = 1
final_argument_whitespace = True
input_encoding = 'ascii'
option_spec = {
'runblock_vars': unchanged,
'links_file': unchanged,
}
def run(self):
name = self.arguments.pop(0)
params = self.run_prepare()
value = params.out.strip()
self.add_var(name, value)
self.add_links({name: value})
code = u'\n'.join(self.content)
return [nodes.comment(code, code)]
def setup(app):
app.add_directive('runblock', RunBlock)
app.add_directive('addvars', AddVars)
app.connect('builder-inited', AutoRun.builder_init)
app.add_config_value('autorun_languages', AutoRun.config, 'env')
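# Example usage in a reStructuredText source (added for illustration; the
# command shown is hypothetical):
#
#   .. runblock:: bash
#
#      echo hello
#
# The directive pipes the block through bash and renders the source lines,
# prefixed with "$ ", followed by the captured output.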
# vim: set expandtab shiftwidth=4 softtabstop=4 :
|
[
"[email protected]"
] | |
7a2fda54a67c368b387b99cdca294856aff50611
|
40b9c3020bda615a7c097ea962c039fc08a90a2f
|
/bonsai/core/admin.py
|
108699c3a43ee6c0619aa03ee7c034ed2673ed10
|
[] |
no_license
|
mahongquan/bonsailims
|
980e9d606a58a81c7637e7f9e640cb16b9cabb61
|
9f5d58a49d24ba86c8d9472ea7e6449bc8c61538
|
refs/heads/master
| 2020-12-24T14:53:56.886079 | 2013-11-27T02:35:15 | 2013-11-27T02:35:15 | 14,733,537 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 279 |
py
|
from django.contrib import admin
from models import *
admin.site.register(ProjectCode)
admin.site.register(CollectionMethod)
admin.site.register(Material)
admin.site.register(StorageMethod)
admin.site.register(Project)
admin.site.register(Subject)
admin.site.register(Sample)
|
[
"[email protected]"
] | |
f944801874341c8211053e8eeb0fde3287cd620e
|
9ce4292954000fd66bcdbd0797a280c306308d08
|
/quizzes/00.organize.me/Cracking the Coding Interview/17-14-2.py
|
47dc8b3fc7400cb532246997349c31584ce6d361
|
[
"MIT"
] |
permissive
|
JiniousChoi/encyclopedia-in-code
|
0c786f2405bfc1d33291715d9574cae625ae45be
|
77bc551a03a2a3e3808e50016ece14adb5cfbd96
|
refs/heads/master
| 2021-06-27T07:50:10.789732 | 2020-05-29T12:50:46 | 2020-05-29T12:50:46 | 137,426,553 | 2 | 0 |
MIT
| 2020-10-13T08:56:12 | 2018-06-15T01:29:31 |
Python
|
UTF-8
|
Python
| false | false | 3,161 |
py
|
'''
17.14 - Restore, as well as possible, a sentence whose punctuation has been removed.
'''
WORD_LIST = []
with open('english_dictionary.txt', 'r', encoding="ISO-8859-1") as fp:
WORD_LIST = fp.read().splitlines()
WORD_LIST = [word.lower() for word in WORD_LIST]
WORD_LIST = list(set(WORD_LIST))
_WORD_LIST = []
for word in WORD_LIST:
    # Keep only words made up entirely of lowercase letters.
    if all(ch in 'abcdefghijklmnopqrstuvwxyz' for ch in word):
        _WORD_LIST.append(word)
WORD_LIST = _WORD_LIST
WORD_LIST.sort(key = lambda x: (len(x), x))
print('done sorting word_list')
print('words length ranges from {} to {}'.format(len(WORD_LIST[0]), len(WORD_LIST[-1])))
print('Creating rainbow_table')
rainbow_table = {}
for word in WORD_LIST:
if word not in rainbow_table:
rainbow_table[word] = True
print('Done!')
assert 'dog' in rainbow_table
def recursive(broken_sentence):
if not broken_sentence:
return [[]]
result = []
candidates = get_candidates(broken_sentence)
for candidate in candidates:
word, rest = candidate
for each in recursive(rest):
tmp = [word]
tmp.extend(each)
result.append(tmp)
return result
def get_candidates(sentence):
yield (sentence[:1].upper(), sentence[1:])
for i in range(1, len(sentence)+1):
word = sentence[:i]
        rest = sentence[i:]  # performance-wise, better moved inside the if below
if word in rainbow_table:
yield (word, rest)
def concat_capitals_together(words):
on_capital = False
range_list = []
for i, word in enumerate(words):
if word.isupper() and not on_capital:
on_capital = True
start_idx = i
elif word.isupper() and on_capital:
if i==(len(words)-1):
range_list.append((start_idx, len(words)))
elif not word.isupper() and on_capital:
on_capital=False
end_idx = i
range_list.append((start_idx, end_idx))
elif not word.isupper() and not on_capital:
pass
else:
assert False
#range_list is prepared
for i,j in range_list[::-1]:
words[i:j] = [''.join(words[i:j])]
return words
broken_sentence = input('input a broken sentence: ')
#broken_sentence = 'ilovejinsungheleftthismorning'
#broken_sentence = 'jesslookedjustliketimherbrother'
#broken_sentence = 'dog'
result = recursive(broken_sentence)
sentences = []
for each_list in result:
#assert isinstance(each, list)
each_list = concat_capitals_together(each_list)
sentence = ' '.join(each_list)
sentences.append(sentence)
print('numbers of sentences : {}'.format(len(sentences)))
def lesser_capitals(sentence):
count = 0
for c in sentence:
if c.isupper():
count +=1
return count, sentence.count(' ')
sentence_in_order = sorted(sentences, key = lesser_capitals)
#print(sentence_in_order)
print('restored sentence :',sentence_in_order[:1])
for stc in sentence_in_order:
if 'brother' in stc:
print('found')
print(stc)
import sys
sys.exit(0)
print('not found')
|
[
"[email protected]"
] | |
6d18e5b463a36cc4e08cb5c8a534bab7b40eed17
|
d36c4c882089b9b81e6e3b6323eeb9c43f5160a9
|
/7KYU/Basic Calculator/solution.py
|
b3781c325d408bf5bcf4057d678e10d871a6578b
|
[] |
no_license
|
stuartstein777/CodeWars
|
a6fdc2fa6c4fcf209986e939698d8075345dd16f
|
d8b449a16c04a9b883c4b5e272cc90a4e6d8a2e6
|
refs/heads/master
| 2023-08-27T20:32:49.018950 | 2023-08-24T23:23:29 | 2023-08-24T23:23:29 | 233,281,814 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 282 |
py
|
def calculate(num1, operation, num2):
if operation == '+':
return num1 + num2
elif operation == '-':
return num1 - num2
elif operation == '*':
return num1 * num2
elif operation == '/' and num2 != 0:
return num1 / num2
return None
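# Examples (added for illustration):
#   calculate(2, '+', 3)  -> 5
#   calculate(6, '/', 2)  -> 3.0 (true division under Python 3)
#   calculate(1, '/', 0)  -> None (division by zero is rejected)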
|
[
"[email protected]"
] | |
f8d124ea1738661ae00c133c22ce07c03b60fac3
|
1d60c5a7b8ce6277bff514e376f79848f706344c
|
/Machine Learning Scientist with Python/13. Feature Engineering for NLP in Python/01. Basic features and readability scores/04. Hashtags and mentions in Russian tweets.py
|
993f21900df53d45c8ed5930dbf0aecac0dd49e1
|
[] |
no_license
|
DidiMilikina/DataCamp
|
338c6e6d3b4f5b6c541c1aba155a36e9ee24949d
|
3bf2cf3c1430190a7f8e54efda7d50a5fd66f244
|
refs/heads/master
| 2020-12-15T13:16:54.178967 | 2020-05-06T17:30:54 | 2020-05-06T17:30:54 | 235,113,616 | 4 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,693 |
py
|
'''
Hashtags and mentions in Russian tweets
Let's revisit the tweets dataframe containing the Russian tweets. In this exercise, you will compute the number of hashtags and mentions in each tweet by defining two functions count_hashtags() and count_mentions() respectively and applying them to the content feature of tweets.
In case you don't recall, the tweets are contained in the content feature of tweets.
Instructions 1/2
50 XP
1
In the list comprehension, use startswith() to check if a particular word starts with '#'.
2
In the list comprehension, use startswith() to check if a particular word starts with '@'.
'''
# SOLUTION 1
# (In the exercise environment the `tweets` DataFrame and matplotlib's
# pyplot (as plt) are preloaded, which is why the solution has no imports.)
# Function that returns number of hashtags in a string
def count_hashtags(string):
# Split the string into words
words = string.split()
# Create a list of words that are hashtags
hashtags = [word for word in words if word.startswith('#')]
# Return number of hashtags
return(len(hashtags))
# Create a feature hashtag_count and display distribution
tweets['hashtag_count'] = tweets['content'].apply(count_hashtags)
tweets['hashtag_count'].hist()
plt.title('Hashtag count distribution')
plt.show()
# SOLUTION 2
# Function that returns number of mentions in a string
def count_mentions(string):
# Split the string into words
words = string.split()
# Create a list of words that are mentions
mentions = [word for word in words if word.startswith('@')]
# Return number of mentions
return(len(mentions))
# Create a feature mention_count and display distribution
tweets['mention_count'] = tweets['content'].apply(count_mentions)
tweets['mention_count'].hist()
plt.title('Mention count distribution')
plt.show()
|
[
"[email protected]"
] | |
83ff8724593b0b4cb2fbcade365b2021df9e4454
|
4dd695521343d56ff943e8c1768343d7680714e3
|
/experiments/experiments_SBrT_2018/scripts_bob/ivector_T200_fold4.py
|
ed7c89dd5e015998081776051f44612c8bdf818c
|
[] |
no_license
|
natharb/environment
|
ea659ee541f6473e92b5b30c549e52b66f47b280
|
86e6cee6e01d2370abeb7c55a2c8a15001735919
|
refs/heads/master
| 2021-09-28T02:39:02.222966 | 2018-11-13T12:03:34 | 2018-11-13T12:03:34 | 139,762,646 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,195 |
py
|
import sys
sys.path.insert(0, '.')
import bob.bio.spear
import bob.bio.gmm
import numpy
import scipy.spatial
temp_directory = './closedset_ynogutti/ivector/256/T200/fold_4/temp/'
result_directory = './closedset_ynogutti/ivector/256/T200/fold_4/results/'
sub_directory = 'subdirectory'
database = 'database_ivector_T200_fold4.py'
groups = ['dev']
#groups = ['dev', 'eval']
preprocessor = bob.bio.spear.preprocessor.Energy_2Gauss(max_iterations = 10, convergence_threshold = 0.0005, variance_threshold = 0.0005, win_length_ms = 20., win_shift_ms = 10., smoothing_window = 10)
extractor = bob.bio.spear.extractor.Cepstral(win_length_ms = 25, win_shift_ms = 10, n_filters = 24 , dct_norm = False, f_min = 0, f_max = 4000, delta_win = 2, mel_scale = True,
with_energy = True, with_delta = True, with_delta_delta = True, n_ceps = 19, pre_emphasis_coef = 0.97)
algorithm = bob.bio.gmm.algorithm.IVector(subspace_dimension_of_t = 200, tv_training_iterations = 10, update_sigma = True, use_whitening = True, use_lda = False,
use_wccn = False, use_plda = False, lda_dim = 50, plda_dim_F = 50, plda_dim_G = 50, plda_training_iterations = 50, number_of_gaussians = 256)
parallel = 10
verbose = 2
|
[
"[email protected]"
] | |
9b723091bf86c7c061fc6eceeef607fde9cef2a1
|
b10e3b9e797af8fd100746a520541bf5bf11d707
|
/SciGraph/wsgi.py
|
c844e5759fe084b73fbb677c879efe83af98d268
|
[] |
no_license
|
done520/SciGraph
|
20adf85f2359e9e9b18b16ff3868388fdc4ba295
|
e2bcff74bb46a00ce5b9b528857f5b6e8a4d491f
|
refs/heads/master
| 2023-08-25T08:50:47.659532 | 2021-10-15T08:17:21 | 2021-10-15T08:17:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 393 |
py
|
"""
WSGI config for SciGraph project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'SciGraph.settings')
application = get_wsgi_application()
|
[
"[email protected]"
] | |
ec4b251935be7880dbdd7fd3803ed95f553826b8
|
f07c7e3966de00005230ebe31ab0579b92b66872
|
/tests/test_conversions.py
|
fabd5966ca8f99127821f8602853abe727b673c4
|
[
"Apache-2.0"
] |
permissive
|
Algomorph/LevelSetFusion-Python
|
30d990228e3d63a40668ade58e7879ae6e581719
|
46625cd185da4413f9afaf201096203ee72d3803
|
refs/heads/master
| 2021-06-25T11:30:44.672555 | 2020-11-11T14:47:33 | 2020-11-11T14:47:33 | 152,263,399 | 12 | 2 |
Apache-2.0
| 2019-05-30T23:12:33 | 2018-10-09T14:15:03 |
Python
|
UTF-8
|
Python
| false | false | 2,949 |
py
|
# ================================================================
# Created by Gregory Kramida on 2/6/19.
# Copyright (c) 2019 Gregory Kramida
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ================================================================
# stdlib
from unittest import TestCase
# libraries
import numpy as np
# local
# C++ extension
import level_set_fusion_optimization as cpp_extension
class ConversionTest(TestCase):
def test_tensor_f3_basic(self):
t = np.array([[[1, 2, 3, 4],
[5, 6, 7, 8]],
[[9, 10, 11, 12],
[13, 14, 15, 16]]], dtype=np.float32)
t2 = cpp_extension.return_input_f3(t)
self.assertTrue(np.allclose(t, t2))
t3 = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]],
[[13, 14, 15], [16, 17, 18], [19, 20, 21], [22, 23, 24]],
[[25, 26, 27], [28, 29, 30], [31, 32, 33], [34, 35, 36]]]
, dtype=np.float32)
t4 = cpp_extension.return_input_f3(t3)
self.assertTrue(np.allclose(t3, t4))
def test_tensor_f4_basic(self):
t = np.arange(1, 49, dtype=np.float32).reshape((2, 4, 2, 3))
t2 = cpp_extension.return_input_f4(t)
self.assertTrue(np.allclose(t, t2))
def test_tensor_f3rm_basic(self):
t = np.arange(1, 25, dtype=np.float32).reshape((2, 4, 3))
t2 = cpp_extension.return_tensor_f3rm()
self.assertTrue(np.allclose(t, t2))
def test_tensor_f4rm_basic(self):
t = np.arange(1, 49, dtype=np.float32).reshape((2, 4, 2, 3))
t2 = cpp_extension.return_tensor_f4rm()
self.assertTrue(np.allclose(t, t2))
def test_tensor_f3_scale(self):
t = np.arange(1, 25, dtype=np.float32).reshape((2, 4, 3))
factor = 2.5
t2 = cpp_extension.scale(t, factor)
self.assertTrue(np.allclose(t * factor, t2))
def test_tensor_f3_add_constant(self):
t = np.arange(1, 25, dtype=np.float32).reshape((2, 4, 3))
constant = 95.2
t2 = cpp_extension.add_constant(t, constant)
self.assertTrue(np.allclose(t + constant, t2))
def test_tensor_f3_add_2_tensors(self):
t1 = np.arange(1, 25, dtype=np.float32).reshape((2, 4, 3))
t2 = np.random.rand(2, 4, 3).astype(np.float32) * 15.0
t3 = cpp_extension.add_tensors(t1, t2)
self.assertTrue(np.allclose(t1 + t2, t3))
|
[
"[email protected]"
] | |
0b18e9c90c6d35661c619353909e746c7833e730
|
8272944ef520d9f013e7e5083ac201a148f11728
|
/src/examples/regression/sklearn_lasso_diabetes.py
|
0097200b76aa2196441d3a285851bb2bb66637fd
|
[] |
no_license
|
alecordev/data-science
|
4709a46ee31e21286913548317bdbffba1b51fd3
|
9b152fa8c03cca33abcc65cc572d15815917bd05
|
refs/heads/master
| 2023-04-09T17:56:29.336037 | 2023-03-25T13:15:42 | 2023-03-25T13:15:42 | 157,546,453 | 1 | 0 | null | 2022-07-23T20:21:51 | 2018-11-14T12:35:55 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,807 |
py
|
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split, GridSearchCV, KFold
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Lasso
from sklearn.metrics import make_scorer, r2_score, mean_squared_error
from joblib import dump, load
import numpy as np
# Load the diabetes dataset
diabetes = load_diabetes()
# Split the dataset into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(
diabetes.data, diabetes.target, test_size=0.2, random_state=42
)
# Define the pipeline
pipeline = Pipeline(
[("imputer", SimpleImputer()), ("scaler", StandardScaler()), ("lasso", Lasso())]
)
# Define the hyperparameter grid
param_grid = {"lasso__alpha": np.logspace(-4, 4, 20)}
# Define the evaluation metrics
scoring = {"r2": make_scorer(r2_score), "mse": make_scorer(mean_squared_error)}
# Define the cross-validation strategy
cv = KFold(n_splits=5, shuffle=True, random_state=42)
# Define the grid search
grid_search = GridSearchCV(
pipeline, param_grid=param_grid, cv=cv, scoring=scoring, refit="r2"
)
# Fit the grid search to the training data
grid_search.fit(X_train, y_train)
# Print the best hyperparameters and metrics
print("Best hyperparameters:", grid_search.best_params_)
print("Best r2 score:", grid_search.best_score_)
print("Best MSE:", grid_search.cv_results_["mean_test_mse"][grid_search.best_index_])
# Save the best model
dump(grid_search.best_estimator_, "diabetes_model.joblib")
# Load the best model and use it to predict a new observation
loaded_model = load("diabetes_model.joblib")
new_observation = X_test[0, :].reshape(1, -1)
print("Predicted value:", loaded_model.predict(new_observation)[0])
|
[
"[email protected]"
] | |
7844825964125eb543f1049cde3198ffff59c10f
|
fca9772888449ecb8ff54ead7bbe4dede2ff1a3c
|
/CO1.14.n+nn+nnn.py
|
6919a27b996614b38382757e9c8dd3b0d790ed2d
|
[] |
no_license
|
Python-lab-cycle/ramsinayoosuf
|
ab862963c1a130b762c9cec2404111636a1407d9
|
cadfaa4b9e6aaa68c9b2324ff75da01e17345156
|
refs/heads/main
| 2023-06-17T14:16:21.908960 | 2021-07-02T09:40:36 | 2021-07-02T09:40:36 | 325,949,659 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 84 |
py
|
# Reads a digit and prints n + nn + nnn (e.g. 5 -> 5 + 55 + 555 = 615).
# Note: the construction only matches "digit repetition" for single-digit input.
i = int(input("Enter a number:"))
num = i + ((i * 10) + i) + ((i * 100) + (i * 10) + i)
print(num)
|
[
"[email protected]"
] | |
6320ccb78a34f4a6666733de6526e3d4ebf016d2
|
acb8e84e3b9c987fcab341f799f41d5a5ec4d587
|
/langs/0/bn4.py
|
caf18b2dd66155809531533074d1f52923163ed2
|
[] |
no_license
|
G4te-Keep3r/HowdyHackers
|
46bfad63eafe5ac515da363e1c75fa6f4b9bca32
|
fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2
|
refs/heads/master
| 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 486 |
py
|
import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'bn4':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1])
|
[
"[email protected]"
] | |
0dc3338dc5d0d26d62a6edd028266ac3cc0eba8f
|
2100df7054545b15927d66d4ef2f0490e78d0761
|
/tests/test_spidermanager/test_spiders/spider0.py
|
65c6d49a97e01d1e65f0414ede96ddd5f58c7276
|
[
"MIT"
] |
permissive
|
elkingtowa/pyrake
|
79dd6053de604ad52df530e544e873c107333489
|
3f2fbe805e9b153d287fb50b5cb3f5b35495ac52
|
refs/heads/master
| 2020-12-24T14:01:53.009374 | 2014-08-29T04:44:56 | 2014-08-29T04:44:56 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 110 |
py
|
from pyrake.spider import Spider
class Spider0(Spider):
allowed_domains = ["pyrake1.org", "pyrake3.org"]
|
[
"[email protected]"
] | |
eab369dbdac56b2df51579a6ff167856be574579
|
97caa124ffa5da9819c39a16c734165176d90349
|
/archive/2016/week6/tasks/figures.py
|
bdb3f1677fcede9381c778c3f8ccd4270e3351ce
|
[
"Apache-2.0"
] |
permissive
|
YAtOff/python0
|
dd684731065321fd52d475fd2b2105db59f5c19c
|
b5af5004131d64dd52d42746eddb72b6c43a13c7
|
refs/heads/master
| 2021-01-18T21:19:11.990434 | 2019-05-29T20:14:23 | 2019-05-29T20:14:23 | 44,601,010 | 6 | 7 |
Apache-2.0
| 2019-10-31T22:45:21 | 2015-10-20T11:13:11 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 420 |
py
|
"""
Напишета програма, която с помощта на Turtle рисува следните фитури:
- триъгълник
- квадрат
- петоъгълник
- шестоъгълник
Използвайте цикли.
Може ли да напишета една функция, която да може да рисува всяка едно от фигурите.
"""
|
[
"[email protected]"
] | |
a67ec9e602d48416932753f037925e48bd6d91cb
|
2916dd05f6c67958d4ad71392f8c093ed6710016
|
/app1/migrations/0015_auto_20180326_1039.py
|
0f52518c963545d78a706a8d0fa95fc72539fa1b
|
[] |
no_license
|
firchatn/WorkOrderMangerDjango
|
a05d6bbfdcc6111da813aca56676ea12b8a4c1d0
|
f546e8db24e8ab9a0465e09bb17bd9190570a018
|
refs/heads/master
| 2020-04-04T10:16:52.840589 | 2018-11-02T10:12:17 | 2018-11-02T10:12:17 | 155,849,103 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,068 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-03-26 08:39
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app1', '0014_auto_20180326_0008'),
]
operations = [
migrations.AlterField(
model_name='woconsommable',
name='codWo',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app1.WorkOrdre'),
),
migrations.AlterField(
model_name='woeqpinspecter',
name='codWo',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app1.WorkOrdre'),
),
migrations.AlterField(
model_name='woequipement',
name='codWo',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app1.WorkOrdre'),
),
migrations.AlterField(
model_name='wologistique',
name='codWo',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app1.WorkOrdre'),
),
migrations.AlterField(
model_name='womethodeinspection',
name='codWo',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app1.WorkOrdre'),
),
migrations.AlterField(
model_name='wopersonnel',
name='codWo',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app1.WorkOrdre'),
),
migrations.AlterField(
model_name='woservice',
name='codWo',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app1.WorkOrdre'),
),
migrations.AlterField(
model_name='wovehicule',
name='codWo',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app1.WorkOrdre'),
),
]
|
[
"[email protected]"
] | |
01f0fbe0765bca56a059bfe8dcb23fba59cfa74f
|
b739654b057190041d3f82d035874fe10e4825d4
|
/qtpynodeeditor/base.py
|
ec2455a0de51d78cf186d5399a2e9c8f5dde762c
|
[
"BSD-3-Clause"
] |
permissive
|
klauer/qtpynodeeditor
|
708cec70ae51cdbf52262e1cdf0d0bd33bf5e137
|
523e76e15ef26edc73fdad6fdd65df9babbde73b
|
refs/heads/master
| 2023-08-16T06:35:08.051000 | 2023-08-04T16:50:21 | 2023-08-04T16:50:21 | 175,901,436 | 141 | 50 |
NOASSERTION
| 2023-08-15T06:23:03 | 2019-03-15T22:55:45 |
Python
|
UTF-8
|
Python
| false | false | 342 |
py
|
class Serializable:
'Interface for a serializable class'
def save(self) -> dict:
"""
Save
Returns
-------
value : dict
"""
...
def restore(self, state: dict):
"""
Restore
Parameters
----------
state : dict
"""
...
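# A minimal sketch of an implementer (added for illustration; the Point
# class below is hypothetical and not part of the package's API).
class Point(Serializable):
    'Toy serializable 2D point'
    def __init__(self, x=0.0, y=0.0):
        self.x = x
        self.y = y

    def save(self) -> dict:
        # Capture the full state as a plain dict
        return {'x': self.x, 'y': self.y}

    def restore(self, state: dict):
        # Rebuild state from a dict produced by save()
        self.x = state.get('x', 0.0)
        self.y = state.get('y', 0.0)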
|
[
"[email protected]"
] | |
dc78542a96e486cffbe3bd259698a41a9b92db77
|
f9acdde88dbb70a2844e058f6c53c016fc8407c1
|
/lfc/utils/images.py
|
ea55297329c22e839417157a4753b8d6ebccdc1f
|
[] |
no_license
|
yzl11/django-lfc
|
536daccae82351af66f3894c38c8f2702691af75
|
75c900d672b4d36705fb8fa4833c446bbb78efea
|
refs/heads/master
| 2021-01-15T13:14:37.192773 | 2015-05-03T15:03:12 | 2015-05-03T15:03:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,916 |
py
|
# PIL imports
import PIL.ImageFile
import PIL
def scale_to_min_size(image, min_width, min_height):
"""Returns an image, that isn't smaller than min_width and min_height.
That means one side is exactly given value and the other is greater.
This may only makes sense if the image is cut after it is scaled.
"""
# resize proportinal
width, height = image.size
prop_x = float(min_width) / width
prop_y = float(min_height) / height
    # The larger proportion (factor or quotient) between target size and
    # actual size is applied to both edges (to stay proportional). That means
    # the remaining edge is in any case larger than requested (since it is
    # multiplied by that factor).
if prop_x > prop_y:
height = int(prop_x * height)
image = image.resize((min_width, height), PIL.Image.ANTIALIAS)
else:
width = int(prop_y * width)
image = image.resize((width, min_height), PIL.Image.ANTIALIAS)
return image
def scale_to_max_size(image, max_width, max_height):
"""Returns an image, that isn't bigger than max_width and max_height.
That means one side is exactly given value and the other is smaller. In
other words the image fits at any rate in the given box max_width x
max_height.
"""
# resize proportinal
width, height = image.size
    # Compute the proportions between target width and actual width, and
    # between target height and actual height.
prop_width = float(max_width) / width
prop_height = float(max_height) / height
    # The smaller proportion (factor or quotient) of the two is applied to
    # both edges (to stay proportional). That means the remaining edge is in
    # any case smaller than requested (since it is multiplied by that factor).
if prop_height < prop_width:
width = int(prop_height * width)
image = image.resize((width, max_height), PIL.Image.ANTIALIAS)
else:
height = int(prop_width * height)
image = image.resize((max_width, height), PIL.Image.ANTIALIAS)
return image
def scale_to_width(image, target_width):
"""Returns an image that has the exactly given width and scales height
proportional.
"""
width, height = image.size
prop_width = float(target_width) / width
new_height = int(prop_width * height)
image = image.resize((target_width, new_height), PIL.Image.ANTIALIAS)
return image
def scale_to_height(image, target_height):
"""Returns an image that has the exactly given height and scales width
proportional.
"""
width, height = image.size
prop_height = float(target_height) / height
    new_width = int(prop_height * width)
    image = image.resize((new_width, target_height), PIL.Image.ANTIALIAS)
return image
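# Illustration (added; not part of the original module): for a 400x200 image,
# scale_to_min_size(image, 100, 100) returns a 200x100 image (no side below
# the minimum), while scale_to_max_size(image, 100, 100) returns 100x50
# (the image fits inside the 100x100 box).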
|
[
"[email protected]"
] | |
a5b4d3210895376b4fef4c8e422ea7effa1ebb54
|
255e19ddc1bcde0d3d4fe70e01cec9bb724979c9
|
/all-gists/5224673/snippet.py
|
cea9424811bd53898a7138f8a2e829391080caa2
|
[
"MIT"
] |
permissive
|
gistable/gistable
|
26c1e909928ec463026811f69b61619b62f14721
|
665d39a2bd82543d5196555f0801ef8fd4a3ee48
|
refs/heads/master
| 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 |
Python
|
UTF-8
|
Python
| false | false | 153 |
py
|
def java_string_hashcode(s):
    """Compute the same value as java.lang.String.hashCode(), emulating
    Java's signed 32-bit integer overflow."""
    h = 0
    for c in s:
        h = (31 * h + ord(c)) & 0xFFFFFFFF
    return ((h + 0x80000000) & 0xFFFFFFFF) - 0x80000000
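# Example (added as a sanity check): java_string_hashcode("Hello") == 69609650,
# which matches "Hello".hashCode() in Java.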
|
[
"[email protected]"
] | |
f8696bb50b6a66fd6c4d390c0935db0f6299ab42
|
914626bf92d528766bf4b9402f5f120caccbe5cf
|
/탐욕법_단속카메라.py
|
7292bbc0d89ca0e7879785a8ec8bdc31684e24ab
|
[] |
no_license
|
LINDBURG/Programmers
|
64ee0a193f407c9802f7fbda64c27c6adb4a26d3
|
adf94767ebe2d4d63aa17bf52ece0c74873aec5c
|
refs/heads/master
| 2020-12-27T06:47:59.062955 | 2020-11-28T07:29:08 | 2020-11-28T07:29:08 | 237,799,965 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 440 |
py
|
def solution(routes):
    # Greedy interval stabbing: sort the routes by entry point, then place
    # one camera per group of mutually overlapping routes, at the smallest
    # exit point of that group.
    answer = 0
    routes.sort()
    while routes:
        answer += 1
        now = routes.pop(0)
        end = now[1]
        # Shrink `end` to the smallest exit among routes that still overlap.
        for i in range(len(routes)):
            if routes[i][1] < end:
                end = routes[i][1]
            elif routes[i][0] > end:
                break
        # Drop every route covered by the camera placed at `end`.
        for route in routes[:]:
            if route[0] > end:
                break
            routes.pop(0)
    return answer
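# Example (a common test case for this problem):
#   solution([[-20, -15], [-14, -5], [-18, -13], [-5, -3]]) == 2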
|
[
"[email protected]"
] | |
7c318280fb4418094a2b04b53172893d5d9bba08
|
037d31bfad9c9da19f6fa8046fa575b59bdfd97b
|
/app/language/models.py
|
88494ac6f043a96a8491fa81c4c46c5965f46765
|
[] |
no_license
|
luoyun/homepress
|
d8cfa58ea8de81bc559da9da3e2eb44d537e5df6
|
8c97579bd7b523dbbcc4a11f378bc4b56407de7c
|
refs/heads/master
| 2020-12-30T10:23:09.672711 | 2013-08-05T10:24:02 | 2013-08-05T10:24:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 534 |
py
|
import datetime
from yweb.orm import ORMBase
from sqlalchemy import Column, Integer, String, \
Sequence, DateTime, Table, ForeignKey, Boolean, Text
from sqlalchemy.orm import relationship, backref
class Language(ORMBase):
''' Language system '''
__tablename__ = 'language'
id = Column(Integer, Sequence('language_id_seq'), primary_key=True)
name = Column(String(32))
name_en = Column(String(32))
codename = Column(String(6))
def __str__(self):
return '<%s>' % self.codename
|
[
"[email protected]"
] | |
a485108da57b34bdbdf518806f23a799754dfbc5
|
b6db9a5bdbe84ad9b53407635a8a054a6af8e779
|
/dataportal/wsgi.py
|
c1bb5fc00883504bac7ff98b591a9e1dd404c49f
|
[] |
no_license
|
gagon/dataportal_kpo_django
|
6cca0b03d1d82e20fb5fa4db6430ae616ec91b96
|
6f862a026c0b9fa02d31452e29525021159de58d
|
refs/heads/master
| 2021-01-10T11:12:58.426186 | 2018-04-04T12:41:17 | 2018-04-04T12:41:17 | 48,659,768 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 397 |
py
|
"""
WSGI config for dataportal project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dataportal.settings")
application = get_wsgi_application()
|
[
"[email protected]"
] | |
32d6e24f2fe834017f93833a66735566ed3b82a1
|
a46b480eefa54d0aa8a1df7158cdf61def351724
|
/tests/bcm/cli/test_cli_util.py
|
54e34f55fb487e301b00143be0be6e220865aa51
|
[
"MIT"
] |
permissive
|
achillesrasquinha/biomodels-cellcollective-migrator
|
3ed3a575c32343f0b94115af7a67db40ea40953f
|
7c9b50986a6fa8cdfc7d6ec2b434a7b2be999a5b
|
refs/heads/develop
| 2021-06-14T16:34:52.468209 | 2019-06-28T20:59:06 | 2019-06-28T20:59:06 | 194,329,396 | 0 | 1 |
MIT
| 2021-06-02T00:05:12 | 2019-06-28T20:16:57 |
Python
|
UTF-8
|
Python
| false | false | 1,914 |
py
|
# imports - module imports
from bcm import cli
# imports - test imports
from testutils import assert_input, assert_stdout
def test_confirm(capfd):
query = "foobar"
stdout = "{} [Y/n/q]: ".format(query)
def _assert_confirm(stdout):
assert_input(capfd, query, "Y", expected = True, input_ = cli.confirm, stdout = stdout)
assert_input(capfd, query, "y", expected = True, input_ = cli.confirm, stdout = stdout)
assert_input(capfd, query,"\n", expected = True, input_ = cli.confirm, stdout = stdout)
assert_input(capfd, query, "n", expected = False, input_ = cli.confirm, stdout = stdout)
assert_input(capfd, query, "1", expected = False, input_ = cli.confirm, stdout = stdout)
_assert_confirm(stdout)
stdout = "{} [Y/n]: ".format(query)
# assert_input(capfd, query, "Y", expected = True, input_ = cli.confirm, stdout = stdout, input_args = { 'quit_': False })
# assert_input(capfd, query, "y", expected = True, input_ = cli.confirm, stdout = stdout, input_args = { 'quit_': False })
# assert_input(capfd, query,"\n", expected = True, input_ = cli.confirm, stdout = stdout, input_args = { 'quit_': False })
# assert_input(capfd, query, "n", expected = False, input_ = cli.confirm, stdout = stdout, input_args = { 'quit_': False })
# assert_input(capfd, query, "1", expected = False, input_ = cli.confirm, stdout = stdout, input_args = { 'quit_': False })
def test_format():
string = "foobar"
def _assert_format(string, type_):
assert cli.format(string, type_) == "{}{}{}".format(type_, string, cli.CLEAR)
_assert_format(string, cli.GREEN)
_assert_format(string, cli.RED)
_assert_format(string, cli.BOLD)
def test_echo(capfd):
query = "foobar"
cli.echo(query, nl = False)
assert_stdout(capfd, query)
cli.echo(query, nl = True)
assert_stdout(capfd, "{}\n".format(query))
|
[
"[email protected]"
] | |
c0798e517a6501161acbc0b853fb0d04fba4d25a
|
d3dcbda2c798f24b43bd35ecc8ea40b2e494e646
|
/games/views.py
|
f0159630d481b4778ad81fd4acbf7828e1e22b19
|
[] |
no_license
|
milu-buet/milu_test3
|
48e560d778a61c44fadfacf2adcad3374797cdf6
|
09790d829fddd09cdf9fd9525c8b6829b58d67e5
|
refs/heads/master
| 2021-01-18T22:10:58.079326 | 2015-06-24T09:15:55 | 2015-06-24T09:15:55 | 19,772,796 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 323 |
py
|
from django.shortcuts import render
from django.http import HttpResponse
import simplejson as json
# Create your views here.
def home(request):
return HttpResponse("game home works")
def game1(request):
return render(request,"games/game1.html")
def game2(request):
return render(request,"games/game2.html")
|
[
"[email protected]"
] | |
9e51ee20084954c66c9648d096b95fc2a2f2f697
|
0f2112a0e198cb0275c002826854c836bbfb5bdf
|
/utils/gui/benchmark/images_list_model.py
|
e63ce3f72c2efd4ef74cc8157899d49a2c31fe7a
|
[
"MIT"
] |
permissive
|
jeremiedecock/pywi-cta
|
a7f98ae59beb1adecb25623153c13e5bc70e5560
|
1185f7dfa48d60116472c12ffc423be78a250fc9
|
refs/heads/master
| 2021-04-15T12:06:03.723786 | 2019-03-21T02:33:15 | 2019-03-21T02:33:15 | 126,397,380 | 0 | 1 |
MIT
| 2018-10-16T12:17:52 | 2018-03-22T21:31:45 |
Python
|
UTF-8
|
Python
| false | false | 4,280 |
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from gi.repository import Gtk as gtk
import json
import math
import os
from pywicta.io import images
class ImagesListModel(object):
def __init__(self, input_directory_path):
self.input_directory_path = input_directory_path
# Parse the input directory
self.fits_file_name_list = get_fits_files_list(input_directory_path)
# Parse FITS files
self.fits_metadata_list = parse_fits_files(self.input_directory_path, self.fits_file_name_list)
# Creating the gtk.ListStore model
self.liststore = gtk.ListStore(int, # Event ID
int, # Tel ID
float, # MC energy
float, # NPE
str) # File name
for image_metadata_dict in self.fits_metadata_list:
event_id = image_metadata_dict["event_id"]
tel_id = image_metadata_dict["tel_id"]
mc_energy = image_metadata_dict["mc_energy"]
npe = image_metadata_dict["npe"]
file_name = image_metadata_dict["file_name"]
self.liststore.append([event_id, tel_id, mc_energy, npe, file_name])
def get_fits_files_list(directory_path):
# Parse the input directory
print("Parsing", directory_path)
fits_file_name_list = [file_name
for file_name
in os.listdir(directory_path)
if os.path.isfile(os.path.join(directory_path, file_name))
and file_name.endswith((".fits", ".fit"))]
return fits_file_name_list
def parse_fits_files(dir_name, fits_file_name_list):
fits_metadata_list = []
# Parse the input files
mc_energy_unit = None
for file_index, file_name in enumerate(fits_file_name_list):
metadata_dict = {}
# Read the input file #########
fits_images_dict, fits_metadata_dict = images.load_benchmark_images(os.path.join(dir_name, file_name))
# Fill the dict ###############
if mc_energy_unit is None:
mc_energy_unit = fits_metadata_dict["mc_energy_unit"] # TODO
else:
if mc_energy_unit != fits_metadata_dict["mc_energy_unit"]:
raise Exception("Inconsistent data")
metadata_dict["event_id"] = fits_metadata_dict["event_id"]
metadata_dict["tel_id"] = fits_metadata_dict["tel_id"]
metadata_dict["mc_energy"] = fits_metadata_dict["mc_energy"]
metadata_dict["npe"] = fits_metadata_dict["npe"]
metadata_dict["file_name"] = file_name
fits_metadata_list.append(metadata_dict)
# Progress bar ################
num_files = len(fits_file_name_list)
relative_steps = math.ceil(num_files / 100.)
if (file_index % relative_steps) == 0:
progress_str = "{:.2f}% ({}/{})".format((file_index + 1)/num_files * 100,
file_index + 1,
num_files)
print(progress_str)
return fits_metadata_list
|
[
"[email protected]"
] | |
3497dc1e312299fd39e88a6dcb577ccca78fd827
|
167c6226bc77c5daaedab007dfdad4377f588ef4
|
/python/ql/test/library-tests/dependencies/b.py
|
24586e4ddc6ffd4d1ffaa60fda5277c1ebebd0fe
|
[
"MIT",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-other-copyleft",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown",
"Python-2.0"
] |
permissive
|
github/codeql
|
1eebb449a34f774db9e881b52cb8f7a1b1a53612
|
d109637e2d7ab3b819812eb960c05cb31d9d2168
|
refs/heads/main
| 2023-08-20T11:32:39.162059 | 2023-08-18T14:33:32 | 2023-08-18T14:33:32 | 143,040,428 | 5,987 | 1,363 |
MIT
| 2023-09-14T19:36:50 | 2018-07-31T16:35:51 |
CodeQL
|
UTF-8
|
Python
| false | false | 33 |
py
|
import c
class B(c.C):
pass
|
[
"[email protected]"
] | |
8e2b508ffb965952aba2ad74a9c3b8bcedda1017
|
db7aac75e31d35c4a18c966170b46f269d015d0b
|
/webgl_canvas_gadget/apps/projects/migrations/0010_auto_20160621_0147.py
|
16a5fabbca8c7c236c5a7224f993ed4aebefe01d
|
[] |
no_license
|
jjpastprojects/Django
|
12fbf3cf27a9230db98a21cc1013216aeadaae1e
|
c55562be7226f29b4ec213f8f018b6c2dd50c420
|
refs/heads/master
| 2022-12-12T22:15:49.493289 | 2017-09-05T12:51:20 | 2017-09-05T12:51:20 | 101,995,798 | 0 | 0 | null | 2022-12-07T23:21:46 | 2017-08-31T11:55:00 |
JavaScript
|
UTF-8
|
Python
| false | false | 1,191 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-06-20 19:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0009_auto_20160619_2250'),
]
operations = [
migrations.AddField(
model_name='lensflare',
name='band_1',
field=models.ImageField(default='', upload_to='projects/lens_flare'),
preserve_default=False,
),
migrations.AddField(
model_name='lensflare',
name='band_2',
field=models.ImageField(default='', upload_to='projects/lens_flare'),
preserve_default=False,
),
migrations.AddField(
model_name='lensflare',
name='hexigon_shape',
field=models.ImageField(default='', upload_to='projects/lens_flare'),
preserve_default=False,
),
migrations.AddField(
model_name='lensflare',
name='main_flare',
field=models.ImageField(default='', upload_to='projects/lens_flare'),
preserve_default=False,
),
]
|
[
"[email protected]"
] | |
c7406959efccab9a16ed49ab84285896a04da6ec
|
159aed4755e47623d0aa7b652e178296be5c9604
|
/data/scripts/templates/object/draft_schematic/clothing/shared_clothing_ith_pants_casual_16.py
|
8d9d8eacf01fe496da90925628e0d43e97005813
|
[
"MIT"
] |
permissive
|
anhstudios/swganh
|
fb67d42776864b1371e95f769f6864d0784061a3
|
41c519f6cdef5a1c68b369e760781652ece7fec9
|
refs/heads/develop
| 2020-12-24T16:15:31.813207 | 2016-03-08T03:54:32 | 2016-03-08T03:54:32 | 1,380,891 | 33 | 44 | null | 2016-03-08T03:54:32 | 2011-02-18T02:32:45 |
Python
|
UTF-8
|
Python
| false | false | 466 |
py
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/clothing/shared_clothing_ith_pants_casual_16.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
[
"[email protected]"
] | |
fd7e4b0e092aae7c5122f922cc9f62076b885df2
|
1e43fd5e134157e6f034327ffbf3e6501c67275d
|
/mlps/core/apeflow/interface/model/SKLModel.py
|
2e656276a0b2fe1d5080e2af279877a487b2445e
|
[
"Apache-2.0"
] |
permissive
|
sone777/automl-mlps
|
f15780e23142e0f3f368815678959c7954966e71
|
a568b272333bc22dc979ac3affc9762ac324efd8
|
refs/heads/main
| 2023-08-24T10:07:30.834883 | 2021-11-03T07:41:15 | 2021-11-03T07:41:15 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 402 |
py
|
# -*- coding: utf-8 -*-
# Author : Manki Baek
# e-mail : [email protected]
# Powered by Seculayer © 2021 Service Model Team, R&D Center.
from mlps.core.apeflow.interface.model.ModelAbstract import ModelAbstract
class SKLModel(ModelAbstract):
def __init__(self, param_dict, ext_data=None):
super(SKLModel, self).__init__(param_dict, ext_data)
self.model = self._build()
|
[
"[email protected]"
] | |
5b4f27a3f438a63076f482c06b59452a9bcf8501
|
7f698acfc0655fb0978c46f7c79a1f66fd0f4af0
|
/users/migrations/0003_auto_20210716_1850.py
|
a3614b7bd00bd907051b24bf0629f48b409918d7
|
[] |
no_license
|
bunnycast/bird_bnb
|
a53c67c1fd117bf459dd36062a63d0b10aceda7b
|
2bd43f0be4f9873028c278c735633cee990ca372
|
refs/heads/master
| 2023-06-19T18:27:49.151543 | 2021-07-16T10:05:50 | 2021-07-16T10:05:50 | 386,560,933 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 541 |
py
|
# Generated by Django 3.1.6 on 2021-07-16 09:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_user_bio'),
]
operations = [
migrations.AddField(
model_name='user',
name='avatar',
field=models.ImageField(null=True, upload_to=''),
),
migrations.AddField(
model_name='user',
name='gender',
field=models.CharField(max_length=10, null=True),
),
]
|
[
"[email protected]"
] | |
2e85756c40f453e7ad77be898d16d5598a94feab
|
ed915e9ac23875688b35734b3ffd42b23f00f626
|
/tools/make_response.py
|
5f3e89567324172be6ac15a900205f3180e28b8a
|
[
"MIT"
] |
permissive
|
rcbyron/hey-athena-client
|
4df2b25cf4aa05b65fa359836609afa2f5c50224
|
703e2184610a1718923bf60bc2ef6ec18e126148
|
refs/heads/demo-branch
| 2023-01-02T00:30:03.948348 | 2018-01-15T03:51:10 | 2018-01-15T03:51:10 | 40,776,319 | 391 | 136 |
MIT
| 2020-10-15T05:32:33 | 2015-08-15T18:08:20 |
Python
|
UTF-8
|
Python
| false | false | 423 |
py
|
'''
Created on Feb 11, 2016
@author: Connor
'''
from athena import tts
print('~ Enter \'q\' at any time to quit')
while True:
fname = input('\n~ Unique Filename: ')
    if len(fname) == 0 or 'q' in fname[0].lower():
break
phrase = input('~ Phrase: ')
    if len(phrase) == 0 or 'q' in phrase[0].lower():
break
tts.speak(phrase, cache=True, filename=fname)
|
[
"[email protected]"
] | |
92d0ca24d92136a27f7cb54a84c65c5b885630e9
|
6b77241ff82ca0ac1293e971276c87a1294dd878
|
/tea/msg/__init__.py
|
891e39ed5fcb2c379bf2986daa07cc97f45af264
|
[] |
no_license
|
SenadI/tea
|
356639d2d24b6f56fad69adeba90bbadacacd10b
|
abed19ecd5274ac05b825d8b83c3f448db197e9a
|
refs/heads/master
| 2020-12-30T10:23:51.880549 | 2017-06-12T16:40:18 | 2017-06-12T16:40:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 233 |
py
|
__author__ = 'Viktor Kerkez <[email protected]>'
__date__ = '27 November 2009'
__copyright__ = 'Copyright (c) 2009 Viktor Kerkez'
__all__ = ['send_mail', 'send_mass_mail']
from tea.msg.mail import send_mail, send_mass_mail
|
[
"[email protected]"
] | |
64407c5d245ba72556e881a516f27a69cd701491
|
bc441bb06b8948288f110af63feda4e798f30225
|
/topboard_sdk/model/pipeline/stage_status_pb2.py
|
90be5a02ac0c56de1b11fec38714cee55201a3fa
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | true | 10,312 |
py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: stage_status.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from topboard_sdk.model.pipeline import condition_pb2 as topboard__sdk_dot_model_dot_pipeline_dot_condition__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='stage_status.proto',
package='pipeline',
syntax='proto3',
serialized_options=_b('ZBgo.easyops.local/contracts/protorepo-models/easyops/model/pipeline'),
serialized_pb=_b('\n\x12stage_status.proto\x12\x08pipeline\x1a+topboard_sdk/model/pipeline/condition.proto\"\xec\x02\n\x0bStageStatus\x12\n\n\x02id\x18\x01 \x01(\t\x12\x12\n\nstage_name\x18\x02 \x01(\t\x12\x0e\n\x06number\x18\x03 \x01(\x05\x12\x10\n\x08parallel\x18\x04 \x01(\x08\x12\r\n\x05state\x18\x05 \x01(\t\x12\'\n\nconditions\x18\x06 \x03(\x0b\x32\x13.pipeline.Condition\x12*\n\x05steps\x18\x07 \x03(\x0b\x32\x1b.pipeline.StageStatus.Steps\x12\x0f\n\x07\x63reated\x18\x08 \x01(\x05\x12\x0f\n\x07updated\x18\t \x01(\x05\x1a\x94\x01\n\x05Steps\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0e\n\x06number\x18\x03 \x01(\x05\x12\r\n\x05state\x18\x04 \x01(\t\x12\x11\n\texit_code\x18\x05 \x01(\x05\x12\x0e\n\x06log_id\x18\x06 \x01(\t\x12\x0f\n\x07started\x18\x07 \x01(\x05\x12\x10\n\x08\x66inished\x18\x08 \x01(\x05\x12\x0c\n\x04type\x18\t \x01(\tBDZBgo.easyops.local/contracts/protorepo-models/easyops/model/pipelineb\x06proto3')
,
dependencies=[topboard__sdk_dot_model_dot_pipeline_dot_condition__pb2.DESCRIPTOR,])
_STAGESTATUS_STEPS = _descriptor.Descriptor(
name='Steps',
full_name='pipeline.StageStatus.Steps',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='pipeline.StageStatus.Steps.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='pipeline.StageStatus.Steps.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='number', full_name='pipeline.StageStatus.Steps.number', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='state', full_name='pipeline.StageStatus.Steps.state', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='exit_code', full_name='pipeline.StageStatus.Steps.exit_code', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='log_id', full_name='pipeline.StageStatus.Steps.log_id', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='started', full_name='pipeline.StageStatus.Steps.started', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='finished', full_name='pipeline.StageStatus.Steps.finished', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='pipeline.StageStatus.Steps.type', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=294,
serialized_end=442,
)
_STAGESTATUS = _descriptor.Descriptor(
name='StageStatus',
full_name='pipeline.StageStatus',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='pipeline.StageStatus.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stage_name', full_name='pipeline.StageStatus.stage_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='number', full_name='pipeline.StageStatus.number', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parallel', full_name='pipeline.StageStatus.parallel', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='state', full_name='pipeline.StageStatus.state', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='conditions', full_name='pipeline.StageStatus.conditions', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='steps', full_name='pipeline.StageStatus.steps', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='created', full_name='pipeline.StageStatus.created', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='updated', full_name='pipeline.StageStatus.updated', index=8,
number=9, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_STAGESTATUS_STEPS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=78,
serialized_end=442,
)
_STAGESTATUS_STEPS.containing_type = _STAGESTATUS
_STAGESTATUS.fields_by_name['conditions'].message_type = topboard__sdk_dot_model_dot_pipeline_dot_condition__pb2._CONDITION
_STAGESTATUS.fields_by_name['steps'].message_type = _STAGESTATUS_STEPS
DESCRIPTOR.message_types_by_name['StageStatus'] = _STAGESTATUS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
StageStatus = _reflection.GeneratedProtocolMessageType('StageStatus', (_message.Message,), {
'Steps' : _reflection.GeneratedProtocolMessageType('Steps', (_message.Message,), {
'DESCRIPTOR' : _STAGESTATUS_STEPS,
'__module__' : 'stage_status_pb2'
# @@protoc_insertion_point(class_scope:pipeline.StageStatus.Steps)
})
,
'DESCRIPTOR' : _STAGESTATUS,
'__module__' : 'stage_status_pb2'
# @@protoc_insertion_point(class_scope:pipeline.StageStatus)
})
_sym_db.RegisterMessage(StageStatus)
_sym_db.RegisterMessage(StageStatus.Steps)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
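# Illustrative usage (a sketch, not part of the generated module; assumes the
# topboard_sdk package and its condition_pb2 dependency are importable):
#   from topboard_sdk.model.pipeline.stage_status_pb2 import StageStatus
#   status = StageStatus(id="s-1", stage_name="build", number=1, parallel=False)
#   step = status.steps.add()          # repeated nested message StageStatus.Steps
#   step.name = "compile"
#   step.state = "running"
#   data = status.SerializeToString()  # wire-format bytes
#   clone = StageStatus.FromString(data)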
|
[
"[email protected]"
] | |
6e201b9d508d0e01435acef4ce54354372191e65
|
add331d7951c0aef4e6afef8a33664893e32310a
|
/src/idp_test/saml2base.py
|
05872c3d118c2dcb446fb82ae646e18b8726559d
|
[
"BSD-2-Clause"
] |
permissive
|
dv10den/saml2test
|
bd2b24c6048fc5b8f67e5fc423bec7b758b74a91
|
d5c40b1efbb703ff44e6ed54b5a11b77c6b9bb01
|
refs/heads/master
| 2021-01-21T07:45:41.891472 | 2014-05-22T07:54:46 | 2014-05-22T07:54:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 18,952 |
py
|
from saml2 import samlp
from saml2 import BINDING_HTTP_ARTIFACT
from saml2 import BINDING_HTTP_POST
from saml2 import BINDING_HTTP_REDIRECT
from saml2 import BINDING_PAOS
from saml2 import BINDING_SOAP
from saml2 import BINDING_URI
from saml2.saml import NAMEID_FORMAT_PERSISTENT
from saml2.saml import NAMEID_FORMAT_UNSPECIFIED
from saml2.saml import NAMEID_FORMAT_TRANSIENT
from saml2.saml import NAMEID_FORMAT_EMAILADDRESS
from idp_test.check import CheckLogoutSupport
from idp_test.check import CheckSaml2IntAttributes
from idp_test.check import CheckSaml2IntMetaData
from idp_test.check import VerifyAttributeNameFormat
from idp_test.check import VerifyFunctionality
from idp_test.check import VerifyContent
from idp_test.check import VerifyNameIDMapping
from idp_test.check import VerifyNameIDPolicyUsage
from idp_test.check import VerifySuccessStatus
from idp_test.check import VerifySignatureAlgorithm
from idp_test.check import VerifySignedPart
from idp_test.check import VerifyEndpoint
from saml2.samlp import NameIDPolicy
__author__ = 'rolandh'
class Request(object):
_args = {}
_class = None
tests = {"post": [VerifyContent], "pre": []}
def __init__(self, conv):
self.args = self._args.copy()
self.conv = conv
def setup(self):
pass
def pre_processing(self, message, args):
return message
def post_processing(self, message):
return message
#class Saml2IntRequest(Request):
# tests = {"pre": [],
# "post": [CheckSaml2IntAttributes, VerifyContent
# # CheckSubjectNameIDFormat,
# ]}
class AuthnRequest(Request):
_class = samlp.AuthnRequest
request = "authn_request"
_args = {"response_binding": BINDING_HTTP_POST,
"request_binding": BINDING_HTTP_REDIRECT,
"nameid_format": NAMEID_FORMAT_PERSISTENT,
"allow_create": True}
tests = {"pre": [VerifyFunctionality],
"post": [CheckSaml2IntAttributes,
VerifyAttributeNameFormat,
VerifySignedPart,
VerifySignatureAlgorithm]}
class AuthnRequestNID_Transient(AuthnRequest):
def __init__(self, conv):
AuthnRequest.__init__(self, conv)
self.args["nameid_format"] = NAMEID_FORMAT_TRANSIENT
def setup(self):
cnf = self.conv.client.config
endps = cnf.getattr("endpoints", "sp")
url = ""
for url, binding in endps["assertion_consumer_service"]:
if binding == BINDING_HTTP_POST:
self.args["assertion_consumer_service_url"] = url
break
self.tests["post"].append((VerifyEndpoint, {"endpoint": url}))
class AuthnRequestNID_Email(AuthnRequest):
def __init__(self, conv):
AuthnRequest.__init__(self, conv)
self.args["nameid_format"] = NAMEID_FORMAT_EMAILADDRESS
def setup(self):
cnf = self.conv.client.config
endps = cnf.getattr("endpoints", "sp")
url = ""
for url, binding in endps["assertion_consumer_service"]:
if binding == BINDING_HTTP_POST:
self.args["assertion_consumer_service_url"] = url
break
self.tests["post"].append((VerifyEndpoint, {"endpoint": url}))
class AuthnRequestNID_Unspecified(AuthnRequest):
def __init__(self, conv):
AuthnRequest.__init__(self, conv)
self.args["nameid_format"] = NAMEID_FORMAT_UNSPECIFIED
def setup(self):
cnf = self.conv.client.config
endps = cnf.getattr("endpoints", "sp")
url = ""
for url, binding in endps["assertion_consumer_service"]:
if binding == BINDING_HTTP_POST:
self.args["assertion_consumer_service_url"] = url
break
self.tests["post"].append((VerifyEndpoint, {"endpoint": url}))
class AuthnRequestNID_no(AuthnRequest):
def __init__(self, conv):
AuthnRequest.__init__(self, conv)
self.args["nameid_format"] = ""
def setup(self):
cnf = self.conv.client.config
endps = cnf.getattr("endpoints", "sp")
url = ""
for url, binding in endps["assertion_consumer_service"]:
if binding == BINDING_HTTP_POST:
self.args["assertion_consumer_service_url"] = url
break
self.tests["post"].append((VerifyEndpoint, {"endpoint": url}))
class AuthnRequestEndpointIndex(AuthnRequest):
def __init__(self, conv):
AuthnRequest.__init__(self, conv)
self.args["attribute_consuming_service_index"] = 3
def setup(self):
cnf = self.conv.client.config
endps = cnf.getattr("endpoints", "sp")
acs3 = endps["assertion_consumer_service"][3]
self.tests["post"].append((VerifyEndpoint, {"endpoint": acs3[0]}))
class AuthnRequestEndpointIndexNIDTransient(AuthnRequest):
def __init__(self, conv):
AuthnRequest.__init__(self, conv)
self.args["attribute_consuming_service_index"] = 3
self.args["nameid_format"] = NAMEID_FORMAT_TRANSIENT
def setup(self):
cnf = self.conv.client.config
endps = cnf.getattr("endpoints", "sp")
acs3 = endps["assertion_consumer_service"][3]
self.tests["post"].append((VerifyEndpoint, {"endpoint": acs3[0]}))
class AuthnRequestSpecEndpoint(AuthnRequest):
def setup(self):
cnf = self.conv.client.config
endps = cnf.getattr("endpoints", "sp")
acs3 = endps["assertion_consumer_service"][3]
self.args["assertion_consumer_service_url"] = acs3[0]
self.tests["post"].append((VerifyEndpoint, {"endpoint": acs3[0]}))
class DynAuthnRequest(Request):
_class = samlp.AuthnRequest
request = "authn_request"
_args = {"response_binding": BINDING_HTTP_POST}
tests = {}
name_id_formats = [NAMEID_FORMAT_TRANSIENT, NAMEID_FORMAT_PERSISTENT]
bindings = [BINDING_HTTP_REDIRECT, BINDING_HTTP_POST]
def setup(self):
metadata = self.conv.client.metadata
entity = metadata[self.conv.entity_id]
self.args.update({"nameid_format": "", "request_binding": ""})
for idp in entity["idpsso_descriptor"]:
for nformat in self.name_id_formats:
if self.args["nameid_format"]:
break
for nif in idp["name_id_format"]:
if nif["text"] == nformat:
self.args["nameid_format"] = nformat
break
for bind in self.bindings:
if self.args["request_binding"]:
break
for sso in idp["single_sign_on_service"]:
if sso["binding"] == bind:
self.args["request_binding"] = bind
break
class AuthnRequestPost(AuthnRequest):
def __init__(self, conv):
AuthnRequest.__init__(self, conv)
self.args["request_binding"] = BINDING_HTTP_POST
class AuthnRequest_using_Artifact(AuthnRequest):
def __init__(self, conv):
AuthnRequest.__init__(self, conv)
self.args["response_binding"] = BINDING_HTTP_ARTIFACT
self.args["binding"] = BINDING_HTTP_ARTIFACT
class AuthnRequest_using_ArtifactNID_Transient(AuthnRequest):
def __init__(self, conv):
AuthnRequest.__init__(self, conv)
self.args["nameid_format"] = NAMEID_FORMAT_TRANSIENT
self.args["response_binding"] = BINDING_HTTP_ARTIFACT
self.args["binding"] = BINDING_HTTP_ARTIFACT
class AuthnRequestPostNID_Transient(AuthnRequestPost):
def __init__(self, conv):
AuthnRequest.__init__(self, conv)
self.args["nameid_format"] = NAMEID_FORMAT_TRANSIENT
class LogOutRequest(Request):
request = "logout_request"
_args = {"request_binding": BINDING_SOAP}
tests = {"pre": [VerifyFunctionality], "post": []}
def __init__(self, conv):
Request.__init__(self, conv)
self.tests["pre"].append(CheckLogoutSupport)
#self.tests["post"].append(VerifyLogout)
def setup(self):
resp = self.conv.saml_response[-1].response
assertion = resp.assertion[0]
subj = assertion.subject
self.args["name_id"] = subj.name_id
self.args["issuer_entity_id"] = assertion.issuer.text
class AssertionIDRequest(Request):
request = "assertion_id_request"
_args = {"request_binding": BINDING_URI,
"response_binding": None}
tests = {"pre": [VerifyFunctionality]}
def setup(self):
assertion = self.conv.saml_response[-1].assertion
self.args["assertion_id_refs"] = [assertion.id]
class AuthnQuery(Request):
request = "authn_query"
_args = {"request_binding": BINDING_SOAP}
tests = {"pre": [VerifyFunctionality], "post": []}
def __init__(self, conv):
Request.__init__(self, conv)
self.tests["post"].append(VerifySuccessStatus)
def setup(self):
assertion = self.conv.saml_response[-1].assertion
self.args["subject"] = assertion.subject
class NameIDMappingRequest(Request):
request = "name_id_mapping_request"
_args = {"request_binding": BINDING_SOAP,
"name_id_policy": NameIDPolicy(format=NAMEID_FORMAT_PERSISTENT,
sp_name_qualifier="GroupOn",
allow_create="true")}
def __init__(self, conv):
Request.__init__(self, conv)
self.tests["post"].append(VerifyNameIDMapping)
def setup(self):
assertion = self.conv.saml_response[-1].assertion
self.args["name_id"] = assertion.subject.name_id
class AuthnRequest_NameIDPolicy1(AuthnRequest):
def __init__(self, conv):
AuthnRequest.__init__(self, conv)
self.args["name_id_policy"] = NameIDPolicy(
format=NAMEID_FORMAT_PERSISTENT, sp_name_qualifier="Group1",
allow_create="true")
self.tests["post"].append(VerifyNameIDPolicyUsage)
class AuthnRequest_NameIDPolicy1Transient(AuthnRequest):
def __init__(self, conv):
AuthnRequest.__init__(self, conv)
self.args["name_id_policy"] = NameIDPolicy(
format=NAMEID_FORMAT_TRANSIENT, sp_name_qualifier="Group1",
allow_create="true")
self.args["nameid_format"] = NAMEID_FORMAT_TRANSIENT
self.tests["post"].append(VerifyNameIDPolicyUsage)
class AuthnRequest_TransientNameID(AuthnRequest):
def __init__(self, conv):
AuthnRequest.__init__(self, conv)
self.args["name_id_policy"] = NameIDPolicy(
format=NAMEID_FORMAT_TRANSIENT, sp_name_qualifier="Group",
allow_create="true")
self.tests["post"].append(VerifyNameIDPolicyUsage)
class ECP_AuthnRequest(AuthnRequest):
def __init__(self, conv):
AuthnRequest.__init__(self, conv)
self.args["request_binding"] = BINDING_SOAP
self.args["service_url_binding"] = BINDING_PAOS
def setup(self):
_client = self.conv.client
_client.user = "babs"
_client.passwd = "howes"
# def post_processing(self, message):
# # Unpacking SOAP message
# return parse_soap_enveloped_saml_response(message)
class ManageNameIDRequest(Request):
request = "manage_name_id_request"
_args = {"request_binding": BINDING_SOAP,
"new_id": samlp.NewID("New identifier")}
def __init__(self, conv):
Request.__init__(self, conv)
self.tests["post"].append(VerifySuccessStatus)
def setup(self):
assertion = self.conv.saml_response[-1].assertion
self.args["name_id"] = assertion.subject.name_id
class AttributeQuery(Request):
request = "attribute_query"
_args = {"request_binding": BINDING_SOAP}
tests = {"pre": [VerifyFunctionality],
"post": [CheckSaml2IntAttributes, VerifyAttributeNameFormat]}
def setup(self):
assertion = self.conv.saml_response[-1].assertion
self.args["name_id"] = assertion.subject.name_id
# -----------------------------------------------------------------------------
OPERATIONS = {
'verify': {
'tc_id': "S2c-16",
"name": 'Verify connectivity',
"descr": 'Uses AuthnRequest to check connectivity',
"sequence": [DynAuthnRequest],
"tests": {"pre": [CheckSaml2IntMetaData],
"post": []}
},
'authn': {
"tc_id": "S2c-02",
"name": 'Absolute basic SAML2 AuthnRequest',
"descr": 'AuthnRequest using HTTP-redirect',
"sequence": [AuthnRequest],
"tests": {"pre": [CheckSaml2IntMetaData],
"post": []},
"depend":["verify"]
},
'authn-nid_transient': {
"tc_id": "S2c-10",
"name": 'Basic SAML2 AuthnRequest, transient name ID',
"descr": 'AuthnRequest using HTTP-redirect',
"sequence": [AuthnRequestNID_Transient],
"tests": {"pre": [CheckSaml2IntMetaData],
"post": []},
"depend":["authn"]
},
'authn-nid_email': {
"tc_id": "S2c-20",
"name": 'Basic SAML2 AuthnRequest, email name ID',
"descr": 'AuthnRequest using HTTP-redirect',
"sequence": [AuthnRequestNID_Email],
"tests": {"pre": [CheckSaml2IntMetaData],
"post": []},
"depend":["authn"]
},
'authn-nid_no': {
"tc_id": "S2c-21",
"name": 'Basic SAML2 AuthnRequest, no name ID format specified',
"descr": 'AuthnRequest using HTTP-redirect',
"sequence": [AuthnRequestNID_no],
"tests": {"pre": [CheckSaml2IntMetaData],
"post": []},
"depend":["authn"]
},
'authn-nid_unspecified': {
"tc_id": "S2c-21",
"name": 'Basic SAML2 AuthnRequest, unspecified name ID format',
"descr": 'AuthnRequest using HTTP-redirect',
"sequence": [AuthnRequestNID_Unspecified],
"tests": {"pre": [CheckSaml2IntMetaData],
"post": []},
"depend":["authn"]
},
'authn-post': {
"tc_id": "S2c-08",
"name": 'Basic SAML2 AuthnRequest using HTTP POST',
"descr": 'AuthnRequest using HTTP-POST',
"sequence": [AuthnRequestPost],
"tests": {"pre": [CheckSaml2IntMetaData],
"post": []},
"depend":["authn"]
},
'authn-post-transient': {
"tc_id": "S2c-09",
"name": 'AuthnRequest using HTTP POST expecting transient NameID',
"descr": 'AuthnRequest using HTTP-POST',
"sequence": [AuthnRequestPostNID_Transient],
"tests": {"pre": [CheckSaml2IntMetaData],
"post": []},
"depend":["authn-post"]
},
'attribute-query':{
"tc_id": "S2c-01",
"name": "",
"sequence":[AuthnRequest, AttributeQuery],
"depend":["authn"]
},
'attribute-query-transient':{
"tc_id": "S2c-20",
"name": "",
"sequence":[AuthnRequestNID_Transient, AttributeQuery],
"depend":["authn"]
},
'authn_endpoint_index': {
"tc_id": "S2c-03",
"name": '',
"descr": '',
"sequence": [AuthnRequestEndpointIndex],
"depend":["authn"]
},
'authn_endpoint_index-transient': {
"tc_id": "S2c-03",
"name": '',
"descr": '',
"sequence": [AuthnRequestEndpointIndexNIDTransient],
"depend":["authn"]
},
'authn_specified_endpoint': {
"tc_id": "S2c-04",
"name": '',
"descr": '',
"sequence": [AuthnRequestSpecEndpoint],
"depend":["authn"]
},
'authn-artifact':{
'tc_id': "S2c-05",
"name": "SAML2 AuthnRequest using an artifact",
"descr": ('AuthnRequest using HTTP-redirect and artifact'),
"sequence": [AuthnRequest_using_Artifact]
},
'authn-artifact_nid-transient':{
'tc_id': "S2c-05",
"name": "SAML2 AuthnRequest expecting artifact response",
"descr": ('AuthnRequest using HTTP-redirect and artifact'),
"sequence": [AuthnRequest_using_ArtifactNID_Transient]
},
'authn-assertion_id_request': {
"tc_id": "S2c-06",
"name": 'AuthnRequest and then an AssertionIDRequest',
"descr": 'AuthnRequest followed by an AssertionIDRequest',
"sequence": [AuthnRequest, AssertionIDRequest],
"tests": {"pre": [CheckSaml2IntMetaData], "post": []},
"depend":["authn"]
},
'authn-nid_transient-assertion_id_request': {
"tc_id": "S2c-26",
"name": 'AuthnRequest and then an AssertionIDRequest',
"descr": 'AuthnRequest followed by an AssertionIDRequest',
"sequence": [AuthnRequestNID_Transient, AssertionIDRequest],
"tests": {"pre": [CheckSaml2IntMetaData], "post": []},
"depend":["authn"]
},
'authn-with-name_id_policy': {
"tc_id": "S2c-11",
"name": 'SAML2 AuthnRequest with specific NameIDPolicy',
"descr": 'AuthnRequest with specific NameIDPolicy',
"sequence": [AuthnRequest_NameIDPolicy1],
"tests": {"pre": [CheckSaml2IntMetaData], "post": []},
"depend":["authn"]
},
'authn-with-name_id_policy_nid-transient': {
"tc_id": "S2c-31",
"name": 'SAML2 AuthnRequest with specific NameIDPolicy',
"descr": 'AuthnRequest with specific NameIDPolicy',
"sequence": [AuthnRequest_NameIDPolicy1Transient],
"tests": {"pre": [CheckSaml2IntMetaData], "post": []},
"depend":["authn"]
},
'ecp_authn': {
'tc_id': "S2c-12",
"name": "SAML2 AuthnRequest using ECP and PAOS",
"descr": "SAML2 AuthnRequest using ECP and PAOS",
"sequence":[ECP_AuthnRequest]
},
'log-in-out': {
"tc_id": "S2c-13",
"name": 'Absolute basic SAML2 log in and out',
"descr": 'AuthnRequest using HTTP-redirect followed by a logout',
"sequence": [AuthnRequest, LogOutRequest],
"tests": {"pre": [CheckSaml2IntMetaData], "post": []},
"depend":["authn"]
},
'manage_nameid':{
"tc_id": "S2c-14",
"name": "Setting the SP provided ID by using ManageNameID",
"sequence":[AuthnRequest, ManageNameIDRequest],
"depend":["authn"]
},
'manage_nameid_nid-transient':{
"tc_id": "S2c-14",
"name": "Setting the SP provided ID by using ManageNameID",
"sequence":[AuthnRequestNID_Transient, ManageNameIDRequest],
"depend":["authn"]
},
'nameid-mapping':{
"tc_id": "S2c-15",
"name": "Simple NameIDMapping request",
"sequence":[AuthnRequest, NameIDMappingRequest],
"depend":["authn"]
},
'authn-authn_query': {
"name": 'AuthnRequest and then an AuthnQuery',
"descr": 'AuthnRequest followed by an AuthnQuery',
"sequence": [AuthnRequest, AuthnQuery],
"tests": {"pre": [CheckSaml2IntMetaData], "post": []},
"depend":["authn"]
},
}
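# Shape of an OPERATIONS entry (summarized from the literals above):
#   "tc_id": test case identifier, "name"/"descr": human-readable labels,
#   "sequence": Request subclasses executed in order,
#   "tests": extra pre/post checks, "depend": operations that must pass first.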
|
[
"[email protected]"
] | |
27eb01bd63140d723b41b4dd57ec4807f308f050
|
ee8c4c954b7c1711899b6d2527bdb12b5c79c9be
|
/assessment2/amazon/run/core/controllers/fragile.py
|
3dde43aea23363a04aaa23d1ebff9541e7fa44da
|
[] |
no_license
|
sqlconsult/byte
|
02ac9899aebea4475614969b594bfe2992ffe29a
|
548f6cb5038e927b54adca29caf02c981fdcecfc
|
refs/heads/master
| 2021-01-25T14:45:42.120220 | 2018-08-11T23:45:31 | 2018-08-11T23:45:31 | 117,135,069 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 368 |
py
|
#!/usr/bin/env python3
from flask import Blueprint, Flask, render_template, request, url_for
controller = Blueprint('fragile', __name__, url_prefix='/fragile')
# @controller.route('/<string:title>', methods=['GET'])
# def lookup(title):
# if title == 'Republic': # TODO 2
# return render_template('republic.html') # TODO 2
# else:
# pass
|
[
"[email protected]"
] | |
b3c7e233bc4c37c1c0d23b146680f7bea823069f
|
cdf8b0df0b22f18e3e31e59946dadfbf50074e67
|
/dockless/qr/views.py
|
5f2a4a7c0ca835ed8548a38fd9719e30d65b3689
|
[] |
no_license
|
deshiyan1010/Dockless-Bike-Service
|
a5bd3bf92c837053f05835f9ebdfe291389a9e30
|
a7df45ed19a2f93c02abc101b19f5aca5d42337d
|
refs/heads/main
| 2023-07-01T05:00:20.135427 | 2021-08-10T11:12:54 | 2021-08-10T11:12:54 | 382,597,432 | 0 | 0 | null | 2021-07-04T18:34:05 | 2021-07-03T11:23:12 |
CSS
|
UTF-8
|
Python
| false | false | 1,568 |
py
|
from django.shortcuts import render
import geocoder
from maps import staticdb
import math
from django.http import HttpResponseRedirect, HttpResponse
from django.urls import reverse
from django.core.files.storage import FileSystemStorage
from PIL import Image
from pyzbar.pyzbar import decode
import cv2
import pyzbar
def euc(c1, c2):
    # straight-line distance between two (lat, lng) pairs, in degrees
    return math.sqrt((c1[0]-c2[0])**2 + (c1[1]-c2[1])**2)
def read_barcodes(frame):
    barcodes = decode(frame)
    barcode_info = None  # returned as-is when no barcode is decoded
print(frame.shape)
for barcode in barcodes:
x, y , w, h = barcode.rect
#1
barcode_info = barcode.data
# cv2.rectangle(frame, (x, y),(x+w, y+h), (0, 255, 0), 2)
# #2
# font = cv2.FONT_HERSHEY_DUPLEX
# cv2.putText(frame, barcode_info, (x + 6, y - 6), font, 2.0, (255, 255, 255), 1)
# #3
# with open("barcode_result.txt", mode ='w') as file:
# file.write("Recognized Barcode:" + barcode_info)
barcode_info = barcode_info.decode('utf-8')
print("Barcode:", barcode_info)
return barcode_info
def valQR(path):
# data = decode(Image.open(path))
data = read_barcodes(cv2.imread(path,1))
print(type(data),type(staticdb.QR_DATA))
if data == staticdb.QR_DATA:
        g = geocoder.ip('me').latlng
        min_dist = euc(staticdb.QR_LOCATIONS[0], g)  # nearest known dock so far
        for coords in staticdb.QR_LOCATIONS[1:]:
            dist = euc(coords, g)
            if dist < min_dist:
                min_dist = dist
        if min_dist < 0.4:
            return 1
        else:
            return 0
return 0
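# Illustrative call (hypothetical path; staticdb.QR_DATA and staticdb.QR_LOCATIONS
# must be populated):
#   valQR('/tmp/scan.png') -> 1 when the decoded payload matches QR_DATA and the
#   caller's IP geolocation is within 0.4 (euclidean lat/lng) of a known dock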
|
[
"[email protected]"
] | |
f6195f60d2d18a5fec319ddf775067291fe7e7e8
|
32b076374481b10e9ba67209d677eb6be0b3d440
|
/bin/wp-get-access-logs
|
a746b956a88fffacfa1bef2b613a75ce2587e88b
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
defconcepts/quac
|
7f8cca7aebf2fcdc1382fbbf329d03ad3dd86f59
|
4c279ef6ff0fcd51f389ae1817d3104d38740276
|
refs/heads/master
| 2021-01-18T18:53:31.227441 | 2015-11-03T21:23:49 | 2015-11-03T21:23:49 | 46,738,274 | 1 | 0 | null | 2015-11-23T17:58:21 | 2015-11-23T17:58:20 | null |
UTF-8
|
Python
| false | false | 2,466 |
#!/usr/bin/env python3
'Update local copy of Wikimedia access logs.'
# Copyright (c) Los Alamos National Security, LLC, and others.
help_epilogue = '''
Logs only go to stdout, so as to capture the rsync chatter as well.
Note that we use rsync to download Wikipedia stuff, which isn't supported by
the Wikimedia site, so you have to use a mirror. See the list of mirrors:
<http://meta.wikimedia.org/wiki/Mirroring_Wikimedia_project_XML_dumps>
Notes:
* If a log file is changed, your local copy will be updated to match. This
probably shouldn't happen, so investigate the situation if it does.
(However, deleted log files will not also be deleted locally; e.g., you are
OK if the mirror you are using goes into a zombie state.)
* This script will download several terabytes of data. Be patient and be
courteous.
* --verify also takes a long time and gives no output if all's well. If you're
worried about progress, you can use lsof to see what file is currently
being checked.
'''
import os.path
import subprocess
import quacpath
import rsync
import testable
import u
l = u.l
c = u.c
### Setup ###
ap = u.ArgumentParser(description=__doc__, epilog=help_epilogue)
gr = ap.add_argument_group('arguments')
gr.add_argument('--verify',
action='store_true',
help='verify MD5sums of existing files instead of downloading')
### Main ###
def main():
l.info('Wikimedia access logs in %s' % (log_dir))
if (args.verify):
l.info('mode: verify')
      subprocess.call(r'find %s -name md5sums.txt -exec sh -c "cd \$(dirname {}) && md5sum --check --quiet md5sums.txt || echo MD5 error in {}" \;' % (log_dir), shell=True)
else:
l.info('mode: update')
l.info('bandwidth limit is %d KB/s' % (bwlimit))
# FIXME: awkward to specify --include * simply to override --exclude *.
rsync.fetch(mirror_url, log_dir + '/raw', bwlimit,
['--exclude', 'projectcounts*'], args.verbose)
l.info('done')
### Bootstrap ###
try:
args = u.parse_args(ap)
u.configure(args.config)
u.logging_init('wpacc')
bwlimit = c.getint('wkpd', 'bandwidth_limit')
mirror_url = c.get('wkpd', 'access_log_url')
log_dir = c.getpath('wkpd', 'access_log_dir')
if (not os.path.isdir(log_dir)):
u.abort('%s is not a directory or does not exist' % (log_dir))
if (__name__ == '__main__'):
main()
except testable.Unittests_Only_Exception:
testable.register('')
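# Illustrative config snippet (the [wkpd] options are the ones read above; the
# values are hypothetical):
#   [wkpd]
#   bandwidth_limit = 2048
#   access_log_url = rsync://mirror.example.org/wikimedia/pagecounts/
#   access_log_dir = /data/wikipedia/access_logs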
|
[
"[email protected]"
] | ||
b01d86596c1d50ddad7135ffa21518b523697c01
|
e99985945b956698dbe430b61250607efeae0fe7
|
/blog/migrations/0001_initial.py
|
152678f4693b11cfadff686c1658bf8d693cfb70
|
[] |
no_license
|
anthonylauly/Project-Blog----Django-2-By-Example
|
5a01b2083122d3f26eb72b4b16b9177d9e5f8715
|
b7bb60bb80de11dceedeb1edd100e2d78ba83cee
|
refs/heads/main
| 2023-01-21T17:55:49.736625 | 2020-12-08T05:52:29 | 2020-12-08T05:52:29 | 319,536,171 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,369 |
py
|
# Generated by Django 2.2.2 on 2019-08-22 15:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=250)),
('slug', models.SlugField(max_length=250, unique_for_date='publish')),
('body', models.TextField()),
('publish', models.DateTimeField(default=django.utils.timezone.now)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('status', models.CharField(choices=[('draft', 'Draft'), ('published', 'Published')], default='draft', max_length=10)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_posts', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-publish',),
},
),
]
|
[
"[email protected]"
] | |
32cc3cc6bf949f5da324abe9e03b06cf15d693b9
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/coverage-big-1997.py
|
9d0cbf64aab3c1d3fcae52c99ba4dea241300b98
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 13,343 |
py
|
count:int = 0
count2:int = 0
count3:int = 0
count4:int = 0
count5:int = 0
def foo(s: str) -> int:
return len(s)
def foo2(s: str, s2: str) -> int:
return len(s)
def foo3(s: str, s2: str, s3: str) -> int:
return len(s)
def foo4(s: str, s2: str, s3: str, s4: str) -> int:
return len(s)
def foo5(s: str, s2: str, s3: str, s4: str, s5: str) -> int:
return len(s)
class bar(object):
p: bool = True
def baz(self:"bar", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar2(object):
p: bool = True
p2: bool = True
def baz(self:"bar2", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar2", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar3(object):
p: bool = True
p2: bool = True
p3: bool = True
def baz(self:"bar3", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar3", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar3", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar4(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
def baz(self:"bar4", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
            self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar4", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar4", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz4(self:"bar4", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar5(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
p5: bool = True
def baz(self:"bar5", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar5", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar5", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz4(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz5(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int], xx5: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
x5:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
y5:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
def qux5(y: int, y2: int, y3: int, y4: int, y5: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
nonlocal x5
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
print(bar().baz([1,2]))
|
[
"[email protected]"
] | |
82fc14c687f9944699c1a3fa65afe78e4d058b0d
|
50ecef248f7d45c1d82a8c9822964a668a87574a
|
/test/python/binding/uMediaServer/uMSTest/__init__.py
|
cde9049cfd151396f4f16174ea15d10544513e62
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
NotixOfficial/umediaserver
|
c4f4bf45854b8fb58b28fde5c0d36f829b69ad25
|
af89f8ac3355027a90d7b299a78fb6fab9b6ffca
|
refs/heads/master
| 2023-08-22T01:57:15.983956 | 2021-09-15T09:44:00 | 2021-09-15T09:54:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 58 |
py
|
from .main import uMSTest
from .cmdparser import cmd_line
|
[
"[email protected]"
] | |
5c619b914e03131689488198a31009c0eb874c03
|
2aba62d66c2c622bdc148cef451da76cae5fd76c
|
/exercise/learn_python_dm2039/ch16/ch16_13.py
|
effa4076052f01a7b1da11b467c3cc048caa2602
|
[] |
no_license
|
NTUT-109AB8011/crawler
|
6a76de2ab1848ebc8365e071e76c08ca7348be62
|
a703ec741b48d3af615a757fed7607b1f8eb66a6
|
refs/heads/master
| 2023-03-26T22:39:59.527175 | 2021-03-30T03:29:22 | 2021-03-30T03:29:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 312 |
py
|
# ch16_13.py
import re
msg = 'Johnson, Johnnason and Johnnathan will attend my party tonight.'
pattern = 'John(son|nason|nathan)'
txt = re.search(pattern,msg)    # returns the first search result
print(txt.group())              # print the whole match
print(txt.group(1))             # print the first captured group
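# Expected output (re.search returns only the first match in the string):
#   Johnson
#   son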
|
[
"[email protected]"
] | |
93ba3a81c498c8776a404efc7152c5f91099bd87
|
97319df4efbd371e89273bd2679c7a219b2a069a
|
/simple/save.py
|
6506d473c7b7703e2b564d985e297c4f4b3a6183
|
[] |
no_license
|
mpaisner/projects
|
7dd17429d80aa3630cbafd613bbb7d27fe5dfb71
|
30bdb423077c730184bf81707ad7cb0038288435
|
refs/heads/master
| 2021-01-01T17:04:47.703035 | 2015-03-01T21:58:03 | 2015-03-01T21:58:03 | 31,512,295 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 92 |
py
|
class SavedMap:
def __init__(self, map, conts):
self.map = map
self.conts = conts
|
[
"[email protected]"
] | |
81c444ea32e12b01a3349ba80b9ada0162c9b2de
|
c646eda22844eb3aadc832a55dc8a7a8d8b28656
|
/LintCode/Problems/Python3/1010. 维持城市轮廓的最大增量.py
|
6f6e037a68900ddc9879fccf078174560d88550a
|
[] |
no_license
|
daidai21/ExerciseProblem
|
78f41f20f6d12cd71c510241d5fe829af676a764
|
cdc526fdb4ee1ca8e0d6334fecc4932d55019cea
|
refs/heads/master
| 2021-11-22T21:54:13.106707 | 2021-11-14T10:54:37 | 2021-11-14T10:54:37 | 213,108,530 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 876 |
py
|
class Solution:
"""
@param grid: a 2D array
@return: the maximum total sum that the height of the buildings can be increased
"""
def maxIncreaseKeepingSkyline(self, grid):
        # Write your code here
        # max height in each row
        row_high = []
        for i in range(len(grid)):
            high = 0
            for j in range(len(grid[0])):
                high = max(high, grid[i][j])
            row_high.append(high)
        # max height in each column
        column_high = []
        for j in range(len(grid[0])):
            high = 0
            for i in range(len(grid)):
                high = max(high, grid[i][j])
            column_high.append(high)
        # each building may rise to the smaller of its row and column maxima
        # without changing either skyline
        ans = 0
        for i in range(len(grid)):
            for j in range(len(grid[0])):
                ans += min(row_high[i], column_high[j]) - grid[i][j]
        return ans
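# Illustrative check (hypothetical driver, not part of the original file):
#   grid = [[3, 0, 8, 4], [2, 4, 5, 7], [9, 2, 6, 3], [0, 3, 1, 0]]
#   print(Solution().maxIncreaseKeepingSkyline(grid))  # -> 35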
|
[
"[email protected]"
] | |
661e388c326d7f7fcae917f8bbd046c6126f2242
|
7792b03540784a0d28073899dd4ad78689e9a9fb
|
/char_map/all_char.py
|
9ff04bc9c121310a5b6bd6c7e57b7b628212d1ae
|
[] |
no_license
|
ayiis/coding
|
3b1362f813a22a7246af3725162cfb53dea2f175
|
c73e4622e1811cc3fd8729a92df6537bd73dc802
|
refs/heads/master
| 2021-06-02T14:55:38.451288 | 2021-04-26T08:39:16 | 2021-04-26T08:39:16 | 134,660,001 | 0 | 0 | null | 2020-06-05T04:03:58 | 2018-05-24T04:14:14 |
CSS
|
UTF-8
|
Python
| false | false | 439 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = "ayiis"
# create on 2020/05/06
# char_set = set([chr(i) for i in range(32, 127)])
char_set = set([chr(i) for i in range(32, 127)])
# print("char_set:", sorted(list(char_set)))
r"""
! " # $ % & ' ( ) * + , - .
/ 0 1 2 3 4 5 6 7 8 9 : ; < =
> ? @ A B C D E F G H I J K L
M N O P Q R S T U V W X Y Z [
\ ] ^ _ ` a b c d e f g h i j
k l m n o p q r s t u v w x y
z { | } ~
"""
|
[
"[email protected]"
] | |
76570c11245c3e2210c7d78bd57b32ff5fe7b088
|
e58aa9d46f06d091cc0be6259996efed238529cd
|
/tests/test_extension.py
|
058c583c5f83ede5286c46d6c1f3d51d25c97d2e
|
[
"Apache-2.0"
] |
permissive
|
kingosticks/mopidy-pidi
|
d446390c84de511c4f2f522d1f267b1f1f90e44a
|
7a091ea4597b313b59082c8b7494395e8f9e49ea
|
refs/heads/master
| 2020-09-16T13:35:06.825810 | 2019-11-15T13:53:05 | 2019-11-15T13:53:05 | 223,785,395 | 0 | 0 |
Apache-2.0
| 2019-11-24T17:48:10 | 2019-11-24T17:48:09 | null |
UTF-8
|
Python
| false | false | 499 |
py
|
from __future__ import unicode_literals
from mopidy_pidi import Extension, frontend as frontend_lib
def test_get_default_config():
ext = Extension()
config = ext.get_default_config()
assert "[pidi]" in config
assert "enabled = true" in config
def test_get_config_schema():
ext = Extension()
schema = ext.get_config_schema()
# TODO Test the content of your config schema
#assert "username" in schema
#assert "password" in schema
# TODO Write more tests
|
[
"[email protected]"
] |