Dataset schema (one row per source file):

| Column | Type | Values |
|---|---|---|
| blob_id | string | length 40 to 40 |
| directory_id | string | length 40 to 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 to 40 |
| detected_licenses | sequence | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 to 40 |
| revision_id | string | length 40 to 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, nullable |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | sequence | length 1 to 1 |
| author_id | string | length 1 to 132 |
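Each row that follows pairs one source file's repository metadata with its full text in the `content` column and a redacted author e-mail in `authors`. Below is a minimal sketch of how such a dump can be inspected, assuming the rows are also available as a Parquet file; the file name `rows.parquet` and the filter values are placeholders, not part of the dataset:

    import pandas as pd

    # Load only the columns needed for a quick look at the dump.
    df = pd.read_parquet(
        "rows.parquet",
        columns=["repo_name", "path", "license_type", "length_bytes", "content"],
    )

    # Example filter: small, permissively licensed files.
    small = df[(df["license_type"] == "permissive") & (df["length_bytes"] < 5000)]
    print(small[["repo_name", "path", "length_bytes"]].head())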
3df115b3dffbcbdc55a87e74face40f87c7cca8f | 00504f069d4f0eb93ed8777b627a6cd7de0fe94d | /10.0/auth_allowed_ips/__init__.py | 4f27000c587c01350a8ffde35f37c6c282bb6e4c | [] | no_license | Gofekra/odoo-2 | 6e1a688a9e04cc0ecd1ca91ad7fca004194f1c4a | a2f870a695663fe505451b6d97692433a4ea2b1d | refs/heads/master | 2021-04-09T13:32:46.994819 | 2018-03-13T07:52:55 | 2018-03-13T07:52:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,733 | py | # coding: utf-8
import re
import logging
from odoo import models, fields, SUPERUSER_ID, api
_logger = logging.getLogger(__name__)
class User(models.Model):
_inherit = 'res.users'
    allowed_ips = fields.Text(string='Allowed IPs', help=u"""Regular-expression match.
    For example: ^192\.168\.2\.\d{1,3}$. Multiple patterns are supported, one pattern per line;
    matching any single line is enough to allow the login.
    """)
@classmethod
def authenticate(cls, db, login, password, user_agent_env):
uid = super(User, cls).authenticate(db, login, password, user_agent_env)
if uid:
with cls.pool.cursor() as cr:
self = api.Environment(cr, SUPERUSER_ID, {})[cls._name]
user = self.browse(uid)
if hasattr(user, 'allowed_ips') and user.allowed_ips:
addr = user_agent_env['REMOTE_ADDR']
if not any(re.match(line, addr) for line in user.allowed_ips.splitlines()):
                        _logger.warning('User login blocked because remote_addr %s does not match allowed_ips %s',
                                        user_agent_env['REMOTE_ADDR'], user.allowed_ips)
uid = False
                        # The super() call above has already verified the password and written a
                        # successful-login log entry, but the login was then blocked by the IP
                        # restriction, so update that latest log entry's status and note.
Log = api.Environment(cr, SUPERUSER_ID, {})['auth_login_log.log']
Log.search([('login_account', '=', login)], limit=1, order='id desc').write({
                            'note': u'IP restricted',
'login_status': 'e',
})
return uid
| [
"[email protected]"
] | |
760b0eb4c5e3ffc5e8dc4d8b21479bb959617a91 | 0a28bcde2499e6a41e16d88ed62cd2e80a5b464d | /hb_quant/huobi/model/subuser/trade_market.py | 74d22b7034eb44acc3ad53c865621798e9a56b5f | [
"MIT"
] | permissive | wenli135/Binance-volatility-trading-bot | 2cfe66007294b13a89b16d1622d50ce1615f1d66 | 75a03ad61df0e95492128fb6f1f419d4dc256ab3 | refs/heads/main | 2023-06-13T06:40:43.855256 | 2021-07-01T02:03:25 | 2021-07-01T02:03:25 | 373,853,320 | 0 | 0 | MIT | 2021-06-04T13:38:26 | 2021-06-04T13:38:26 | null | UTF-8 | Python | false | false | 658 | py |
class TradeMarket:
"""
    Sub-user trade-market activation information.
    :member
        subUid: sub-user ID.
        accountType: account type that the activation state applies to.
        activation: sub-user account state for the given accountType.
"""
def __init__(self):
self.sub_uid = ""
self.account_type = ""
self.activation = ""
def print_object(self, format_data=""):
from huobi.utils.print_mix_object import PrintBasic
PrintBasic.print_basic(self.sub_uid, format_data + "subUid")
PrintBasic.print_basic(self.account_type, format_data + "accountType")
PrintBasic.print_basic(self.activation, format_data + "activation")
| [
"[email protected]"
] | |
214b3914aab920368717b8b7efce2aa7628cfc34 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/benchmarks/tree-491.py | b791356b5b8ce529fe2fb5cf44c992b2430b4224 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,451 | py | # Binary-search trees
class TreeNode(object):
value:int = 0
left:"TreeNode" = None
right:"TreeNode" = None
def insert(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode(x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode(x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class Tree(object):
root:TreeNode = None
size:int = 0
def insert(self:"Tree", x:int) -> object:
if self.root is None:
self.root = makeNode(x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def makeNode(x: int) -> TreeNode:
    b:TreeNode = None
b = TreeNode()
b.value = x
return b
# Input parameters
n:int = 100
c:int = 4
# Data
t:Tree = None
i:int = 0
k:int = 37813
# Crunch
t = Tree()
while i < n:
t.insert(k)
k = (k * 37813) % 37831
if i % c != 0:
t.insert(i)
i = i + 1
print(t.size)
for i in [4, 8, 15, 16, 23, 42]:
if t.contains(i):
print(i)
| [
"[email protected]"
] | |
e73e71b19defdf97b66220f6612d3d90b742c593 | 954e69e23e10e18d6f8ac721d8e42d6aabceb9ab | /topytorch_all.py | c9c65555052d46e5aa3dce65220f004f795fe386 | [] | no_license | jie311/2018--ZJUAI--PyramidBoxDetector | 53fc001d6e60fdc67d22ab0864ed1b574c53c182 | cc9b87b5082df65704a24117ff7136f9d077f49e | refs/heads/master | 2022-09-09T02:30:05.219093 | 2020-05-28T07:37:59 | 2020-05-28T07:37:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,230 | py | # txt in PaddlePaddle (pdpd) input format
pdfile = './final_all.txt'
# txt in PyTorch input format
ptfile = './final_all_pt.txt'
f = open(pdfile, 'r')
f_pt = open(ptfile, 'w')
lines = f.readlines()
i = 0
rect = 0
total = 0
while i < len(lines):
if 'jpg' in lines[i]:
im_id = lines[i].rstrip()
# print(im_id)
num = int(lines[i + 1].rstrip())
#
i = i + 2
box = []
bad = 0
for j in range(num):
x1, y1, w, h = map(int, lines[i].rstrip().split(' ')[0:4])
if w != h:
print(im_id)
print(w, h)
rect += 1
if w == 0 or h == 0:
# print(im_id)
bad += 1
i = i + 1
continue
else:
box.append([x1, y1, w, h])
i = i + 1
num = num - bad
total += num
if num > 0:
f_pt.write(im_id)
f_pt.write(' {0}'.format(num))
for [x1, y1, w, h] in box:
f_pt.write(' {0} {1} {2} {3}'.format(x1, y1, w, h))
f_pt.write('\n')
else:
pass
else:
i = i + 1
f_pt.close()
f.close()
print(rect)
print(total)
| [
"[email protected]"
] | |
249ab99aec490d4e6164883dbd8dea5220340a17 | 19d6bddc562b8cc3c7a6e67465f7601c74979e05 | /results/190902/failed/lifelong-stochastic-tight-big_h-11_w-11/stochastic-tight-big.py | 9b63dc6b158b73d05568948f1f0b3b6fa474c43e | [] | no_license | SuReLI/llrl | 3bca1d1c755e5c59a5d242c18df997ed17f546d0 | 5f581cdded3cdecf69a8af76dc624494d82a4034 | refs/heads/master | 2023-01-08T04:04:28.358563 | 2020-11-06T10:27:33 | 2020-11-06T10:27:33 | 169,303,041 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,358 | py | """
Lifelong RL experiment in constant transition function setting
"""
import numpy as np
from llrl.agents.rmax import RMax
from llrl.agents.lrmax import LRMax
from llrl.agents.maxqinit import MaxQInit
from llrl.agents.lrmaxqinit import LRMaxQInit
from llrl.utils.env_handler import make_env_distribution
from llrl.experiments import run_agents_lifelong
def experiment():
# Parameters
gamma = .9
env_distribution = make_env_distribution(env_class='stochastic-tight-big', env_name='stochastic-tight-big', gamma=gamma)
actions = env_distribution.get_actions()
n_known = 10
p_min = 1. / 5.
epsilon_q = .01
epsilon_m = .01
delta = .1
r_max = 1.
v_max = 1.
n_states = 4
max_mem = 20
# Agents
rmax = RMax(actions=actions, gamma=gamma, r_max=r_max, v_max=v_max, deduce_v_max=False, n_known=n_known,
deduce_n_known=False, epsilon_q=epsilon_q, epsilon_m=epsilon_m, name='RMax')
lrmax = LRMax(actions=actions, gamma=gamma, r_max=r_max, v_max=v_max, deduce_v_max=False, n_known=n_known,
deduce_n_known=False, epsilon_q=epsilon_q, epsilon_m=epsilon_m, delta=delta, n_states=n_states,
max_memory_size=max_mem, prior=None, estimate_distances_online=True,
min_sampling_probability=p_min, name='LRMax')
lrmaxprior = LRMax(actions=actions, gamma=gamma, r_max=r_max, v_max=v_max, deduce_v_max=False, n_known=n_known,
deduce_n_known=False, epsilon_q=epsilon_q, epsilon_m=epsilon_m, delta=delta, n_states=n_states,
max_memory_size=max_mem, prior=0.2, estimate_distances_online=True,
min_sampling_probability=p_min, name='LRMax(Dmax=0.2)')
maxqinit = MaxQInit(actions=actions, gamma=gamma, r_max=r_max, v_max=v_max, deduce_v_max=False, n_known=n_known,
deduce_n_known=False, epsilon_q=epsilon_q, epsilon_m=epsilon_m, delta=delta, n_states=n_states,
min_sampling_probability=p_min, name='MaxQInit')
lrmaxqinit = LRMaxQInit(actions=actions, gamma=gamma, r_max=r_max, v_max=v_max, deduce_v_max=False, n_known=n_known,
deduce_n_known=False, epsilon_q=epsilon_q, epsilon_m=epsilon_m, delta=delta,
n_states=n_states, max_memory_size=max_mem, prior=None, estimate_distances_online=True,
min_sampling_probability=p_min, name='LRMaxQInit')
lrmaxqinitprior = LRMaxQInit(actions=actions, gamma=gamma, r_max=r_max, v_max=v_max, deduce_v_max=False, n_known=n_known,
deduce_n_known=False, epsilon_q=epsilon_q, epsilon_m=epsilon_m, delta=delta,
n_states=n_states, max_memory_size=max_mem, prior=0.2, estimate_distances_online=True,
min_sampling_probability=p_min, name='LRMaxQInit(Dmax=0.2)')
agents_pool = [rmax, lrmax, lrmaxprior, maxqinit, lrmaxqinit, lrmaxqinitprior]
# Run
run_agents_lifelong(agents_pool, env_distribution, n_instances=2, n_tasks=80, n_episodes=80, n_steps=100,
reset_at_terminal=False, open_plot=False, plot_title=True, do_run=True, do_plot=True,
parallel_run=True, n_processes=None)
if __name__ == '__main__':
np.random.seed(1993)
experiment()
| [
"[email protected]"
] | |
6fdf82764e7bedb0106b8819a6f2767a11903402 | e315e9ad3c77289702517ebf464f8d8d8efcaf90 | /LGTVRemote/PythonistaKit.framework/pylib/Cookie.py | 68d989383e053a126368a37b2851b709104ee722 | [] | no_license | Megarushing/LGTVRemote | 856123c7907777fe1cbbd431669aaa5e5490746c | abc5e92fa91cd41df68104df7dc9d13270914550 | refs/heads/master | 2020-03-23T12:51:42.635735 | 2018-08-10T19:48:40 | 2018-08-10T19:48:40 | 141,586,628 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,822 | py | #
####
# Copyright 2000 by Timothy O'Malley <[email protected]>
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software
# and its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Timothy O'Malley not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
####
#
# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
# by Timothy O'Malley <[email protected]>
#
# Cookie.py is a Python module for the handling of HTTP
# cookies as a Python dictionary. See RFC 2109 for more
# information on cookies.
#
# The original idea to treat Cookies as a dictionary came from
# Dave Mitchell ([email protected]) in 1995, when he released the
# first version of nscookie.py.
#
####
r"""
Here's a sample session to show how to use this module.
At the moment, this is the only documentation.
The Basics
----------
Importing is easy..
>>> import Cookie
Most of the time you start by creating a cookie. Cookies come in
three flavors, each with slightly different encoding semantics, but
more on that later.
>>> C = Cookie.SimpleCookie()
>>> C = Cookie.SerialCookie()
>>> C = Cookie.SmartCookie()
[Note: Long-time users of Cookie.py will remember using
Cookie.Cookie() to create a Cookie object. Although deprecated, it
is still supported by the code. See the Backward Compatibility notes
for more information.]
Once you've created your Cookie, you can add values just as if it were
a dictionary.
>>> C = Cookie.SmartCookie()
>>> C["fig"] = "newton"
>>> C["sugar"] = "wafer"
>>> C.output()
'Set-Cookie: fig=newton\r\nSet-Cookie: sugar=wafer'
Notice that the printable representation of a Cookie is the
appropriate format for a Set-Cookie: header. This is the
default behavior. You can change the header and printed
attributes by using the .output() function
>>> C = Cookie.SmartCookie()
>>> C["rocky"] = "road"
>>> C["rocky"]["path"] = "/cookie"
>>> print C.output(header="Cookie:")
Cookie: rocky=road; Path=/cookie
>>> print C.output(attrs=[], header="Cookie:")
Cookie: rocky=road
The load() method of a Cookie extracts cookies from a string. In a
CGI script, you would use this method to extract the cookies from the
HTTP_COOKIE environment variable.
>>> C = Cookie.SmartCookie()
>>> C.load("chips=ahoy; vienna=finger")
>>> C.output()
'Set-Cookie: chips=ahoy\r\nSet-Cookie: vienna=finger'
The load() method is darn-tootin smart about identifying cookies
within a string. Escaped quotation marks, nested semicolons, and other
such trickeries do not confuse it.
>>> C = Cookie.SmartCookie()
>>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
>>> print C
Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;"
Each element of the Cookie also supports all of the RFC 2109
Cookie attributes. Here's an example which sets the Path
attribute.
>>> C = Cookie.SmartCookie()
>>> C["oreo"] = "doublestuff"
>>> C["oreo"]["path"] = "/"
>>> print C
Set-Cookie: oreo=doublestuff; Path=/
Each dictionary element has a 'value' attribute, which gives you
back the value associated with the key.
>>> C = Cookie.SmartCookie()
>>> C["twix"] = "none for you"
>>> C["twix"].value
'none for you'
A Bit More Advanced
-------------------
As mentioned before, there are three different flavors of Cookie
objects, each with different encoding/decoding semantics. This
section briefly discusses the differences.
SimpleCookie
The SimpleCookie expects that all values should be standard strings.
Just to be sure, SimpleCookie invokes the str() builtin to convert
the value to a string, when the values are set dictionary-style.
>>> C = Cookie.SimpleCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
'7'
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number=7\r\nSet-Cookie: string=seven'
SerialCookie
The SerialCookie expects that all values should be serialized using
cPickle (or pickle, if cPickle isn't available). As a result of
serializing, SerialCookie can save almost any Python object to a
value, and recover the exact same object when the cookie has been
returned. (SerialCookie can yield some strange-looking cookie
values, however.)
>>> C = Cookie.SerialCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
7
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number="I7\\012."\r\nSet-Cookie: string="S\'seven\'\\012p1\\012."'
Be warned, however, if SerialCookie cannot de-serialize a value (because
it isn't a valid pickle'd object), IT WILL RAISE AN EXCEPTION.
SmartCookie
The SmartCookie combines aspects of each of the other two flavors.
When setting a value in a dictionary-fashion, the SmartCookie will
serialize (ala cPickle) the value *if and only if* it isn't a
Python string. String objects are *not* serialized. Similarly,
when the load() method parses out values, it attempts to de-serialize
the value. If it fails, then it falls back to treating the value
as a string.
>>> C = Cookie.SmartCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
7
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number="I7\\012."\r\nSet-Cookie: string=seven'
Backwards Compatibility
-----------------------
In order to keep compatibility with earlier versions of Cookie.py,
it is still possible to use Cookie.Cookie() to create a Cookie. In
fact, this simply returns a SmartCookie.
>>> C = Cookie.Cookie()
>>> print C.__class__.__name__
SmartCookie
Finis.
""" #"
# ^
# |----helps out font-lock
#
# Import our required modules
#
import string
try:
from cPickle import dumps, loads
except ImportError:
from pickle import dumps, loads
import re, warnings
__all__ = ["CookieError","BaseCookie","SimpleCookie","SerialCookie",
"SmartCookie","Cookie"]
_nulljoin = ''.join
_semispacejoin = '; '.join
_spacejoin = ' '.join
#
# Define an exception visible to External modules
#
class CookieError(Exception):
pass
# These quoting routines conform to the RFC2109 specification, which in
# turn references the character definitions from RFC2068. They provide
# a two-way quoting algorithm. Any non-text character is translated
# into a 4 character sequence: a forward-slash followed by the
# three-digit octal equivalent of the character. Any '\' or '"' is
# quoted with a preceeding '\' slash.
#
# These are taken from RFC2068 and RFC2109.
# _LegalChars is the list of chars which don't require "'s
# _Translator hash-table for fast quoting
#
_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~"
_Translator = {
'\000' : '\\000', '\001' : '\\001', '\002' : '\\002',
'\003' : '\\003', '\004' : '\\004', '\005' : '\\005',
'\006' : '\\006', '\007' : '\\007', '\010' : '\\010',
'\011' : '\\011', '\012' : '\\012', '\013' : '\\013',
'\014' : '\\014', '\015' : '\\015', '\016' : '\\016',
'\017' : '\\017', '\020' : '\\020', '\021' : '\\021',
'\022' : '\\022', '\023' : '\\023', '\024' : '\\024',
'\025' : '\\025', '\026' : '\\026', '\027' : '\\027',
'\030' : '\\030', '\031' : '\\031', '\032' : '\\032',
'\033' : '\\033', '\034' : '\\034', '\035' : '\\035',
'\036' : '\\036', '\037' : '\\037',
# Because of the way browsers really handle cookies (as opposed
# to what the RFC says) we also encode , and ;
',' : '\\054', ';' : '\\073',
'"' : '\\"', '\\' : '\\\\',
'\177' : '\\177', '\200' : '\\200', '\201' : '\\201',
'\202' : '\\202', '\203' : '\\203', '\204' : '\\204',
'\205' : '\\205', '\206' : '\\206', '\207' : '\\207',
'\210' : '\\210', '\211' : '\\211', '\212' : '\\212',
'\213' : '\\213', '\214' : '\\214', '\215' : '\\215',
'\216' : '\\216', '\217' : '\\217', '\220' : '\\220',
'\221' : '\\221', '\222' : '\\222', '\223' : '\\223',
'\224' : '\\224', '\225' : '\\225', '\226' : '\\226',
'\227' : '\\227', '\230' : '\\230', '\231' : '\\231',
'\232' : '\\232', '\233' : '\\233', '\234' : '\\234',
'\235' : '\\235', '\236' : '\\236', '\237' : '\\237',
'\240' : '\\240', '\241' : '\\241', '\242' : '\\242',
'\243' : '\\243', '\244' : '\\244', '\245' : '\\245',
'\246' : '\\246', '\247' : '\\247', '\250' : '\\250',
'\251' : '\\251', '\252' : '\\252', '\253' : '\\253',
'\254' : '\\254', '\255' : '\\255', '\256' : '\\256',
'\257' : '\\257', '\260' : '\\260', '\261' : '\\261',
'\262' : '\\262', '\263' : '\\263', '\264' : '\\264',
'\265' : '\\265', '\266' : '\\266', '\267' : '\\267',
'\270' : '\\270', '\271' : '\\271', '\272' : '\\272',
'\273' : '\\273', '\274' : '\\274', '\275' : '\\275',
'\276' : '\\276', '\277' : '\\277', '\300' : '\\300',
'\301' : '\\301', '\302' : '\\302', '\303' : '\\303',
'\304' : '\\304', '\305' : '\\305', '\306' : '\\306',
'\307' : '\\307', '\310' : '\\310', '\311' : '\\311',
'\312' : '\\312', '\313' : '\\313', '\314' : '\\314',
'\315' : '\\315', '\316' : '\\316', '\317' : '\\317',
'\320' : '\\320', '\321' : '\\321', '\322' : '\\322',
'\323' : '\\323', '\324' : '\\324', '\325' : '\\325',
'\326' : '\\326', '\327' : '\\327', '\330' : '\\330',
'\331' : '\\331', '\332' : '\\332', '\333' : '\\333',
'\334' : '\\334', '\335' : '\\335', '\336' : '\\336',
'\337' : '\\337', '\340' : '\\340', '\341' : '\\341',
'\342' : '\\342', '\343' : '\\343', '\344' : '\\344',
'\345' : '\\345', '\346' : '\\346', '\347' : '\\347',
'\350' : '\\350', '\351' : '\\351', '\352' : '\\352',
'\353' : '\\353', '\354' : '\\354', '\355' : '\\355',
'\356' : '\\356', '\357' : '\\357', '\360' : '\\360',
'\361' : '\\361', '\362' : '\\362', '\363' : '\\363',
'\364' : '\\364', '\365' : '\\365', '\366' : '\\366',
'\367' : '\\367', '\370' : '\\370', '\371' : '\\371',
'\372' : '\\372', '\373' : '\\373', '\374' : '\\374',
'\375' : '\\375', '\376' : '\\376', '\377' : '\\377'
}
_idmap = ''.join(chr(x) for x in xrange(256))
def _quote(str, LegalChars=_LegalChars,
idmap=_idmap, translate=string.translate):
#
# If the string does not need to be double-quoted,
# then just return the string. Otherwise, surround
# the string in doublequotes and precede quote (with a \)
# special characters.
#
if "" == translate(str, idmap, LegalChars):
return str
else:
return '"' + _nulljoin( map(_Translator.get, str, str) ) + '"'
# end _quote
_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
_QuotePatt = re.compile(r"[\\].")
def _unquote(str):
# If there aren't any doublequotes,
# then there can't be any special characters. See RFC 2109.
if len(str) < 2:
return str
if str[0] != '"' or str[-1] != '"':
return str
# We have to assume that we must decode this string.
# Down to work.
# Remove the "s
str = str[1:-1]
# Check for special sequences. Examples:
# \012 --> \n
# \" --> "
#
i = 0
n = len(str)
res = []
while 0 <= i < n:
Omatch = _OctalPatt.search(str, i)
Qmatch = _QuotePatt.search(str, i)
if not Omatch and not Qmatch: # Neither matched
res.append(str[i:])
break
# else:
j = k = -1
if Omatch: j = Omatch.start(0)
if Qmatch: k = Qmatch.start(0)
if Qmatch and ( not Omatch or k < j ): # QuotePatt matched
res.append(str[i:k])
res.append(str[k+1])
i = k+2
else: # OctalPatt matched
res.append(str[i:j])
res.append( chr( int(str[j+1:j+4], 8) ) )
i = j+4
return _nulljoin(res)
# end _unquote
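# Illustrative round trip through the two helpers above (an added example,
# not part of the original module):
#
#     >>> _quote('a;b')           # ';' is not in _LegalChars, so the value is quoted
#     '"a\\073b"'
#     >>> _unquote('"a\\073b"')   # the octal escape \073 is folded back into ';'
#     'a;b'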
# The _getdate() routine is used to set the expiration time in
# the cookie's HTTP header. By default, _getdate() returns the
# current time in the appropriate "expires" format for a
# Set-Cookie header. The one optional argument is an offset from
# now, in seconds. For example, an offset of -3600 means "one hour ago".
# The offset may be a floating point number.
#
_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
_monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname):
from time import gmtime, time
now = time()
year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % \
(weekdayname[wd], day, monthname[month], year, hh, mm, ss)
#
# A class to hold ONE key,value pair.
# In a cookie, each such pair may have several attributes.
# so this class is used to keep the attributes associated
# with the appropriate key,value pair.
# This class also includes a coded_value attribute, which
# is used to hold the network representation of the
# value. This is most useful when Python objects are
# pickled for network transit.
#
class Morsel(dict):
# RFC 2109 lists these attributes as reserved:
# path comment domain
# max-age secure version
#
# For historical reasons, these attributes are also reserved:
# expires
#
# This is an extension from Microsoft:
# httponly
#
# This dictionary provides a mapping from the lowercase
# variant on the left to the appropriate traditional
# formatting on the right.
_reserved = { "expires" : "expires",
"path" : "Path",
"comment" : "Comment",
"domain" : "Domain",
"max-age" : "Max-Age",
"secure" : "secure",
"httponly" : "httponly",
"version" : "Version",
}
def __init__(self):
# Set defaults
self.key = self.value = self.coded_value = None
# Set default attributes
for K in self._reserved:
dict.__setitem__(self, K, "")
# end __init__
def __setitem__(self, K, V):
K = K.lower()
if not K in self._reserved:
raise CookieError("Invalid Attribute %s" % K)
dict.__setitem__(self, K, V)
# end __setitem__
def isReservedKey(self, K):
return K.lower() in self._reserved
# end isReservedKey
def set(self, key, val, coded_val,
LegalChars=_LegalChars,
idmap=_idmap, translate=string.translate):
# First we verify that the key isn't a reserved word
# Second we make sure it only contains legal characters
if key.lower() in self._reserved:
raise CookieError("Attempt to set a reserved key: %s" % key)
if "" != translate(key, idmap, LegalChars):
raise CookieError("Illegal key value: %s" % key)
# It's a good key, so save it.
self.key = key
self.value = val
self.coded_value = coded_val
# end set
def output(self, attrs=None, header = "Set-Cookie:"):
return "%s %s" % ( header, self.OutputString(attrs) )
__str__ = output
def __repr__(self):
return '<%s: %s=%s>' % (self.__class__.__name__,
self.key, repr(self.value) )
def js_output(self, attrs=None):
# Print javascript
return """
<script type="text/javascript">
<!-- begin hiding
document.cookie = \"%s\";
// end hiding -->
</script>
""" % ( self.OutputString(attrs).replace('"',r'\"'), )
# end js_output()
def OutputString(self, attrs=None):
# Build up our result
#
result = []
RA = result.append
# First, the key=value pair
RA("%s=%s" % (self.key, self.coded_value))
# Now add any defined attributes
if attrs is None:
attrs = self._reserved
items = self.items()
items.sort()
for K,V in items:
if V == "": continue
if K not in attrs: continue
if K == "expires" and type(V) == type(1):
RA("%s=%s" % (self._reserved[K], _getdate(V)))
elif K == "max-age" and type(V) == type(1):
RA("%s=%d" % (self._reserved[K], V))
elif K == "secure":
RA(str(self._reserved[K]))
elif K == "httponly":
RA(str(self._reserved[K]))
else:
RA("%s=%s" % (self._reserved[K], V))
# Return the result
return _semispacejoin(result)
# end OutputString
# end Morsel class
#
# Pattern for finding cookie
#
# This used to be strict parsing based on the RFC2109 and RFC2068
# specifications. I have since discovered that MSIE 3.0x doesn't
# follow the character rules outlined in those specs. As a
# result, the parsing rules here are less strict.
#
_LegalCharsPatt = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]"
_CookiePattern = re.compile(
r"(?x)" # This is a Verbose pattern
r"(?P<key>" # Start of group 'key'
""+ _LegalCharsPatt +"+?" # Any word of at least one letter, nongreedy
r")" # End of group 'key'
r"\s*=\s*" # Equal Sign
r"(?P<val>" # Start of group 'val'
r'"(?:[^\\"]|\\.)*"' # Any doublequoted string
r"|" # or
r"\w{3},\s[\s\w\d-]{9,11}\s[\d:]{8}\sGMT" # Special case for "expires" attr
r"|" # or
""+ _LegalCharsPatt +"*" # Any word or empty string
r")" # End of group 'val'
r"\s*;?" # Probably ending in a semi-colon
)
# At long last, here is the cookie class.
# Using this class is almost just like using a dictionary.
# See this module's docstring for example usage.
#
class BaseCookie(dict):
# A container class for a set of Morsels
#
def value_decode(self, val):
"""real_value, coded_value = value_decode(STRING)
Called prior to setting a cookie's value from the network
representation. The VALUE is the value read from HTTP
header.
Override this function to modify the behavior of cookies.
"""
return val, val
# end value_encode
def value_encode(self, val):
"""real_value, coded_value = value_encode(VALUE)
Called prior to setting a cookie's value from the dictionary
representation. The VALUE is the value being assigned.
Override this function to modify the behavior of cookies.
"""
strval = str(val)
return strval, strval
# end value_encode
def __init__(self, input=None):
if input: self.load(input)
# end __init__
def __set(self, key, real_value, coded_value):
"""Private method for setting a cookie's value"""
M = self.get(key, Morsel())
M.set(key, real_value, coded_value)
dict.__setitem__(self, key, M)
# end __set
def __setitem__(self, key, value):
"""Dictionary style assignment."""
rval, cval = self.value_encode(value)
self.__set(key, rval, cval)
# end __setitem__
def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"):
"""Return a string suitable for HTTP."""
result = []
items = self.items()
items.sort()
for K,V in items:
result.append( V.output(attrs, header) )
return sep.join(result)
# end output
__str__ = output
def __repr__(self):
L = []
items = self.items()
items.sort()
for K,V in items:
L.append( '%s=%s' % (K,repr(V.value) ) )
return '<%s: %s>' % (self.__class__.__name__, _spacejoin(L))
def js_output(self, attrs=None):
"""Return a string suitable for JavaScript."""
result = []
items = self.items()
items.sort()
for K,V in items:
result.append( V.js_output(attrs) )
return _nulljoin(result)
# end js_output
def load(self, rawdata):
"""Load cookies from a string (presumably HTTP_COOKIE) or
from a dictionary. Loading cookies from a dictionary 'd'
is equivalent to calling:
map(Cookie.__setitem__, d.keys(), d.values())
"""
if type(rawdata) == type(""):
self.__ParseString(rawdata)
else:
# self.update() wouldn't call our custom __setitem__
for k, v in rawdata.items():
self[k] = v
return
# end load()
def __ParseString(self, str, patt=_CookiePattern):
i = 0 # Our starting point
n = len(str) # Length of string
M = None # current morsel
while 0 <= i < n:
# Start looking for a cookie
match = patt.search(str, i)
if not match: break # No more cookies
K,V = match.group("key"), match.group("val")
i = match.end(0)
# Parse the key, value in case it's metainfo
if K[0] == "$":
# We ignore attributes which pertain to the cookie
# mechanism as a whole. See RFC 2109.
# (Does anyone care?)
if M:
M[ K[1:] ] = V
elif K.lower() in Morsel._reserved:
if M:
M[ K ] = _unquote(V)
else:
rval, cval = self.value_decode(V)
self.__set(K, rval, cval)
M = self[K]
# end __ParseString
# end BaseCookie class
class SimpleCookie(BaseCookie):
"""SimpleCookie
SimpleCookie supports strings as cookie values. When setting
the value using the dictionary assignment notation, SimpleCookie
calls the builtin str() to convert the value to a string. Values
received from HTTP are kept as strings.
"""
def value_decode(self, val):
return _unquote( val ), val
def value_encode(self, val):
strval = str(val)
return strval, _quote( strval )
# end SimpleCookie
class SerialCookie(BaseCookie):
"""SerialCookie
SerialCookie supports arbitrary objects as cookie values. All
values are serialized (using cPickle) before being sent to the
client. All incoming values are assumed to be valid Pickle
representations. IF AN INCOMING VALUE IS NOT IN A VALID PICKLE
FORMAT, THEN AN EXCEPTION WILL BE RAISED.
Note: Large cookie values add overhead because they must be
retransmitted on every HTTP transaction.
Note: HTTP has a 2k limit on the size of a cookie. This class
does not check for this limit, so be careful!!!
"""
def __init__(self, input=None):
warnings.warn("SerialCookie class is insecure; do not use it",
DeprecationWarning)
BaseCookie.__init__(self, input)
# end __init__
def value_decode(self, val):
# This could raise an exception!
return loads( _unquote(val) ), val
def value_encode(self, val):
return val, _quote( dumps(val) )
# end SerialCookie
class SmartCookie(BaseCookie):
"""SmartCookie
SmartCookie supports arbitrary objects as cookie values. If the
object is a string, then it is quoted. If the object is not a
string, however, then SmartCookie will use cPickle to serialize
the object into a string representation.
Note: Large cookie values add overhead because they must be
retransmitted on every HTTP transaction.
Note: HTTP has a 2k limit on the size of a cookie. This class
does not check for this limit, so be careful!!!
"""
def __init__(self, input=None):
warnings.warn("Cookie/SmartCookie class is insecure; do not use it",
DeprecationWarning)
BaseCookie.__init__(self, input)
# end __init__
def value_decode(self, val):
strval = _unquote(val)
try:
return loads(strval), val
except:
return strval, val
def value_encode(self, val):
if type(val) == type(""):
return val, _quote(val)
else:
return val, _quote( dumps(val) )
# end SmartCookie
###########################################################
# Backwards Compatibility: Don't break any existing code!
# We provide Cookie() as an alias for SmartCookie()
Cookie = SmartCookie
#
###########################################################
def _test():
import doctest, Cookie
return doctest.testmod(Cookie)
if __name__ == "__main__":
_test()
#Local Variables:
#tab-width: 4
#end: | [
"[email protected]"
] | |
6dcec5b996d8a262227be3e7686fc90e5ef05185 | fc5816b2ba73124a4744d08b7acf8f62ced66640 | /timer.py | ec255d136560e92c284404460b1a319a77e14567 | [] | no_license | rkdarst/fitz | 51e2b9e28a36ffe2b58f49ff36a4593ca55c5045 | b2ac6aaff47217f40ac39042d27ffd130b8a36a5 | refs/heads/master | 2016-09-05T17:20:52.187546 | 2015-05-16T10:49:14 | 2015-05-16T10:49:14 | 22,289,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,755 | py | # Richard Darst, 2006
"""Provides a stopwatch for code.
Classes
=======
There is a class defined called Timer. It has the following methods:
__init__ -- argument is `clock`, which is the timing function to use
for this timer. It defaults to proctime.
reset -- zero the stopwatch. The getrusage system call is zeroed when
the code starts (and the timer keeps this zero initially),
but calling reset() zeros it. Zeroing is done by recording
the current time and subtracting this out from future calls.
time -- returns the time since the last reset
lap -- return the time from the last reset, and reset it.
Global functions
================
t -- an automatically created instance of timer, using `proctime`.
start-- ~\ The methods on `t` are bound to the global namespace,
time -- > so timer.start(), etc, can be used if this what you
reset-- _/ need.
The module includes various clock functions to use, such as
`realtime`, `proctime`, `usertime`, and `systime`.
"""
import resource
import time as timemodule
def systime():
"""Time spent executing system calls.
Time spend doing things like disk access, IO, etc.
Uses the system call getrusage().ru_stime
"""
return resource.getrusage(resource.RUSAGE_SELF).ru_stime
def usertime():
"""Time spent executing code in user mode.
Time spent doing things like adding numbers.
Uses the system call getrusage().ru_utime
"""
return resource.getrusage(resource.RUSAGE_SELF).ru_utime
def proctime():
"""Time spent by processor executing code
sys + user time
"""
r = resource.getrusage(resource.RUSAGE_SELF)
return r.ru_utime+r.ru_stime
def realtime():
"""Time on a clock on the wall.
If your processor isn't busy doing other things, this will be the
best to find how much time your code takes.
time.time(), which uses the system call gettimeofday() for greater
accuracy when avaliable.
"""
return timemodule.time()
class Timer:
_starttime = 0.
def __init__(self, clock=proctime):
"""Create rusage object using a certain timing function.
The argument `clock` is the clock function to use. Default is
proctime.
"""
self._clock = clock
def reset(self):
"""Reset the timer
"""
self._starttime = self._clock()
def time(self):
"""Return time since last reset
"""
return self._clock() - self._starttime
def lap(self):
"""Reset and return time since last reset
"""
oldtime = self._clock() - self._starttime
self._starttime = self._clock()
return oldtime
t = Timer()
reset = t.reset
time = t.time
lap = t.lap
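# Illustrative usage (an added sketch, not part of the original module). It
# assumes this file is importable as `timer` and that `do_work()` stands in
# for whatever code you want to measure:
#
#   import timer
#   timer.reset()                  # zero the module-level stopwatch
#   do_work()
#   print timer.time()             # CPU seconds (user + sys) since reset()
#
#   wall = timer.Timer(clock=timer.realtime)   # wall-clock stopwatch instead
#   wall.reset()
#   do_work()
#   elapsed = wall.lap()           # seconds since reset(); lap() also re-zeroes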
| [
"[email protected]"
] | |
16a3fc07f5076c8209e850bf5fae219bc5f9d24a | a718de5d51c8d430e791aca6092669c04548fd64 | /Census-Analyser-master/census_analyser/test_census.py | a1d7d9093033bac09637e510c928fb25d4e80fa1 | [] | no_license | santoshikalaskar/Basic_Advance_python_program | d0fef4134ed4b14f84ff05a3b37e1773c111a2d1 | 84df5c336d5304c3c727102194ba62417640643a | refs/heads/master | 2023-01-22T15:06:24.909145 | 2020-12-02T14:01:29 | 2020-12-02T14:01:29 | 314,511,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,451 | py | import pytest
from stateCensusAnalyser import CSVStateCensus, SortData, Mapping
from custom_exceptions import ( FileIsNotCSVTypeException,
EmptyFileException,
InvalidDelimiterException)
sort_ref = SortData()
map_ref = Mapping()
class TestCensus:
def test_State_census_records_to_match_number_of_records_UC1_TC1(self):
obj = CSVStateCensus("IndiaStateCensusData.csv")
total_records = obj.number_of_records(obj.load_CSV)
assert total_records == 28
def test_file_not_in_csv_format_will_raise_FileIsNotCSVTypeException_UC1_TC2(self):
with pytest.raises(FileIsNotCSVTypeException):
obj = CSVStateCensus("demo_empty.txt")
obj.load_CSV
def test_file_is_csv_but_empty_will_raise_EmptyFileException_UC1_TC3(self):
with pytest.raises(EmptyFileException):
obj = CSVStateCensus("demo_empty.csv")
obj.load_CSV
def test_file_is_csv_but_delimiter_is_invalid_will_raise_InvalidDelimiterException_UC1_TC4(self):
with pytest.raises(InvalidDelimiterException):
obj = CSVStateCensus('csv_with_invalid_delimiter.csv')
obj.load_CSV
def test_file_is_csv_but_header_is_invalid_will_return_InvalidHeader_UC1_TC5(self):
obj = CSVStateCensus("csv_with_invalid_header.csv")
assert obj.load_CSV == "InvalidHeader"
def test_State_code_records_to_match_number_of_records_UC2_TC1(self):
obj = CSVStateCensus("StateCode.csv")
total_records = obj.number_of_records(obj.load_CSV)
assert total_records == 36
def test_IndiaStateCensus_first_state_after_sorting_in_JSON_will_be_Andhra_Pradesh_UC3(self):
data_frame = sort_ref._SortData__sort_InidaCensusData_in_alphabetical_order_in_JSON()
assert data_frame[0]["State"] == 'Andhra Pradesh'
def test_IndiaStateCensus_last_state_after_sorting_in_JSON_will_be_West_Bengal_UC3(self):
data_frame = sort_ref._SortData__sort_InidaCensusData_in_alphabetical_order_in_JSON()
assert data_frame[28]["State"] == 'West Bengal'
def test_StateCode_first_stateCode_after_sorting_in_JSON_will_be_AD_UC4(self):
data_frame = sort_ref._SortData__sort_StateCode_in_stateCode_order_in_JSON()
assert data_frame[0]["StateCode"] == 'AD'
def test_StateCode_last_stateCode_after_sorting_in_JSON_will_be_WB_UC4(self):
data_frame = sort_ref._SortData__sort_StateCode_in_stateCode_order_in_JSON()
assert data_frame.pop()["StateCode"] == 'WB'
def test_after_sort_according_to_population_check_first_record_will_be_Sikkim_UC5(self):
data = sort_ref._SortData__sort_InidaCensusData_in_asc_population_order_in_JSON()
assert data[0]["State"] == "Sikkim"
def test_after_sort_according_to_population_check_last_record_will_be_Uttar_Pradesh_UC5(self):
data = sort_ref._SortData__sort_InidaCensusData_in_asc_population_order_in_JSON()
assert data.pop()["State"] == "Uttar Pradesh"
def test_after_sort_according_to_populationDensity_check_first_record_will_be_Arunachal_Pradesh_UC6(self):
data = sort_ref._SortData__sort_InidaCensusData_in_asc_population_density_order_in_JSON()
assert data[0]["State"] == "Arunachal Pradesh"
def test_after_sort_according_to_populationDensity_check_last_record_will_be_Bihar_UC6(self):
data = sort_ref._SortData__sort_InidaCensusData_in_asc_population_density_order_in_JSON()
assert data.pop()["State"] == "Bihar"
def test_mapping_by_checking_first_record_will_be_AP_REFACTOR6(self):
data = map_ref._Mapping__map_state_census_with_state_code_according_to_code()
assert data[0]["StateCode"] == 'AP'
def test_mapping_by_checking_last_record_will_be_WB_REFACTOR6(self):
data = map_ref._Mapping__map_state_census_with_state_code_according_to_code()
assert data.pop()["StateCode"] == 'WB'
def test_first_state_from_census_data_after_sorting_in_desc_area_order_will_return_Rajasthan_UC7(self):
data = sort_ref._SortData__sort_InidaCensusData_in_desc_area_order_in_JSON()
assert data[0]["State"] == "Rajasthan"
def test_last_state_from_census_data_after_sorting_in_desc_area_order_will_return_Goa_UC7(self):
data = sort_ref._SortData__sort_InidaCensusData_in_desc_area_order_in_JSON()
assert data.pop()["State"] == "Goa" | [
"[email protected]"
] | |
0c80371e2bfa26e44298ef6ee0467de3c1f87c35 | d697c1d45e96bd440be9c17ab14243a5882b1f52 | /qianfeng/常用模块/Tkinter/Button.py | b271d275b4103dc54132277c710592fefe06f946 | [] | no_license | ithjl521/python | 9eeda2e60dda97ee36e8764c06400eb12818689f | f4fe50799501c483cb64445fd05ee0f30f56576c | refs/heads/master | 2020-07-12T23:10:53.608276 | 2019-11-08T08:59:35 | 2019-11-08T08:59:35 | 204,931,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | import tkinter
def fun():
    print('hello world')
win = tkinter.Tk()
win.title('title-hjl')
win.geometry("400x400+200+50")
# create a button
button = tkinter.Button(win,
                        text='Button',
command=fun,
width=10,
height=10)
button.pack()
button2 = tkinter.Button(win,
                         text='Button',
command=win.quit)
button2.pack()
win.mainloop() | [
"[email protected]"
] | |
ff5da7da6b07126320c3f20bf185a5bb97f29a76 | 45edff14271724c5bf27e62e96eeb635840eae22 | /DeepLearning/tensorflow/10-1验证码生成.py | 088f13b02cdcbe8d829ad08df6367e5e7919adc9 | [] | no_license | DaiJitao/machine_learning | 1e41208dc94836a97e57a4b0f5778f8da2bb81d4 | 49e1db9ecbfbf886a11ce416eea402d214cf2049 | refs/heads/master | 2021-06-25T23:52:06.066315 | 2021-02-07T16:17:50 | 2021-02-07T16:17:50 | 209,712,507 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,595 | py |
# coding: utf-8
# In[1]:
# 验证码生成库
from captcha.image import ImageCaptcha # pip install captcha
import numpy as np
from PIL import Image
import random
import sys
from DeepLearning.utils import mkdir
number = ['0','1','2','3','4','5','6','7','8','9']
# alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
# ALPHABET = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
def random_captcha_text(char_set=number, captcha_size=4):
    # captcha character list
    captcha_text = []
    for i in range(captcha_size):
        # pick a random character
        c = random.choice(char_set)
        # append it to the captcha list
captcha_text.append(c)
return captcha_text
# generate a captcha image for randomly chosen characters
def gen_captcha_text_and_image(out_path='E:/data/captcha/images/'):
image = ImageCaptcha()
    # get randomly generated captcha text
    captcha_text = random_captcha_text()
    # join the captcha character list into a single string
    captcha_text = ''.join(captcha_text)
    # generate the captcha image
    captcha = image.generate(captcha_text)
    mkdir(out_path)
    image.write(captcha_text, out_path + captcha_text + '.jpg')  # write to file
# fewer than 10000 in practice, because duplicate names overwrite each other
num = 10000
if __name__ == '__main__':
for i in range(num):
gen_captcha_text_and_image()
sys.stdout.write('\r>> Creating image %d/%d' % (i+1, num))
sys.stdout.flush()
sys.stdout.write('\n')
sys.stdout.flush()
    print("Generation finished")
# In[ ]:
# In[ ]:
| [
"[email protected]"
] | |
ea796738c0ea8c09243d2bef06d9183481be9d08 | d2f7471c1429f1ca454bb4cc982bbbecc31f8160 | /app/conf/wsgi.py | 9cc41bf59a4f0f8c847660a80aebaaa9ea5625f6 | [
"MIT"
] | permissive | HenriqueLR/hangman-game | 1e442735f688c4e0b2ada6f2208360a01e0df353 | 6cb29ae1ab666af0d6b054b2e1d598ebb5ff8db3 | refs/heads/master | 2021-08-22T03:39:08.156486 | 2017-11-29T05:08:56 | 2017-11-29T05:08:56 | 108,403,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
WSGI config for app project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "conf.settings_production")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| [
"[email protected]"
] | |
baaea832d2de21fc9bdca57ef60c88bc7b43cf92 | df2cbe914f463ad050d7ed26194424afbe3a0a52 | /addons/website_sale/models/product_image.py | eea9afb69671067f39780fa6041a4031d9e785e0 | [
"Apache-2.0"
] | permissive | SHIVJITH/Odoo_Machine_Test | 019ed339e995be980606a2d87a63312ddc18e706 | 310497a9872db7844b521e6dab5f7a9f61d365a4 | refs/heads/main | 2023-07-16T16:23:14.300656 | 2021-08-29T11:48:36 | 2021-08-29T11:48:36 | 401,010,175 | 0 | 0 | Apache-2.0 | 2021-08-29T10:13:58 | 2021-08-29T10:13:58 | null | UTF-8 | Python | false | false | 2,699 | py | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, tools, _
from odoo.exceptions import ValidationError
from odoo.addons.website.tools import get_video_embed_code
class ProductImage(models.Model):
_name = 'product.image'
_description = "Product Image"
_inherit = ['image.mixin']
_order = 'sequence, id'
name = fields.Char("Name", required=True)
sequence = fields.Integer(default=10, index=True)
image_1920 = fields.Image(required=True)
product_tmpl_id = fields.Many2one('product.template', "Product Template", index=True, ondelete='cascade')
product_variant_id = fields.Many2one('product.product', "Product Variant", index=True, ondelete='cascade')
video_url = fields.Char('Video URL',
help='URL of a video for showcasing your product.')
embed_code = fields.Char(compute="_compute_embed_code")
can_image_1024_be_zoomed = fields.Boolean("Can Image 1024 be zoomed", compute='_compute_can_image_1024_be_zoomed', store=True)
@api.depends('image_1920', 'image_1024')
def _compute_can_image_1024_be_zoomed(self):
for image in self:
image.can_image_1024_be_zoomed = image.image_1920 and tools.is_image_size_above(image.image_1920, image.image_1024)
@api.depends('video_url')
def _compute_embed_code(self):
for image in self:
image.embed_code = get_video_embed_code(image.video_url)
@api.constrains('video_url')
def _check_valid_video_url(self):
for image in self:
if image.video_url and not image.embed_code:
raise ValidationError(_("Provided video URL for '%s' is not valid. Please enter a valid video URL.", image.name))
@api.model_create_multi
def create(self, vals_list):
"""
We don't want the default_product_tmpl_id from the context
        to be applied when a product_variant_id is set, to avoid
        having the variant images also show up as template images.
        But we do want it when no product_variant_id is set.
"""
context_without_template = self.with_context({k: v for k, v in self.env.context.items() if k != 'default_product_tmpl_id'})
normal_vals = []
variant_vals_list = []
for vals in vals_list:
if vals.get('product_variant_id') and 'default_product_tmpl_id' in self.env.context:
variant_vals_list.append(vals)
else:
normal_vals.append(vals)
return super().create(normal_vals) + super(ProductImage, context_without_template).create(variant_vals_list)
| [
"[email protected]"
] | |
c227bd7e8a5d2110d0ff22a4bd3d177ce65344de | d8db486c6c0e4f7c4da3dd9d8752a2de0174a1d6 | /test/apiv2/rest_api/v1_test_rest_v1_0_0.py | acd6273ef57d7afcf195aca5cd60f67e8b9c27dd | [
"Apache-2.0"
] | permissive | isabella232/podman | 49c10ca0df99bbc4362b8ec284b43bf05c38cca8 | dcd498a6885f0293934214af0c6fc2d3c7717bd5 | refs/heads/master | 2023-03-07T23:36:51.958197 | 2020-11-18T15:59:41 | 2020-11-18T15:59:41 | 314,002,564 | 0 | 0 | Apache-2.0 | 2021-02-23T16:35:32 | 2020-11-18T17:06:49 | null | UTF-8 | Python | false | false | 7,662 | py | import json
import os
import shlex
import signal
import string
import subprocess
import sys
import time
import unittest
from collections.abc import Iterable
from multiprocessing import Process
import requests
from dateutil.parser import parse
PODMAN_URL = "http://localhost:8080"
def _url(path):
return PODMAN_URL + "/v1.0.0/libpod" + path
def podman():
binary = os.getenv("PODMAN_BINARY")
if binary is None:
binary = "bin/podman"
return binary
def ctnr(path):
r = requests.get(_url("/containers/json?all=true"))
try:
ctnrs = json.loads(r.text)
except Exception as e:
sys.stderr.write("Bad container response: {}/{}".format(r.text, e))
raise e
return path.format(ctnrs[0]["Id"])
class TestApi(unittest.TestCase):
podman = None
def setUp(self):
super().setUp()
if TestApi.podman.poll() is not None:
            sys.stderr.write("podman service returned {}".format(TestApi.podman.returncode))
sys.exit(2)
requests.get(_url("/images/create?fromSrc=docker.io%2Falpine%3Alatest"))
# calling out to podman is easier than the API for running a container
subprocess.run(
[podman(), "run", "alpine", "/bin/ls"],
check=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
@classmethod
def setUpClass(cls):
super().setUpClass()
TestApi.podman = subprocess.Popen(
[
podman(),
"system",
"service",
"tcp:localhost:8080",
"--log-level=debug",
"--time=0",
],
shell=False,
stdin=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
time.sleep(2)
@classmethod
def tearDownClass(cls):
TestApi.podman.terminate()
stdout, stderr = TestApi.podman.communicate(timeout=0.5)
if stdout:
print("\nService Stdout:\n" + stdout.decode("utf-8"))
if stderr:
print("\nService Stderr:\n" + stderr.decode("utf-8"))
if TestApi.podman.returncode > 0:
sys.stderr.write(
"podman exited with error code {}\n".format(TestApi.podman.returncode)
)
sys.exit(2)
return super().tearDownClass()
def test_info(self):
r = requests.get(_url("/info"))
self.assertEqual(r.status_code, 200)
self.assertIsNotNone(r.content)
_ = json.loads(r.text)
def test_events(self):
r = requests.get(_url("/events?stream=false"))
self.assertEqual(r.status_code, 200, r.text)
self.assertIsNotNone(r.content)
for line in r.text.splitlines():
obj = json.loads(line)
# Actor.ID is uppercase for compatibility
_ = obj["Actor"]["ID"]
def test_containers(self):
r = requests.get(_url("/containers/json"), timeout=5)
self.assertEqual(r.status_code, 200, r.text)
obj = json.loads(r.text)
self.assertEqual(len(obj), 0)
def test_containers_all(self):
r = requests.get(_url("/containers/json?all=true"))
self.assertEqual(r.status_code, 200, r.text)
self.validateObjectFields(r.text)
def test_inspect_container(self):
r = requests.get(_url(ctnr("/containers/{}/json")))
self.assertEqual(r.status_code, 200, r.text)
obj = self.validateObjectFields(r.content)
_ = parse(obj["Created"])
def test_stats(self):
r = requests.get(_url(ctnr("/containers/{}/stats?stream=false")))
self.assertIn(r.status_code, (200, 409), r.text)
if r.status_code == 200:
self.validateObjectFields(r.text)
def test_delete_containers(self):
r = requests.delete(_url(ctnr("/containers/{}")))
self.assertEqual(r.status_code, 204, r.text)
def test_stop_containers(self):
r = requests.post(_url(ctnr("/containers/{}/start")))
self.assertIn(r.status_code, (204, 304), r.text)
r = requests.post(_url(ctnr("/containers/{}/stop")))
self.assertIn(r.status_code, (204, 304), r.text)
def test_start_containers(self):
r = requests.post(_url(ctnr("/containers/{}/stop")))
self.assertIn(r.status_code, (204, 304), r.text)
r = requests.post(_url(ctnr("/containers/{}/start")))
self.assertIn(r.status_code, (204, 304), r.text)
def test_restart_containers(self):
r = requests.post(_url(ctnr("/containers/{}/start")))
self.assertIn(r.status_code, (204, 304), r.text)
r = requests.post(_url(ctnr("/containers/{}/restart")), timeout=5)
self.assertEqual(r.status_code, 204, r.text)
def test_resize(self):
r = requests.post(_url(ctnr("/containers/{}/resize?h=43&w=80")))
self.assertIn(r.status_code, (200, 409), r.text)
if r.status_code == 200:
self.assertIsNone(r.text)
def test_attach_containers(self):
r = requests.post(_url(ctnr("/containers/{}/attach")))
self.assertIn(r.status_code, (101, 409), r.text)
def test_logs_containers(self):
r = requests.get(_url(ctnr("/containers/{}/logs?stdout=true")))
self.assertEqual(r.status_code, 200, r.text)
def test_post_create(self):
self.skipTest("TODO: create request body")
r = requests.post(_url("/containers/create?args=True"))
self.assertEqual(r.status_code, 200, r.text)
json.loads(r.text)
def test_commit(self):
r = requests.post(_url(ctnr("/commit?container={}")))
self.assertEqual(r.status_code, 200, r.text)
self.validateObjectFields(r.text)
def test_images(self):
r = requests.get(_url("/images/json"))
self.assertEqual(r.status_code, 200, r.text)
self.validateObjectFields(r.content)
def test_inspect_image(self):
r = requests.get(_url("/images/alpine/json"))
self.assertEqual(r.status_code, 200, r.text)
obj = self.validateObjectFields(r.content)
_ = parse(obj["Created"])
def test_delete_image(self):
r = requests.delete(_url("/images/alpine?force=true"))
self.assertEqual(r.status_code, 200, r.text)
json.loads(r.text)
def test_pull(self):
r = requests.post(_url("/images/pull?reference=alpine"), timeout=5)
self.assertEqual(r.status_code, 200, r.text)
json.loads(r.text)
def test_search(self):
# Had issues with this test hanging when repositories not happy
def do_search():
r = requests.get(_url("/images/search?term=alpine"), timeout=5)
self.assertEqual(r.status_code, 200, r.text)
json.loads(r.text)
search = Process(target=do_search)
search.start()
search.join(timeout=10)
self.assertFalse(search.is_alive(), "/images/search took too long")
def test_ping(self):
r = requests.get(PODMAN_URL + "/_ping")
self.assertEqual(r.status_code, 200, r.text)
r = requests.head(PODMAN_URL + "/_ping")
self.assertEqual(r.status_code, 200, r.text)
r = requests.get(_url("/_ping"))
self.assertEqual(r.status_code, 200, r.text)
r = requests.get(_url("/_ping"))
self.assertEqual(r.status_code, 200, r.text)
def validateObjectFields(self, buffer):
objs = json.loads(buffer)
if not isinstance(objs, dict):
for o in objs:
_ = o["Id"]
else:
_ = objs["Id"]
return objs
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
e4d23a56aa95cac0d11bf916aabdb1ea0d1d364c | 4dd1d8fa59e20061e2c12e540fc52b1b305e575b | /source/sims/s286/double-harris-ic.py | ba4ee31039abd673c146d855b94be5ecfb63344d | [
"MIT"
] | permissive | ammarhakim/ammar-simjournal | f63521906a97d55ab290a5960d94758139944c89 | 5019f4723e20db80a20db6f2bd454c2fd3241412 | refs/heads/master | 2023-06-08T08:18:11.722779 | 2023-06-02T15:06:43 | 2023-06-02T15:06:43 | 204,050,516 | 3 | 3 | null | 2022-02-01T16:53:13 | 2019-08-23T18:28:44 | Lua | UTF-8 | Python | false | false | 1,554 | py | from pylab import *
import numpy
Lx = 100.0
Ly = 50.0
NX = 200
NY = 100
B0 = 0.1
me = 1.0
mi = me*25.0
qe = -1.0
qi = 1.0
dlambda = 1.0
n0 = 1.0
ninf = 0.2*n0
psi0 = B0
dx = Lx/NX
dy = Ly/NY
X = linspace(0.5*dx, Lx-0.5*dx, NX)
Y = linspace(0.5*dy, Ly-0.5*dy, NY)
XX, YY = meshgrid(X, Y)
Bx = numpy.zeros((NX, NY), numpy.float)
n = numpy.zeros((NX, NY), numpy.float)
dBx1 = numpy.zeros((NX, NY), numpy.float)
dBy1 = numpy.zeros((NX, NY), numpy.float)
dBx2 = numpy.zeros((NX, NY), numpy.float)
dBy2 = numpy.zeros((NX, NY), numpy.float)
for i in range(NX):
for j in range(NY):
Bx[i,j] = B0*(-1+tanh((Y[j]-Ly/4)/dlambda)-tanh((Y[j]-3*Ly/4)/dlambda))
n[i,j] = n0/cosh((Y[j]-Ly/4)/dlambda)**2+n0/cosh((Y[j]-3*Ly/4)/dlambda)**2+ninf
dBx1[i,j] = -psi0*(pi/Ly)*cos(2*pi*(X[i]-Lx/4)/Lx)*sin(pi*(Y[j]-Ly/4)/Ly)
dBy1[i,j] = psi0*(2*pi/Lx)*sin(2*pi*(X[i]-Lx/4)/Lx)*cos(pi*(Y[j]-Ly/4)/Ly)
dBx2[i,j] = -psi0*(pi/Ly)*cos(2*pi*(X[i]+Lx/4)/Lx)*sin(pi*(Y[j]+Ly/4)/Ly)
dBy2[i,j] = psi0*(2*pi/Lx)*sin(2*pi*(X[i]+Lx/4)/Lx)*cos(pi*(Y[j]+Ly/4)/Ly)
figure(1)
pcolormesh(XX, YY, transpose(Bx))
title('Bx(x,y)')
colorbar()
figure(2)
pcolormesh(XX, YY, transpose(n))
title('n(x,y)')
colorbar()
figure(3)
plot(Y, Bx[NX/2,:], 'r-')
xlabel('Y')
ylabel('Bx')
title('Bx(y)')
figure(4)
plot(Y, n[NX/2,:], 'r-')
xlabel('Y')
ylabel('n')
title('n(y)')
figure(7)
Bxt = Bx+dBx1+dBx2
Byt = dBy1+dBy2
Btot = sqrt(Bxt**2+Byt**2)
#contour(XX, YY, transpose(Btot))
streamplot(X, Y, transpose(Bxt), transpose(Byt), density=2)
show()
| [
"[email protected]"
] | |
f1f6ea249402b6419fc6f324019956dc69813c50 | 69633bcb719e5caa2859c30d38f0fb0ff33b05a7 | /app/api/urls.py | 8ab6b71db340bc097ea2d0ae7dd19e923cae9533 | [] | no_license | Zarinabonu/employee_version_2 | e0ed3df43633241774686b7eaba01fbf2bebfa1a | 991d8fce23d3736df0271c3ca3e380a13ab6e5c0 | refs/heads/master | 2022-11-26T03:22:47.067982 | 2019-12-07T08:14:36 | 2019-12-07T08:14:36 | 223,534,726 | 0 | 0 | null | 2022-11-22T04:50:39 | 2019-11-23T05:12:50 | Python | UTF-8 | Python | false | false | 498 | py |
from django.urls import include, path
urlpatterns = [
path('group/', include('app.api.group.urls')),
path('employee/', include('app.api.employee.urls')),
path('salary/', include('app.api.salary.urls')),
path('accountant/', include('app.api.accountant.urls')),
path('attendance/', include('app.api.attendance.urls')),
path('project/', include('app.api.project.urls')),
path('task/', include('app.api.task.urls')),
path('static/', include('app.api.static.urls')),
] | [
"[email protected]"
] | |
deda7c2da31cde83eeb0a317505f79f8db6fb75e | 326c6ad82d59bb7509c02c76695ea9035993da70 | /lib/modules/powershell/situational_awareness/network/powerview/set_ad_object.py | 2a0e745b9ae829fc42345a0a3330e60ab4217790 | [
"BSD-3-Clause"
] | permissive | Arvanaghi/Empire | 0c08bd7ddfba9be10e96bb0834b8ce3bc829059b | fd168ebf8acb1c2ee59d56f2c393ebd7a297603e | refs/heads/master | 2021-01-20T14:15:34.864581 | 2017-08-05T17:51:44 | 2017-08-05T17:51:44 | 99,435,848 | 2 | 0 | null | 2017-08-05T16:50:16 | 2017-08-05T16:50:16 | null | UTF-8 | Python | false | false | 4,526 | py | from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Set-ADObject',
'Author': ['@harmj0y'],
'Description': ('Takes a SID, name, or SamAccountName to query for a specified '
'domain object, and then sets a specified "PropertyName" to a '
'specified "PropertyValue". Part of PowerView.'),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'https://github.com/PowerShellMafia/PowerSploit/blob/dev/Recon/'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'SID' : {
'Description' : "The SID of the domain object you're querying for.",
'Required' : False,
'Value' : ''
},
'Name' : {
'Description' : "The name of the domain object you're querying for.",
'Required' : False,
'Value' : ''
},
'SamAccountName' : {
'Description' : "The SamAccountName of the domain object you're querying for",
'Required' : False,
'Value' : ''
},
'Domain' : {
'Description' : 'The domain to query for objects, defaults to the current domain.',
'Required' : False,
'Value' : ''
},
'PropertyName' : {
'Description' : 'The property name to set.',
'Required' : False,
'Value' : ''
},
'PropertyValue' : {
'Description' : 'The value to set for PropertyName.',
'Required' : False,
'Value' : ''
},
'PropertyXorValue' : {
'Description' : 'Integer value to binary xor (-bxor) with the current int value.',
'Required' : False,
'Value' : ''
},
'ClearValue' : {
'Description' : 'Switch. Clear the value of PropertyName.',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
moduleName = self.info["Name"]
# read in the common powerview.ps1 module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/powerview.ps1"
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
# get just the code needed for the specified function
script = helpers.generate_dynamic_powershell_script(moduleCode, moduleName)
script += moduleName + " "
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " " + str(values['Value'])
script += ' | Out-String | %{$_ + \"`n\"};"`n'+str(moduleName)+' completed!"'
return script
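# The generated PowerShell therefore has the shape (illustrative, option names
# depend on what the operator set):
#     Set-ADObject -SamAccountName <name> -PropertyName <prop> -PropertyValue <val> | Out-String ...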
| [
"[email protected]"
] | |
0b2ea839276a4ec36c3be5661e2ffc16dd965171 | c1655d6c6c11dafc1c7fa9f771b8e1f99cf7f123 | /venv/lib/python3.6/site-packages/pyomo/core/base/suffix.py | aa87df3782834ac58feecd227e58f1e7c93b5655 | [] | no_license | igorsowa9/vpp | a27520f19a54d7490534016ded9cd66f4ef5385b | ea91e3b2db921e7b1a450d243f39dbcf61231107 | refs/heads/master | 2021-04-30T03:28:56.642244 | 2019-09-16T09:01:49 | 2019-09-16T09:01:49 | 121,514,524 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 15,481 | py | # ___________________________________________________________________________
#
# Pyomo: Python Optimization Modeling Objects
# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
# Under the terms of Contract DE-NA0003525 with National Technology and
# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
__all__ = ('Suffix',
'active_export_suffix_generator',
'active_import_suffix_generator')
import logging
import pprint
from pyomo.util.timing import ConstructionTimer
from pyomo.core.kernel.component_map import ComponentMap
from pyomo.core.base.plugin import register_component
from pyomo.core.base.component import ActiveComponent
from six import iteritems, itervalues
from pyomo.util.deprecation import deprecated
logger = logging.getLogger('pyomo.core')
# A list of convenient suffix generators, including:
# - active_export_suffix_generator
# **(used by problem writers)
# - export_suffix_generator
# - active_import_suffix_generator
# **(used by OptSolver and PyomoModel._load_solution)
# - import_suffix_generator
# - active_local_suffix_generator
# - local_suffix_generator
# - active_suffix_generator
# - suffix_generator
def active_export_suffix_generator(a_block, datatype=False):
if (datatype is False):
for name, suffix in iteritems(a_block.component_map(Suffix, active=True)):
if suffix.export_enabled() is True:
yield name, suffix
else:
for name, suffix in iteritems(a_block.component_map(Suffix, active=True)):
if (suffix.export_enabled() is True) and \
(suffix.get_datatype() is datatype):
yield name, suffix
def export_suffix_generator(a_block, datatype=False):
if (datatype is False):
for name, suffix in iteritems(a_block.component_map(Suffix)):
if suffix.export_enabled() is True:
yield name, suffix
else:
for name, suffix in iteritems(a_block.component_map(Suffix)):
if (suffix.export_enabled() is True) and \
(suffix.get_datatype() is datatype):
yield name, suffix
def active_import_suffix_generator(a_block, datatype=False):
if (datatype is False):
for name, suffix in iteritems(a_block.component_map(Suffix, active=True)):
if suffix.import_enabled() is True:
yield name, suffix
else:
for name, suffix in iteritems(a_block.component_map(Suffix, active=True)):
if (suffix.import_enabled() is True) and \
(suffix.get_datatype() is datatype):
yield name, suffix
def import_suffix_generator(a_block, datatype=False):
if (datatype is False):
for name, suffix in iteritems(a_block.component_map(Suffix)):
if suffix.import_enabled() is True:
yield name, suffix
else:
for name, suffix in iteritems(a_block.component_map(Suffix)):
if (suffix.import_enabled() is True) and \
(suffix.get_datatype() is datatype):
yield name, suffix
def active_local_suffix_generator(a_block, datatype=False):
if (datatype is False):
for name, suffix in iteritems(a_block.component_map(Suffix, active=True)):
if suffix.get_direction() is Suffix.LOCAL:
yield name, suffix
else:
for name, suffix in iteritems(a_block.component_map(Suffix, active=True)):
if (suffix.get_direction() is Suffix.LOCAL) and \
(suffix.get_datatype() is datatype):
yield name, suffix
def local_suffix_generator(a_block, datatype=False):
if (datatype is False):
for name, suffix in iteritems(a_block.component_map(Suffix)):
if suffix.get_direction() is Suffix.LOCAL:
yield name, suffix
else:
for name, suffix in iteritems(a_block.component_map(Suffix)):
if (suffix.get_direction() is Suffix.LOCAL) and \
(suffix.get_datatype() is datatype):
yield name, suffix
def active_suffix_generator(a_block, datatype=False):
if (datatype is False):
for name, suffix in iteritems(a_block.component_map(Suffix, active=True)):
yield name, suffix
else:
for name, suffix in iteritems(a_block.component_map(Suffix, active=True)):
if suffix.get_datatype() is datatype:
yield name, suffix
def suffix_generator(a_block, datatype=False):
if (datatype is False):
for name, suffix in iteritems(a_block.component_map(Suffix)):
yield name, suffix
else:
for name, suffix in iteritems(a_block.component_map(Suffix)):
if suffix.get_datatype() is datatype:
yield name, suffix
# Note: The order of inheritance here is important so that
# __setstate__ works correctly on the ActiveComponent base class.
class Suffix(ComponentMap, ActiveComponent):
"""A model suffix, representing extraneous model data"""
"""
Constructor Arguments:
direction The direction of information flow for this suffix.
By default, this is LOCAL, indicating that no
suffix data is exported or imported.
datatype A variable type associated with all values of this
suffix.
"""
# Suffix Directions:
# If more directions are added be sure to update the error message
# in the setDirection method
# neither sent to solver or received from solver
LOCAL = 0
# sent to solver or other external location
EXPORT = 1
# obtained from solver or other external source
IMPORT = 2
IMPORT_EXPORT = 3 # both
SuffixDirections = (LOCAL, EXPORT, IMPORT, IMPORT_EXPORT)
SuffixDirectionToStr = {LOCAL: 'Suffix.LOCAL',
EXPORT: 'Suffix.EXPORT',
IMPORT: 'Suffix.IMPORT',
IMPORT_EXPORT: 'Suffix.IMPORT_EXPORT'}
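# For instance, a suffix used to pull dual values back from a solver is
# typically declared on a model as:
#     model.dual = Suffix(direction=Suffix.IMPORT)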
# Suffix Datatypes
FLOAT = 4
INT = 0
SuffixDatatypes = (FLOAT, INT, None)
SuffixDatatypeToStr = {FLOAT: 'Suffix.FLOAT',
INT: 'Suffix.INT',
None: str(None)}
def __init__(self, **kwds):
# Suffix type information
self._direction = None
self._datatype = None
self._rule = None
# The suffix direction
direction = kwds.pop('direction', Suffix.LOCAL)
# The suffix datatype
datatype = kwds.pop('datatype', Suffix.FLOAT)
# The suffix construction rule
# TODO: deprecate the use of 'rule'
self._rule = kwds.pop('rule', None)
self._rule = kwds.pop('initialize', self._rule)
# Check that keyword values make sense (these function have
# internal error checking).
self.set_direction(direction)
self.set_datatype(datatype)
# Initialize base classes
kwds.setdefault('ctype', Suffix)
ActiveComponent.__init__(self, **kwds)
ComponentMap.__init__(self)
if self._rule is None:
self.construct()
def __setstate__(self, state):
"""
This method must be defined for deepcopy/pickling because this
class relies on component ids.
"""
ActiveComponent.__setstate__(self, state)
ComponentMap.__setstate__(self, state)
def construct(self, data=None):
"""
Constructs this component, applying rule if it exists.
"""
if __debug__ and logger.isEnabledFor(logging.DEBUG):
logger.debug("Constructing suffix %s", self.name)
if self._constructed is True:
return
timer = ConstructionTimer(self)
self._constructed = True
if self._rule is not None:
self.update_values(self._rule(self._parent()))
timer.report()
@deprecated('Suffix.exportEnabled is replaced with Suffix.export_enabled.')
def exportEnabled(self):
return self.export_enabled()
def export_enabled(self):
"""
Returns True when this suffix is enabled for export to
solvers.
"""
return bool(self._direction & Suffix.EXPORT)
@deprecated('Suffix.importEnabled is replaced with Suffix.import_enabled.')
def importEnabled(self):
return self.import_enabled()
def import_enabled(self):
"""
Returns True when this suffix is enabled for import from
solutions.
"""
return bool(self._direction & Suffix.IMPORT)
@deprecated('Suffix.updateValues is replaced with Suffix.update_values.')
def updateValues(self, data, expand=True):
return self.update_values(data, expand)
def update_values(self, data, expand=True):
"""
Updates the suffix data given a list of component,value
tuples. Provides an improvement in efficiency over calling
set_value on every component.
"""
if expand:
try:
items = iteritems(data)
except AttributeError:
items = data
for component, value in items:
self.set_value(component, value, expand=expand)
else:
# As implemented by MutableMapping
self.update(data)
@deprecated('Suffix.setValue is replaced with Suffix.set_value.')
def setValue(self, component, value, expand=True):
return self.set_value(component, value, expand)
def set_value(self, component, value, expand=True):
"""
Sets the value of this suffix on the specified component.
When expand is True (default), array components are handled by
storing a reference and value for each index, with no
reference being stored for the array component itself. When
expand is False (this is the case for __setitem__), this
behavior is disabled and a reference to the array component
itself is kept.
"""
if expand and component.is_indexed():
for component_ in itervalues(component):
self[component_] = value
else:
self[component] = value
@deprecated('Suffix.setAllValues is replaced with Suffix.set_all_values.')
def setAllValues(self, value):
return self.set_all_values(value)
def set_all_values(self, value):
"""
Sets the value of this suffix on all components.
"""
for ndx in self:
self[ndx] = value
@deprecated('Suffix.clearValue is replaced with Suffix.clear_value.')
def clearValue(self, component, expand=True):
return self.clear_value(component, expand)
def clear_value(self, component, expand=True):
"""
Clears suffix information for a component.
"""
if expand and component.is_indexed():
for component_ in itervalues(component):
try:
del self[component_]
except KeyError:
pass
else:
try:
del self[component]
except KeyError:
pass
@deprecated('Suffix.clearAllValues is replaced with '
'Suffix.clear_all_values.')
def clearAllValues(self):
return self.clear_all_values()
def clear_all_values(self):
"""
Clears all suffix data.
"""
self.clear()
@deprecated('Suffix.setDatatype is replaced with Suffix.set_datatype.')
def setDatatype(self, datatype):
return self.set_datatype(datatype)
def set_datatype(self, datatype):
"""
Set the suffix datatype.
"""
if datatype not in self.SuffixDatatypes:
raise ValueError("Suffix datatype must be one of: %s. \n"
"Value given: %s"
% (list(Suffix.SuffixDatatypeToStr.values()),
datatype))
self._datatype = datatype
@deprecated('Suffix.getDatatype is replaced with Suffix.get_datatype.')
def getDatatype(self):
return self.get_datatype()
def get_datatype(self):
"""
Return the suffix datatype.
"""
return self._datatype
@deprecated('Suffix.setDirection is replaced with Suffix.set_direction.')
def setDirection(self, direction):
return self.set_direction(direction)
def set_direction(self, direction):
"""
Set the suffix direction.
"""
if direction not in self.SuffixDirections:
raise ValueError("Suffix direction must be one of: %s. \n"
"Value given: %s"
% (list(self.SuffixDirectionToStr.values()),
direction))
self._direction = direction
@deprecated('Suffix.getDirection is replaced with Suffix.get_direction.')
def getDirection(self):
return self.get_direction()
def get_direction(self):
"""
Return the suffix direction.
"""
return self._direction
def __str__(self):
"""
Return a string representation of the suffix. If the name
attribute is None, then return ''
"""
name = self.name
if name is None:
return ''
return name
def _pprint(self):
return (
[('Direction', self.SuffixDirectionToStr[self._direction]),
('Datatype', self.SuffixDatatypeToStr[self._datatype]),
],
((str(k), v) for k, v in itervalues(self._dict)),
("Value",),
lambda k, v: [v]
)
# TODO: delete
@deprecated('Suffix.getValue is replaced with '
'the dict-interface method Suffix.get.')
def getValue(self, component, *args):
"""
Returns the current value of this suffix for the specified
component.
"""
# As implemented by MutableMapping
return self.get(component, *args)
# TODO: delete
@deprecated('Suffix.extractValues() is replaced with '
'the dict-interface method Suffix.items().')
def extractValues(self):
"""
Extract all data stored on this Suffix into a list of
component, value tuples.
"""
# As implemented by MutableMapping
return list(self.items())
#
# Override a few methods to make sure the ActiveComponent versions are
# called. We can't just switch the inheritance order due to
# complications with __setstate__
#
def pprint(self, *args, **kwds):
return ActiveComponent.pprint(self, *args, **kwds)
def __str__(self):
return ActiveComponent.__str__(self)
#
# Override NotImplementedError messages on ComponentMap base class
#
def __eq__(self, other):
"""Not implemented."""
raise NotImplementedError("Suffix components are not comparable")
def __ne__(self, other):
"""Not implemented."""
raise NotImplementedError("Suffix components are not comparable")
register_component(Suffix, "Declare a container for extraneous model data")
| [
"[email protected]"
] | |
b988547aeade1c42cfea0da062e3ba6a62e711c9 | c2fb6846d5b932928854cfd194d95c79c723f04c | /python/coursera_python/MICHIGAN/wikicrawler/fin5.py | 6ca16eb862d2f226ef4bed731e6432b820b1b03d | [
"MIT"
] | permissive | Jimut123/code-backup | ef90ccec9fb6483bb6dae0aa6a1f1cc2b8802d59 | 8d4c16b9e960d352a7775786ea60290b29b30143 | refs/heads/master | 2022-12-07T04:10:59.604922 | 2021-04-28T10:22:19 | 2021-04-28T10:22:19 | 156,666,404 | 9 | 5 | MIT | 2022-12-02T20:27:22 | 2018-11-08T07:22:48 | Jupyter Notebook | UTF-8 | Python | false | false | 3,200 | py | # To run this, you can install BeautifulSoup
# https://pypi.python.org/pypi/beautifulsoup4
# Or download the file
# http://www.py4e.com/code3/bs4.zip
# and unzip it in the same directory as this file
import sqlite3
conn = sqlite3.connect('wiki1.sqlite')
cur = conn.cursor()
cur.executescript('''
CREATE TABLE IF NOT EXISTS data (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
link TEXT UNIQUE
);
''')
import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
import ssl
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
url='0'
dummy = 1
next_url = '0'
i=10
#######
print("i = ",i)
while(1):
i=i+1
##########
print("i = ",i)
if dummy == 1:
url = input('Enter - ')
#######
print("url entered = ",url)
print("dummy = ",dummy)
if dummy == 0:
#######
print("dummy = ",dummy)
url = next_url
#######
print("url = ",url)
html = urllib.request.urlopen(url, context=ctx).read()
soup = BeautifulSoup(html, 'html.parser')
# Retrieve all of the anchor tags
tags = soup('a')
######
print(tags)
for tag in tags:
dummy3=0
while dummy3==0:
######
print("dummy3 = ",dummy3)
dummy3=1
try:
######
link_get = tag.get('href', None)
dummy3=1
#######
print("link_get = ",link_get)
print("dummy3 = ",1)
except ValueError:
link_get = cur.execute(''' SELECT link FROM data where id = ?''',(i,))
#######
print("link_get = ",link_get)
i=i-1
#######
print("i = ",i)
#html = urllib.request.urlopen(url, context=ctx).read()
#soup = BeautifulSoup(html, 'html.parser')
#tags = soup('a')
#i=i+1
########
print(link_get)
while(link_get == None):
########
print(link_get)
if link_get == None:
i=i-1
link_get = cur.execute(''' SELECT link FROM data where id = ?''',(i,))
#####
print("Entered here !! safe !!")
print(link_get)
while 'https:' not in link_get:
try :
if 'https:' in link_get:
print(link_get," no https: protocol changing mode");
except ValueError:
link_get = cur.execute(''' SELECT link FROM data where id = ?''',(i,))
print("link_get = ",link_get)
i=i-1
print("i = ",i)
if 'https:' in link_get:
i=i+1
print("link_get = ",link_get,"i = ",i )
if 'https:' in link_get:
next_url = link_get
print("next_url = ", next_url)
k=0
while k==0:
i=i-1
print("i = ",i)
try:
url = next_url
print("next_url : ",next_url)
print("url : ",url)
html = urllib.request.urlopen(url, context=ctx).read()
print(html)
soup = BeautifulSoup(html, 'html.parser')
print(soup)
tags = soup('a')
print(tags)
k=1
except:
url = cur.execute(''' SELECT link FROM data where id = ?''',(i,))
print(next_url," == is not valid")
print("====================================")
cur.execute('''INSERT OR IGNORE INTO data (link)
VALUES ( ? )''', ( link_get, ) )
#i=150
if(i%10 == 0):
conn.commit()
dummy = 0
conn.commit()
| [
"[email protected]"
] | |
fa8309731559f5f28c23907d10d8809df78cf6ea | 7142c3941481e661075154d714a29d5e283a3074 | /KeywordArguments.py | 7238526401ef054445d4af584ad76d295e46f26a | [] | no_license | nirajan5/Demo | 5642a9669fedcca47b0304ac423c0b3e6333b8e2 | 2451875bf5698cd38af69baa117c14099951bc9f | refs/heads/master | 2023-07-27T17:04:03.689673 | 2021-09-15T11:14:25 | 2021-09-15T11:14:25 | 406,732,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | # function func is called with the name and message as the keyword arguments
def func(name, message):
    print("printing the message with", name, "and ", message)
# name and message are copied with the values Mike and hello respectively
func(name="Mike", message="hello")
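# because both arguments are passed by keyword, their order does not matter:
# func(message="hello", name="Mike") prints exactly the same line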
| [
"[email protected]"
] | |
baf79214692455db57861792b2f1ca24f8f899e9 | 7d8022661a756f77f715ee4d099fb17cb9da671a | /feature/zoo/Skewness_Daily.py | 65ff2a7ce6ebc641dccfe4e1a9bbef343ece64da | [] | no_license | lxj0276/Quant-Util | a7d70d88fc47eb16a08149faefa7b128c01c670e | 2706ecba72a293ee01105ad22508a8d6b20e1394 | refs/heads/master | 2020-04-25T13:40:36.700892 | 2018-10-15T04:35:54 | 2018-10-15T04:35:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | from feature.base import NonPersistentFeature
from feature.ops import *
from feature.zoo.ChangeRate_Daily import ChangeRate_Daily
class Skewness_Daily(NonPersistentFeature):
    description = "Skewness of an individual stock's daily returns over the past 22 days"
    formula = 'Skewness = E[(R)^3], R=(r-mu)/sigma '
    granularity = 'day'
    def _create_feature(self, instrument_id, time_range):
        def get_skewness(x):
            neu_x = ((x - nanmean(x)) / nanstd(x)) ** 3
            return nanmean(neu_x)
        skewness = Rolling(ChangeRate_Daily(), 22, get_skewness)
        return skewness.load(instrument_id, time_range)
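# get_skewness standardises each 22-day window of daily returns and averages the
# cubed values, i.e. it is the sample third standardised moment of the window.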
| [
"[email protected]"
] | |
42d4e14bfa94bd62f6a98bae988d6c52e6d4f11d | 12139fb270a099b01e4d68ce66aa7482f9eed189 | /backend/delivery_order/migrations/0001_initial.py | b001253df4ca0fca357aab50cc409aed84cd07ea | [] | no_license | crowdbotics-apps/himi2-25342 | 610a85cba7221f3412de3ee3e00af182b9cb2fd4 | ba9c7c90e42984b26d234a281ea50f2738b2146f | refs/heads/master | 2023-04-03T05:05:43.969021 | 2021-03-29T13:35:53 | 2021-03-29T13:35:53 | 352,651,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,609 | py | # Generated by Django 2.2.19 on 2021-03-29 13:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('delivery_user_profile', '0001_initial'),
('menu', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Bill',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('total_amount', models.FloatField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('contact_info', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='bill_contact_info', to='delivery_user_profile.ContactInfo')),
('profile', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='bill_profile', to='delivery_user_profile.Profile')),
],
),
migrations.CreateModel(
name='PaymentMethod',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('detail', models.TextField()),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.IntegerField()),
('total_price', models.FloatField()),
('status', models.CharField(max_length=20)),
('notes', models.TextField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('bill', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_bill', to='delivery_order.Bill')),
('item_variant', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='order_item_variant', to='menu.ItemVariant')),
('payment_method', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='order_payment_method', to='delivery_order.PaymentMethod')),
('profile', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='order_profile', to='delivery_user_profile.Profile')),
],
),
]
| [
"[email protected]"
] | |
e48dd44a6f32d70bb408a6cef32b370ce27ed821 | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_35426.py | f2b6425c5719c07256d34c3c3090f1c6c421232f | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,841 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((416.771, 568.834, 456.01), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((478.125, 536.686, 441.652), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((543.981, 496.055, 418.157), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((425.594, 431.548, 453.614), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((718.919, 437.149, 361.466), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((459.023, 550.179, 451.747), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((458.027, 550.94, 452.46), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((436.774, 567.26, 460.512), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((430.535, 569.191, 487.703), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((427.51, 558.848, 513.509), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((442.031, 554.939, 537.2), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((469.157, 553.815, 544.912), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((440.268, 569.826, 443.081), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((505.665, 540.548, 643.871), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((666.111, 495.379, 529.545), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((666.111, 495.379, 529.545), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((639.316, 495.203, 518.441), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((612.49, 497.77, 508.274), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((587.059, 502.902, 496.319), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((563.526, 514.295, 484.818), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((542.321, 528.683, 472.335), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((523.571, 546.236, 460.099), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((629.248, 444.349, 672.606), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((412.033, 648.998, 249.421), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((549.542, 543.366, 426.292), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((549.542, 543.366, 426.292), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((530.381, 523.266, 434.841), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((517.88, 497.312, 437.865), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((516.772, 475.796, 418.388), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((400.297, 516.212, 436.788), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((630.725, 430.984, 392.126), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((453.26, 523.696, 434.586), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((453.087, 523.598, 434.473), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((427.526, 535.727, 434.791), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((417.975, 536.646, 461.346), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((438.5, 535.991, 480.778), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((458.66, 548.534, 496.147), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((471.084, 568.348, 511.772), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((484.573, 580.637, 532.861), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((508.607, 605.682, 454.219), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((457.301, 550.724, 610.16), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((507.568, 589.169, 411.108), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((504.336, 562.174, 411.716), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((494.025, 505.416, 414.401), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((483.424, 449.86, 417.225), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((408.918, 472.128, 390.765), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((523.816, 357.25, 440.525), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((475.134, 580.25, 456.521), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((477.378, 568.069, 430.952), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((469.924, 547.447, 411.567), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((490.811, 533.549, 395.781), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((513.336, 518.386, 382.281), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((535.474, 503.441, 366.558), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((485.361, 543.981, 408.528), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((592.874, 460.04, 319.901), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"[email protected]"
] | |
3b4c3ebcc95f6891385346ebc2ece840427a93c5 | 9c3f16a3474948468215a0bb8de481b685acf0b7 | /pwkit/environments/casa/util.py | 0c44962c7d305ee183f9370cd5831d29460b900a | [
"MIT"
] | permissive | BunnyBuster/pwkit | 4b738ecddf2bb4fae26a73d5a5e62cad96d5b87f | ce20bf4b0d54aa9bf2ebbaadadadeb1d4eb88ba2 | refs/heads/master | 2021-01-17T22:18:28.541008 | 2015-08-06T19:31:10 | 2015-08-06T19:31:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,772 | py | # -*- mode: python; coding: utf-8 -*-
# Copyright 2015 Peter Williams <[email protected]> and collaborators.
# Licensed under the MIT License.
"""pwkit.environments.casa.util - core utilities for the CASA Python libraries
Variables:
INVERSE_C_MS - Inverse of C in s/m (useful for wavelength to time conversion)
INVERSE_C_MNS - Inverse of C in ns/m (ditto).
pol_names - Dict mapping CASA polarization codes to their string names.
pol_to_miriad - Dict mapping CASA polarization codes to their MIRIAD equivalents.
msselect_keys - A set of the keys supported by the CASA ms-select subsystem.
tools - An object for constructing CASA tools: ``ia = tools.image ()``.
Functions:
datadir - Return the CASA data directory.
logger - Create a CASA logger that prints to stderr without leaving a
casapy.log file around.
forkandlog - Run a function in a subprocess, returning the text it outputs
via the CASA logging subsystem.
sanitize_unicode - Encode Unicode strings as bytes for interfacing with casac
functions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
__all__ = (b'''INVERSE_C_MS INVERSE_C_MNS pol_names pol_to_miriad msselect_keys
datadir logger forkandlog sanitize_unicode tools''').split ()
from ... import binary_type, text_type
# Some constants that can be useful.
INVERSE_C_MS = 3.3356409519815204e-09 # inverse speed of light in m/s
INVERSE_C_MNS = 3.3356409519815204 # inverse speed of light in m/ns
pol_names = {
0: '?',
1: 'I', 2: 'Q', 3: 'U', 4: 'V',
5: 'RR', 6: 'RL', 7: 'LR', 8: 'LL',
9: 'XX', 10: 'XY', 11: 'YX', 12: 'YY',
13: 'RX', 14: 'RY', 15: 'LX', 16: 'LY',
17: 'XR', 18: 'XL', 19: 'YR', 20: 'YL',
21: 'PP', 22: 'PQ', 23: 'QP', 24: 'QQ',
25: 'RCirc', 26: 'Lcirc', 27: 'Lin', 28: 'Ptot', 29: 'Plin',
30: 'PFtot', 31: 'PFlin', 32: 'Pang',
}
pol_to_miriad = {
# see mirtask.util for the MIRIAD magic numbers.
1: 1, 2: 2, 3: 3, 4: 4, # IQUV
5: -1, 6: -3, 7: -4, 8: -2, # R/L
9: -5, 10: -7, 11: -8, 12: -6, # X/Y
# rest are inexpressible
}
# "polarization" is technically valid as an MS selection, but it pretty much
# doesn't do what you'd want since records generally contain multiple pols.
# ms.selectpolarization() should be used instead. Maybe ditto for spw?
msselect_keys = frozenset ('array baseline field observation '
'scan scaninent spw taql time uvdist'.split ())
def sanitize_unicode (item):
"""The Python bindings to CASA tasks expect to receive all string values
as binary data (Python 2.X "str" or 3.X "bytes") and not Unicode (Python
2.X "unicode" or 3.X "str"). To prep for Python 3 (not that CASA will ever
be compatible with it ...) I true to use the unicode_literals everywhere,
and other Python modules are getting better about using Unicode
consistently, so this causes problems. This helper converts Unicode into
UTF-8 encoded bytes, handling the common data structures that are passed
to CASA functions.
I usually import this as just 'b' and write tool.method (b(arg)), in
analogy with the b'' byte string syntax.
"""
if isinstance (item, text_type):
return item.encode ('utf8')
if isinstance (item, dict):
return dict ((sanitize_unicode (k), sanitize_unicode (v)) for k, v in item.iteritems ())
if isinstance (item, (list, tuple)):
return item.__class__ (sanitize_unicode (x) for x in item)
return item
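# Call sites typically alias this function as "b", in analogy with the b''
# byte-string syntax, e.g.:
#     b = sanitize_unicode
#     sink.setlogfile (b (os.devnull))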
# Finding the data directory
def datadir (*subdirs):
import os.path
data = None
if 'CASAPATH' in os.environ:
data = os.path.join (os.environ['CASAPATH'].split ()[0], 'data')
if data is None:
# The Conda CASA directory layout:
try:
import casadef
except ImportError:
pass
else:
data = os.path.join (os.path.dirname (casadef.task_directory), 'data')
if not os.path.isdir (data):
data = None
if data is None:
import casac
prevp = None
p = os.path.dirname (casac.__file__)
while len (p) and p != prevp:
data = os.path.join (p, 'data')
if os.path.isdir (data):
break
prevp = p
p = os.path.dirname (p)
if not os.path.isdir (data):
raise RuntimeError ('cannot identify CASA data directory')
return os.path.join (data, *subdirs)
# Trying to use the logging facility in a sane way.
#
# As soon as you create a logsink, it creates a file called casapy.log.
# So we do some junk to not leave turds all around the filesystem.
def _rmtree_error (func, path, excinfo):
from ...cli import warn
warn ('couldn\'t delete temporary file %s: %s (%s)', path, excinfo[0], func)
def logger (filter='WARN'):
import os, shutil, tempfile
cwd = os.getcwd ()
tempdir = None
try:
tempdir = tempfile.mkdtemp (prefix='casautil')
try:
os.chdir (tempdir)
sink = tools.logsink ()
sink.setlogfile (sanitize_unicode (os.devnull))
os.unlink ('casapy.log')
finally:
os.chdir (cwd)
finally:
if tempdir is not None:
shutil.rmtree (tempdir, onerror=_rmtree_error)
sink.showconsole (True)
sink.setglobal (True)
sink.filter (sanitize_unicode (filter.upper ()))
return sink
def forkandlog (function, filter='INFO5', debug=False):
import sys, os
readfd, writefd = os.pipe ()
pid = os.fork ()
if pid == 0:
# Child process. We never leave this branch.
#
# Log messages of priority >WARN are sent to stderr regardless of the
# status of log.showconsole(). The idea is for this subprocess to be
# something super lightweight and constrained, so it seems best to
# nullify stderr, and stdout, to not pollute the output of the calling
# process.
#
# I thought of using the default logger() setup and dup2'ing stderr to
# the pipe fd, but then if anything else gets printed to stderr (e.g.
# Python exception info), it'll get sent along the pipe too. The
# caller would have to be much more complex to be able to detect and
# handle such output.
os.close (readfd)
if not debug:
f = open (os.devnull, 'w')
os.dup2 (f.fileno (), 1)
os.dup2 (f.fileno (), 2)
sink = logger (filter=filter)
sink.setlogfile (b'/dev/fd/%d' % writefd)
function (sink)
sys.exit (0)
# Original process.
os.close (writefd)
with os.fdopen (readfd) as readhandle:
for line in readhandle:
yield line
info = os.waitpid (pid, 0)
if info[1]:
# Because we're a generator, this is the only way for us to signal if
# the process died. We could be rewritten as a context manager.
e = RuntimeError ('logging child process PID %d exited '
'with error code %d' % tuple (info))
e.pid, e.exitcode = info
raise e
# Tool factories.
class _Tools (object):
"""This class is structured so that it supports useful tab-completion
interactively, but also so that new tools can be constructed if the
underlying library provides them.
"""
_builtinNames = ('agentflagger atmosphere calanalysis calibrater calplot componentlist '
'coordsys deconvolver fitter flagger functional image imagepol '
'imager logsink measures msmetadata ms msplot plotms regionmanager '
'simulator spectralline quanta table tableplot utils vlafiller '
'vpmanager').split ()
def __getattribute__ (self, n):
"""Returns factories, not instances."""
# We need to make this __getattribute__, not __getattr__, only because
# we set the builtin names in the class __dict__ to enable tab-completion.
import casac
if hasattr (casac, 'casac'): # casapy >= 4.0?
t = getattr (casac.casac, n, None)
if t is None:
raise AttributeError ('tool "%s" not present' % n)
return t
else:
try:
return casac.homefinder.find_home_by_name (n + 'Home').create
except Exception:
# raised exception is class 'homefinder.error'; it appears unavailable
# on the Python layer
raise AttributeError ('tool "%s" not present' % n)
for n in _Tools._builtinNames:
setattr (_Tools, n, None) # ease autocompletion
tools = _Tools ()
| [
"[email protected]"
] | |
f22e0886db6f9382a5e20f45bcfbf625ccc3c4b8 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/arc076/B/3550700.py | 8f96f6d28852e7726701218eb446f377441ebaf7 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 1,186 | py | # -*- coding: utf-8 -*-
n = int(input())
ax = []
ay = []
for i in range(n):
    x,y = map(int, input().split())
    ax.append((x,i))
    ay.append((y,i))
ax.sort()
ay.sort()
edge = []
for i in range(n-1):
    v = ax[i][1]
    u = ax[i+1][1]
    c = abs(ax[i][0]-ax[i+1][0])
    edge.append((c,v,u))
    v = ay[i][1]
    u = ay[i+1][1]
    c = abs(ay[i][0]-ay[i+1][0])
    edge.append((c,v,u))
edge.sort()
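# Kruskal's minimum spanning tree: only edges between neighbours in x-sorted or
# y-sorted order can be optimal, so scan those candidate edges in increasing
# cost order and join components with the union-find structure below.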
class UnionFind():
    def __init__(self, n):
        self.par = [i for i in range(n)]
    def find(self, x):
        if self.par[x] == x:
            return x
        else:
            self.par[x] = self.find(self.par[x])
            return self.par[x]
    def unite(self, x, y):
        x = self.find(x)
        y = self.find(y)
        if x==y:
            return
        if x<y:
            self.par[y] = x
        else:
            self.par[x] = y
    def same(self, x, y):
        return self.find(x) == self.find(y)
t = UnionFind(n)
res = 0
for e in edge:
    cost = e[0]
    v = e[1]
    u = e[2]
    if not t.same(v,u):
        # print((v,u,cost))
        t.unite(v,u)
        res += cost
print(res) | [
"[email protected]"
] | |
a7bdf555c5c6d3f96279b3733f29b9c8b469e4e2 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc114/C/4911470.py | 7120757865868e9a23be1abac95852d41aafe750 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | n=int(input())
import bisect
import itertools
l=len(str(n))
ans=[]
num=[]
x=['3', '5', '7']
for k in range(3,l+1):
    m=list(itertools.product(x, repeat=k))
    num.extend(m)
for i in range(len(num)):
    y=num[i]
    if '3' in y and '5' in y and '7' in y:
        number=''
        for j in range(len(y)):
            number+=y[j]
        ans.append(int(number))
ans.sort()
print(bisect.bisect_right(ans, n)) | [
"[email protected]"
] | |
4bfae37ee37f293153a442a5301b4ea1d1887493 | 7cd0b68c088bf8d42e048c3e4c2b3428a914b988 | /mk-ckeck-ints-py2.py | b5e236d2cf8f7ab5e2b24ee8432412a594fb0cd9 | [
"Apache-2.0"
] | permissive | mykespb/pythoner | 9603904be46298f52ce54f7e421889e6d88b1c8e | 5049b20018890d18d9fd8076ad13f176e1f037e3 | refs/heads/master | 2023-07-26T07:17:42.848231 | 2023-07-23T19:30:20 | 2023-07-23T19:30:20 | 42,587,225 | 1 | 0 | Apache-2.0 | 2022-07-06T21:09:43 | 2015-09-16T12:59:54 | HTML | UTF-8 | Python | false | false | 3,741 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# mk-check-ints.py (c) myke 2015-11-07
# check first integer predefined refs
for i in range (-10, 300):
    a = i+0
    b = i+0
    if a is b:
        print i, "equals"
    else:
        print i, "not equals"
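# CPython pre-allocates the small integers from -5 to 256 and reuses the same
# objects for them, so "a is b" holds only in that range; the captured output
# below shows exactly that boundary.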
-10 not equals
-9 not equals
-8 not equals
-7 not equals
-6 not equals
-5 equals
-4 equals
-3 equals
-2 equals
-1 equals
0 equals
1 equals
2 equals
3 equals
4 equals
5 equals
6 equals
7 equals
8 equals
9 equals
10 equals
11 equals
12 equals
13 equals
14 equals
15 equals
16 equals
17 equals
18 equals
19 equals
20 equals
21 equals
22 equals
23 equals
24 equals
25 equals
26 equals
27 equals
28 equals
29 equals
30 equals
31 equals
32 equals
33 equals
34 equals
35 equals
36 equals
37 equals
38 equals
39 equals
40 equals
41 equals
42 equals
43 equals
44 equals
45 equals
46 equals
47 equals
48 equals
49 equals
50 equals
51 equals
52 equals
53 equals
54 equals
55 equals
56 equals
57 equals
58 equals
59 equals
60 equals
61 equals
62 equals
63 equals
64 equals
65 equals
66 equals
67 equals
68 equals
69 equals
70 equals
71 equals
72 equals
73 equals
74 equals
75 equals
76 equals
77 equals
78 equals
79 equals
80 equals
81 equals
82 equals
83 equals
84 equals
85 equals
86 equals
87 equals
88 equals
89 equals
90 equals
91 equals
92 equals
93 equals
94 equals
95 equals
96 equals
97 equals
98 equals
99 equals
100 equals
101 equals
102 equals
103 equals
104 equals
105 equals
106 equals
107 equals
108 equals
109 equals
110 equals
111 equals
112 equals
113 equals
114 equals
115 equals
116 equals
117 equals
118 equals
119 equals
120 equals
121 equals
122 equals
123 equals
124 equals
125 equals
126 equals
127 equals
128 equals
129 equals
130 equals
131 equals
132 equals
133 equals
134 equals
135 equals
136 equals
137 equals
138 equals
139 equals
140 equals
141 equals
142 equals
143 equals
144 equals
145 equals
146 equals
147 equals
148 equals
149 equals
150 equals
151 equals
152 equals
153 equals
154 equals
155 equals
156 equals
157 equals
158 equals
159 equals
160 equals
161 equals
162 equals
163 equals
164 equals
165 equals
166 equals
167 equals
168 equals
169 equals
170 equals
171 equals
172 equals
173 equals
174 equals
175 equals
176 equals
177 equals
178 equals
179 equals
180 equals
181 equals
182 equals
183 equals
184 equals
185 equals
186 equals
187 equals
188 equals
189 equals
190 equals
191 equals
192 equals
193 equals
194 equals
195 equals
196 equals
197 equals
198 equals
199 equals
200 equals
201 equals
202 equals
203 equals
204 equals
205 equals
206 equals
207 equals
208 equals
209 equals
210 equals
211 equals
212 equals
213 equals
214 equals
215 equals
216 equals
217 equals
218 equals
219 equals
220 equals
221 equals
222 equals
223 equals
224 equals
225 equals
226 equals
227 equals
228 equals
229 equals
230 equals
231 equals
232 equals
233 equals
234 equals
235 equals
236 equals
237 equals
238 equals
239 equals
240 equals
241 equals
242 equals
243 equals
244 equals
245 equals
246 equals
247 equals
248 equals
249 equals
250 equals
251 equals
252 equals
253 equals
254 equals
255 equals
256 equals
257 not equals
258 not equals
259 not equals
260 not equals
261 not equals
262 not equals
263 not equals
264 not equals
265 not equals
266 not equals
267 not equals
268 not equals
269 not equals
270 not equals
271 not equals
272 not equals
273 not equals
274 not equals
275 not equals
276 not equals
277 not equals
278 not equals
279 not equals
280 not equals
281 not equals
282 not equals
283 not equals
284 not equals
285 not equals
286 not equals
287 not equals
288 not equals
289 not equals
290 not equals
291 not equals
292 not equals
293 not equals
294 not equals
295 not equals
296 not equals
297 not equals
298 not equals
299 not equals
| [
"[email protected]"
] | |
781480b6b7c9c10bf5e38599c8db5a2b48975330 | 4e3eedbf46a032c42665c3b212a48bc30652c1ed | /day09/03 作业.py | 0f6da2b7095362708f20d35f0057463e18209925 | [] | no_license | zranguai/python-learning | 7eb2a842f6f4624f550ee1c4ff7cd64ac948097a | acf19c9f85eec4bee3e3e3a00712c4a53aa9d249 | refs/heads/master | 2023-03-24T14:36:32.248437 | 2021-03-15T08:24:01 | 2021-03-15T08:24:01 | 347,874,326 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,475 | py | #
# 1. Review the key points about functions and write a blog post about them.
#
# 2. Write a function that takes a list or tuple, collects the elements at every odd index, and returns them to the caller as a new list.
# def odd_element(li):
# l1 = li[1::2]
# return l1
#
#
# l2 = [1, 2, 3, 6, 5, 8]
# tu2 = (1, 2, 3, 6, 5, 8)
# res = odd_element(l2)
# res1 = odd_element(tu2)
# print(res)
# print(res1)
# 3. Write a function that checks whether the length of the object passed in (string, list or tuple) is greater than 5.
# def judge_length(li):
# if len(li) > 5:
# return True
# else:
# return False
#
#
# print(judge_length('askdhajdaj'))
# print(judge_length([1, 2, 3, 5]))
# print(judge_length((1, 2, 3, 5, 6, 9)))
# 4. Write a function that checks the length of the list passed in; if it is greater than 2, keep only the first two items and return the new content to the caller.
# def check_length(li):
# return li[0:2] if len(li) > 2 else False
#
#
# print(check_length([1, 2, 3, 6, 8]))
# print(check_length([18]))
# 5. Write a function that counts the digits, letters and other characters in the string passed to it, and returns the result.
# s1 = '256aasdf582中文学习'
# note: i.isalpha() cannot tell Chinese characters apart from English letters
# def foo(s):
# num1 = 0
# s1 = 0
# other = 0
# for i in s:
# if i.isdigit():
# num1 += 1
# elif i.encode('utf-8').isalpha():
# s1 += 1
# else:
# other += 1
# return num1, s1, other
#
#
# res = foo('256aasdf582中文k学习')
# print(res)
# 6. Write a function that receives two numbers and returns the larger one.
# def foo(num1, num2):
# return num1 if num1 > num2 else num2
#
#
# print(foo(53, 23))
# print(foo(0, 23))
# 7. Write a function that checks the length of every value in the dict passed in; if it is greater than 2, keep only the first two items and return the new content to the caller.
# dic = {"k1": "v1v1", "k2": [11,22,33,44]} {"k1": "v1", "k2": [11,22]}
# PS: the values in the dict may only be strings or lists
# dic = {"k1": "v1v1", "k2": [11, 22, 33, 44]}
# def foo(dic):
# dic1 = {}
# for i in dic.keys():
# if len(dic[i]) > 2:
# dic1[i] = dic[i][0:2]
# return dic1
#
#
# print(foo(dic))
# 8. Write a function that accepts exactly one argument, which must be a list; it returns a dict to the caller
# whose key-value pairs are the list's indexes and the corresponding elements, e.g. for [11,22,33] it returns {0:11,1:22,2:33}.
# l1 = [11, 22, 33, 44, 25]
# def foo(l1):
# dic = {}
# for index in range(len(l1)):
# dic[index] = l1[index]
# return dic
#
#
# print(foo(l1))
# 9. Write a function that receives four parameters: name, gender, age and education.
# The user types in these four items, they are passed to the function, and the function appends them to a student_msg file.
# def foo(name, sex, age, edu):
# s1 = '姓名是:{},性别是:{},年龄是:{},学历是:{}\n'.format(name, sex, age, edu)
# with open('student_msg', mode='a', encoding='utf-8') as f:
# f.write(s1)
#
#
# foo('小明', '男', 23, '本科')
# foo('小红', '女', 21, '专科')
# 10. Extend exercise 9: keep accepting input until the user enters Q or q, default the gender to male, and set it to female when a female student is entered.
# continuous user input: while input
# # function: receives four parameters and appends them to the file.
# def foo(name, age, edu, sex='男'):
# s1 = '姓名是:{},性别是:{},年龄是:{},学历是:{}\n'.format(name, sex, age, edu)
# with open('student_msg', mode='a', encoding='utf-8') as f:
# f.write(s1)
#
#
# while True:
# if input('输入q/Q退出,输入其他继续').upper() == 'Q':
# break
# name = input('请输入姓名')
# sex = input('请输入性别')
# age = input('请输入年龄')
# edu = input('请输入学历')
# foo(name, age, edu, sex)
# Bonus (optional): write a function that takes a file name and the replacement content from the user, and batch-modifies the whole file.
#
# import os
# def foo(name, change):
# with open(name, mode='r', encoding='utf-8') as f1, \
# open(name + '.bak', mode='w', encoding='utf-8') as f2:
# old_content = f1.read()
# new_content = old_content.replace('SB', change)
# f2.write(new_content)
# os.remove(name)
# os.rename(name + '.bak', name)
#
# foo('student_msg', 'alexxx') | [
"[email protected]"
] | |
55c0ddc10dfebaabc5ebad6bab71d2634378ec9e | 887811408e187da2422900a31859925d59d4d6ec | /UniquePaths.py | 5e5e9cbb532d75cf1af78435b7bb02a6042e8d8b | [] | no_license | adityachhajer/LeetCodeJuneChallenge | 5a998baf6dc5207c56c48ccd36c82ef44f41217c | 8ed8b0c012691387e417bcf45009debe4d5f8551 | refs/heads/master | 2022-11-12T16:40:25.360578 | 2020-07-01T06:31:51 | 2020-07-01T06:31:51 | 268,749,503 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 686 | py | class Solution:
    def solve(self,n,m,t):
        if n-1==0 and m-1==0:
            return 1
        elif t[n][m]!=0:
            return t[n][m]
        else:
            if n - 1 == 0 and m - 1 != 0:
                t[n][m]=self.solve(n, m - 1, t)
                return t[n][m]
            elif n - 1 != 0 and m - 1 == 0:
                t[n][m]=self.solve(n - 1, m, t)
                return t[n][m]
            else:
                t[n][m]=self.solve(n - 1, m, t) + self.solve(n, m - 1, t)
                return t[n][m]
    def uniquePaths(self, m: int, n: int) -> int:
        t=[[0 for _ in range(m+1)]for _ in range(n+1)]
return self.solve(n,m,t) | [
"[email protected]"
] | |
a4121b364c978347da27194b9061f3bd495259a0 | 871b3fa6647983570ecc0a8f4764dd2af4765427 | /roxanne/main.py | 08b439409202f5e1b432bc061bb3ebca88dcd4b2 | [] | no_license | kwarwp/anita | 5eaa8f6b587bdc8d702aeae775803bec60856fbc | b813492175365a0f7b8934c710cc09b0ff26763f | refs/heads/master | 2022-10-17T01:02:37.845442 | 2022-09-26T15:05:06 | 2022-09-26T15:05:06 | 216,602,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | # anita.roxanne.main.pyhttps://i.imgur.com/u1dActr.jpgV
from _spy.violino.main import cena, elemento, texto, STYLE
STYLE["width"]=600
STYLE["heigth"] = "200px"
linkdatalita="https://i.imgur.com/6rLmVNz.jpg"
linkdocolete="https://i.imgur.com/PV7WWPJ.jpg"
linkquartodatalita="https://i.imgur.com/wdKENXo.jpg"
linkdosubmarino="https://i.imgur.com/fJWGYNu.jpg"
linkdoquadro1="https://i.imgur.com/ydF1bV2.jpg"
linkdoquadro2="https://i.imgur.com/u1dActr.jpg"
img_moeda=" | [
"[email protected]"
] | |
f3a01b868d231358e9f762b09e2d8400c33d7e03 | b39d9ef9175077ac6f03b66d97b073d85b6bc4d0 | /Torisel_WC500039912.1.py | e171724fdab26401b59865112ccd9ffbabce2d73 | [] | no_license | urudaro/data-ue | 2d840fdce8ba7e759b5551cb3ee277d046464fe0 | 176c57533b66754ee05a96a7429c3e610188e4aa | refs/heads/master | 2021-01-22T12:02:16.931087 | 2013-07-16T14:05:41 | 2013-07-16T14:05:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,874 | py | {'_data': [['Common',
[['Immune system',
u'Diabetes mellitus 10 (3,1) 2 (0,6) Dehydrering 17 (5,3) 8 (2,5) Hypokalcemi 21 (6,5) 5 (1,6) Hypofosfatemi 26 (8,1) 14 (4,4) Hyperlipidemi 4 (1,2) 0 (0,0) Psykiska Mycket vanliga Insomni 45 (14,0) 1 (0,3) st\xf6rningar Depression 16 (5,0) 0 (0,0)'],
['Immune system', u'\xc5ngest 28 (8,7) 0 (0,0)'],
['Eye',
u'Pneumonit 7 (2,2) 2 (0,6) Interstitiell lungsjukdom 6 (1,9) 3 (0,9) Pleurav\xe4tska 19 (5,9) 9 (2,8)'],
['Eye',
u'Gastrointestinal bl\xf6dning (inklusive anal-, rektal-, haemorroidal-, l\xe4pp och munbl\xf6dning, bl\xf6dning i 16 (5,0) 4 (1,2) tandk\xf6tt) Gastrit *** 7 (2,1) 2 (0,6) Dysfagi 13 (4,0) 0 (0,0) Utsp\xe4nd buk 14 (4,4) 1 (0,3) Aft\xf6s stomatit 15 (4,7) 1 (0,3) Oral sm\xe4rta 9 (2,8) 1 (0,3) Gingivit 6 (1,9) 0 (0,0)'],
['Skin',
u'Vanliga Dermatit 6 (1,9) 0 (0,0) Exfoliativa utslag 5 (1,6) 0 (0,0) Akne 15 (4,7) 0 (0,0) Nagelsjukdom 24 (8,1) 0 (0,0) Bl\xe5m\xe4rken**** 5 (1,6) 0 (0,0) Petekier 4 (1,2) 0 (0,0) Muskuloskeletala Mycket vanliga Ledv\xe4rk 50 (15,6) 2 (0,6) systemet och Ryggsm\xe4rta 53 (16,5) 8 (2,5) bindv\xe4v Vanliga Muskelsm\xe4rta 19 (5,9) 0 (0,0) Njurar och Vanliga Njursvikt 5 (1,6) 0 (0,0) urinv\xe4gar Allm\xe4nna symtom Mycket vanliga Fatigue 133 (41,4) 31 (9,7) och/eller symtom \xd6dem (inklusive generellt \xf6dem, vid ansikts\xf6dem, perifert \xf6dem, 122 (38,0) 11 (3,4) administreringsst\xe4 testikel\xf6dem, genitala \xf6dem ) llet Asteni 67 (20,9)) 16 (5,0) Mukosit 66 (20,6) 7 (2,2) Feber 91 (28,3) 5 (1,6) Sm\xe4rta 36 (11,2) 7 (2,2) Frossa 32 (10,0) 1 (0,3) Br\xf6stsm\xe4rta 32 (10,0) 1 (0,3)'],
['Skin', u'Vanliga F\xf6rh\xf6jt aspartataminotransferas 27 (8,4) 5 (1,6)'],
['Skin',
u'Vanliga F\xf6rh\xf6jt alaninaminotransferas 17 (5,3) 2 (0,6) a, b, c: inklusive ett d\xf6dsfall i varje fall']]],
['Uncommon',
[['Eye',
u'Bl\xf6dning i \xf6gat 3 (0,9) 0 (0,0) Hj\xe4rtat Mindre vanliga Perikardv\xe4tska 3 (0,9) 1 (0,3) Blodk\xe4rl Vanliga Ven\xf6s tromboembolism (inklusive djup ventrombos, ven\xf6s trombos) 7 (2,2) 4 (1,2) Tromboflebit 4 (1,2) 0 (0,0) Hypertension 20 (6,2) 3 (0,9) Andningsv\xe4gar, Mycket vanliga Dyspn\xe9 79 (24,6) 27 (8,4) br\xf6stkorg och N\xe4sbl\xf6dning *** 69 (21,5) 1 (0,3) mediastinum Hosta 93 (29) 3 (0,9)'],
['Eye',
u'Lungemboli 2 (0,6) 1 (0,3) Magtarmkanalen Mycket vanliga Illam\xe5ende 109 (34,0) 5 (1,6) Diarr\xe9 109 (34,0) 16 (5,0) Stomatit 67 (20,9) 3 (0,9) Kr\xe4kningar 57 (17,8) 4 (1,2) F\xf6rstoppning 56 (17,4) 0 (0,0) Buksm\xe4rta 56 (17,4) 10 (3,1)'],
['Eye', u'Mindre vanliga Tarmperforation 2 (0,6) 1 (0,3)'],
['Skin',
u'Mindre vanliga F\xf6rs\xe4mrad s\xe5rl\xe4kning 2 (0,6) 0 (0,0) Unders\xf6kningar Mycket vanliga F\xf6rh\xf6jt blodkreatinin 35 (10,9) 4 (1,2)']]],
['Unknown',
[['Immune system',
u'utl\xf6sta reaktioner 24 (7,5) 1 (0,3) Metabolism och Mycket vanliga Hyperglykemi 63 (19,6) 31 (9,7) nutrition Hyperkolesterolemi 60 (18,79) 1 (0,3) Hypertriglyceridemi 56 (17,4) 8 (2,5) Minskad aptit 107 (33,3) 9 (2,8) Hypokalemi 44 (13,7) 13 (4,0)'],
['Eye', u'sjukdomar i t\xe5rapparaten)'],
['Skin',
u'utslag, makulopapul\xf6st utslag, generella utslag, makul\xe4ra utslag, 138 (43,0) 16 (5,0) pustul\xf6st utslag) Pruritus (inklusive generell pruritus) 69 (21,5) 4 (1,2) Torr hud 32 (10,0) 1 (0,3)'],
['Immune system',
u'reaktioner Hud och subkutan v\xe4vnad Ingen k\xe4nd frekvens Stevens-Johnson syndrom Muskuloskeletala systemet Ingen k\xe4nd frekvens Rabdomyolys och bindv\xe4v']]]],
'_pages': [9, 14],
u'_rank': 15,
u'_type': u'LSFU'} | [
"daro@daro-ThinkPad-X220.(none)"
] | daro@daro-ThinkPad-X220.(none) |
4049a02fd60ab4a249f4d40702531b9eafed09fd | a9fe1b5c320cdef138ac4a942a8b741c7f27de7c | /LC742-Closest-Leaf-in-a-Binary-Tree.py | f3a487514bfb862775af04044d8e89b47e295321 | [] | no_license | kate-melnykova/LeetCode-solutions | a6bbb5845310ce082770bcb92ef6f6877962a8ee | ee8237b66975fb5584a3d68b311e762c0462c8aa | refs/heads/master | 2023-06-28T06:35:33.342025 | 2021-07-30T06:59:31 | 2021-07-30T06:59:31 | 325,106,033 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,287 | py | """
Given a binary tree where every node has a unique value, and a target key k,
find the value of the nearest leaf node to target k in the tree.
Here, nearest to a leaf means the least number of edges travelled on the binary
tree to reach any leaf of the tree. Also, a node is called a leaf if it has no
children.
In the following examples, the input tree is represented in flattened form row
by row. The actual root tree given will be a TreeNode object.
Example 1:
Input:
root = [1, 3, 2], k = 1
Diagram of binary tree:
1
/ \
3 2
Output: 2 (or 3)
Explanation: Either 2 or 3 is the nearest leaf node to the target of 1.
Example 2:
Input:
root = [1], k = 1
Output: 1
Explanation: The nearest leaf node is the root node itself.
Example 3:
Input:
root = [1,2,3,4,null,null,null,5,null,6], k = 2
Diagram of binary tree:
1
/ \
2 3
/
4
/
5
/
6
Output: 3
Explanation: The leaf node with value 3 (and not the leaf node with value 6)
is nearest to the node with value 2.
Note:
root represents a binary tree with at least 1 node and at most 1000 nodes.
Every node has a unique node.val in range [1, 1000].
There exists some node in the given binary tree for which node.val == k.
"""
from TreeNode import TreeNode
class Solution:
def findClosestLeaf(self, root: TreeNode, k: int) -> int:
"""
Time complexity: O(n)
Space complexity: O(n)
"""
# assign parent
root.parent = None
self.assignParent(root)
# compute distance the closest leaf downwards and the leaf value
self.distToLeaf(root)
# find the node with value k
node = self.getNode(root, k)
# find the distance to the closest leaf
closest = node.to_leaf + 1
leaf_value = node.leaf_value
node = node.parent
steps_up = 2
while node is not None:
if node.to_leaf + steps_up < closest:
closest = node.to_leaf + steps_up
leaf_value = node.leaf_value
node = node.parent
steps_up += 1
return leaf_value
def distToLeaf(self, root: TreeNode):
"""
Time complexity: O(n)
Space complexity: O(n)
"""
if root is None:
pass
elif root.left is None and root.right is None:
root.to_leaf = 1
root.leaf_value = root.val
else:
self.distToLeaf(root.left)
self.distToLeaf(root.right)
if getattr(root.left, 'to_leaf', float('inf')) < getattr(root.right, 'to_leaf', float('inf')):
root.to_leaf = root.left.to_leaf + 1
root.leaf_value = root.left.leaf_value
else:
root.to_leaf = root.right.to_leaf + 1
root.leaf_value = root.right.leaf_value
def assignParent(self, root: TreeNode):
"""
Time complexity: O(n)
Space complexity: O(n)
"""
if root.left is not None:
root.left.parent = root
self.assignParent(root.left)
if root.right is not None:
root.right.parent = root
self.assignParent(root.right)
def getNode(self, root: TreeNode, k: int) -> TreeNode:
# find the node with value k
level = [root, ]
while level:
new_level = []
for node in level:
if node.val == k:
return node
if node.left is not None:
new_level.append(node.left)
if node.right is not None:
new_level.append(node.right)
level = list(new_level)
if __name__ == '__main__':
from run_tests import run_tests
correct_answers = [
[[1, 3, 2], 1, 2],
[[1], 1, 1],
[[1,2,3,4,None,None,None,5,None,6], 2, 3],
[[1, 2, 3, 4, None, None, None, 5, None, 6], 5, 6],
[[1, 2, 3, 4, None, None, None, 5, None, 6], 1, 3]
]
for i in range(len(correct_answers)):
correct_answers[i][0] = TreeNode.to_treenode(correct_answers[i][0])
print(f'Running tests for findClosestLeaf')
run_tests(Solution().findClosestLeaf, correct_answers) | [
"[email protected]"
] | |
fb8a1445699331f925683dbd999d5a4054e78cd8 | 6d45ba4adff74b2cb1b6764dc684f37407b41ba9 | /PirateBoxMessageBoard/settings.py | 1193cea6cdedb820fe1d514892d7e3735830b66f | [] | no_license | bussiere/PirateBoxMessageBoard | bbf478af1886caf811f38802bde5528593bba2c4 | 8626a8a44d5bdbf06486fac65682a50e4209396d | refs/heads/master | 2021-01-23T11:56:22.905167 | 2013-03-08T16:33:08 | 2013-03-08T16:33:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,926 | py | # Django settings for DrakBus project.
try :
import dj_database_url
except :
pass
import os.path
PROJECT_ROOT = '/home/pi/PirateBox/PirateBoxMessageBoard' # The '/..' is needed to work with Django 1.4+, remove for older versions.
DEBUG = False
TEMPLATE_DEBUG = DEBUG
INTERNAL_IPS = ('127.0.0.1',)
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '/home/pi/PirateBox/PirateBoxMessageBoard/PirateBoxMessageBoard.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
ALLOWED_HOSTS = ['*']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Winnipeg'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/m/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/s/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'staticfiles'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'tamereenslip'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'message.middleware.UserBasedExceptionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
# Include the default Django email handler for errors
# This is what you'd get without configuring logging at all.
'mail_admins': {
'class': 'django.utils.log.AdminEmailHandler',
'level': 'ERROR',
# But the emails are plain text by default - HTML is nicer
'include_html': True,
},
# Log to a text file that can be rotated by logrotate
'logfile': {
'class': 'logging.handlers.WatchedFileHandler',
'filename': '/home/pi/myapp.log'
},
},
'loggers': {
# Again, default Django configuration to email unhandled exceptions
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
# Might as well log any errors anywhere else in Django
'django': {
'handlers': ['logfile'],
'level': 'ERROR',
'propagate': False,
},
# Your own app - this assumes all your logger names start with "myapp."
'message': {
'handlers': ['logfile'],
'level': 'WARNING', # Or maybe INFO or DEBUG
'propagate': False
},
},
}
ROOT_URLCONF = 'urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'wsgi.application'
TEMPLATE_DIRS = (
os.path.join(PROJECT_ROOT, 'templates'),
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'message',
'django.contrib.admin',
'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
| [
"[email protected]"
] | |
532900bebd339267ebc10dfaff3592998f41e76f | 5b22437902bffa0f62b375d56bfb2b4485ef43f0 | /src/video_inpainting/create_padded_masked_video_dataset.py | 2550634c0f8d379f97bcce53a9e71e7662817ff7 | [
"MIT",
"CC-BY-SA-3.0",
"CC-BY-SA-4.0"
] | permissive | JohnsonzxChang/devil | eafa09f5258b4f33eda9564077814c6e63473a0f | 296115cd5f4952c7dc65bbcaaf2d1d5c55ef5d35 | refs/heads/public | 2023-07-03T12:07:58.917440 | 2021-08-10T00:06:38 | 2021-08-10T00:06:38 | 555,846,483 | 1 | 0 | MIT | 2022-10-22T13:22:43 | 2022-10-22T13:22:42 | null | UTF-8 | Python | false | false | 820 | py | import os
from .padded_masked_video_folder_dataset import PaddedMaskedVideoFolderDataset
from .padded_masked_video_tar_dataset import PaddedMaskedVideoTarDataset
def create_padded_masked_video_dataset(frames_dataset_path, masks_dataset_path):
if os.path.isdir(frames_dataset_path) and os.path.isdir(masks_dataset_path):
return PaddedMaskedVideoFolderDataset(frames_dataset_path, masks_dataset_path)
else:
_, frames_dataset_ext = os.path.splitext(frames_dataset_path)
_, masks_dataset_ext = os.path.splitext(masks_dataset_path)
if frames_dataset_ext == '.tar' and masks_dataset_ext == '.tar':
return PaddedMaskedVideoTarDataset(frames_dataset_path, masks_dataset_path)
else:
raise ValueError('Given paths must both be directories or .tar files')
| [
"[email protected]"
] | |
06e6a38e865091a553dcec1b2b286abf65848f83 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_344/ch53_2019_03_31_15_57_40_910628.py | 842c153c78e87eff9b5ce32522a0897f56a27f48 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | def inverte_lista(lista):
i=0
while i<len(lista):
lista=lista[::-1]
return lista
| [
"[email protected]"
] | |
ba50dd86cdaa99cd34695548a76e5f6592516bc7 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2696/60765/305370.py | 3d1f7821f1511367fc86d5ab1dbfcfe888cd8ca5 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,954 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import math
import sys
import re
from collections import *
from itertools import *
from functools import *
def solve():
# =list(map(int,input().split()))
# =int(input())
# def root(i):
# if unions[i]<0:
# return i
# else:
# return root(unions[i])
# def union(x,y):
# roota=root(x)
# rootb=root(y)
# # unions[roota] += unions[rootb]
# unions[rootb]=roota
# n =input()[2:-2].split('],[')
# target=int(input())
n=int(input())
a=[]
a.append(list(map(int,input().split())))
a.append(list(map(int,input().split())))
a.append(list(map(int,input().split())))
a.append(a[2])
dp=[[1,1,1,1]]
for i in range(n-1):
dp.append([0,0,0,0])
for k in range(1,n):
for i in range(4):
for j in range(k):
if a[i][j]<=dp[k][0]:
dp[k][0]=max(dp[k][0],dp[j][i]+1)
if a[i][j] >= dp[k][1]:
dp[k][1] = max(dp[k][1], dp[j][i] + 1)
if a[i][j] <= dp[k][2] and j!=3:
dp[k][2] = max(dp[k][2], dp[j][i] + 1)
if a[i][j] >= dp[k][3] and j!=2:
dp[k][3] = max(dp[k][3], dp[j][i] + 1)
res=0
for i in range(4):
res=max(dp[i][-1],res)
m=a[0][0]
if n == 7 and m == 19:
print('7',end='')
elif n == 5 and m == 1:
print('5',end='')
elif n == 6 and m == 1:
print('6')
elif n == '3' and m == '1':
print('32')
elif n == '1' and m == '3':
print('4')
elif n == '15' and m == '1':
print('704')
elif n == '3' and m == '35':
print('10')
elif n == '18' and m == '1'and l=='2':
print('859')
elif n == '' and m == '':
print('')
elif n == '' and m == '':
print('')
else:
print(n)
print(m)
solve()
| [
"[email protected]"
] | |
175a95df1b171942b913d77561029a1915f14dea | 7162c36b73d97c82b165d6fd14d568552a0269d8 | /setup.py | 37f71d57aab9125965f253b2077c743f7b916d16 | [] | no_license | welbornprod/findfunc | d90cbe0110a0f9b656b0ff70846e0c29a583f703 | 0247cba193fb3193c60399c3d2f9910e85319493 | refs/heads/master | 2021-01-19T09:29:10.707496 | 2019-04-02T23:14:08 | 2019-04-02T23:14:08 | 87,763,285 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,355 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
FindFunc Setup
-Christopher Welborn 04-09-2017
"""
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# Try using the latest DESC.txt.
shortdesc = 'Finds function definitions/signatures from the command line.'
try:
with open('DESC.txt', 'r') as f:
shortdesc = f.read()
except FileNotFoundError:
pass
# Default README files to use for the longdesc, if pypandoc fails.
readmefiles = ('docs/README.txt', 'README.txt', 'docs/README.rst')
for readmefile in readmefiles:
try:
with open(readmefile, 'r') as f:
longdesc = f.read()
break
except EnvironmentError:
# File not found or failed to read.
pass
else:
# No readme file found.
# If a README.md exists, and pypandoc is installed, generate a new readme.
try:
import pypandoc
except ImportError:
print('Pypandoc not installed, using default description.')
longdesc = shortdesc
else:
# Convert using pypandoc.
try:
longdesc = pypandoc.convert('README.md', 'rst')
except EnvironmentError:
# No readme file, no fresh conversion.
print('Pypandoc readme conversion failed, using default desc.')
longdesc = shortdesc
setup(
name='FindFunc',
version='0.4.4',
author='Christopher Welborn',
author_email='[email protected]',
packages=['findfunc'],
url='https://github.com/welbornprod/findfunc',
description=shortdesc,
long_description=longdesc,
keywords=(
'python 3 command line tool function class definition signature'
),
classifiers=[
'License :: OSI Approved :: MIT License',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires=[
'colr >= 0.8.1',
'docopt >= 0.6.2',
'pygments >= 2.1.3',
'printdebug >= 0.3.0',
],
entry_points={
'console_scripts': [
'findfunc = findfunc.__main__:entry_point',
],
}
)
| [
"[email protected]"
] | |
46cb1c572815f8a6d20635acff1d237fcd0d8db4 | 215bf668b69b5ebea1b538be217a7cd91db1772a | /bin/condor-compute-psd | 7e2e02878351d44edbb14260ba2a9af208a2b15f | [
"MIT"
] | permissive | reedessick/exposure | a2fce2e0a2ff059200d0498ae68e81a6e2bb1b9c | 10aeb1fb29befbbb305d65c379d983fd8bad5693 | refs/heads/master | 2022-04-05T09:36:15.969152 | 2020-02-28T23:24:05 | 2020-02-28T23:24:05 | 105,042,945 | 0 | 0 | MIT | 2019-08-08T16:09:08 | 2017-09-27T16:35:52 | Python | UTF-8 | Python | false | false | 6,825 | #!/usr/bin/env python
__doc__ = "a very basic wrapper that schedules `compute-psd` jobs. The resulting DAG should run to completion if everything worked correctly (i.e. nodes should not raise exceptions"
__author__ = "Reed Essick ([email protected])"
#-------------------------------------------------
import os
import getpass ### for default accounting_group_user
import subprocess as sp
from distutils.spawn import find_executable
from argparse import ArgumentParser
### non-standard libraries
from exposure import utils
from exposure import datafind
#-------------------------------------------------
parser = ArgumentParser(description=__doc__)
parser.add_argument('channel', type=str)
parser.add_argument('frametype', type=str)
parser.add_argument('gpsstart', type=int)
parser.add_argument('gpsstop', type=int)
parser.add_argument("-v", "--verbose", default=False, action="store_true")
parser.add_argument("-V", "--Verbose", default=False, action="store_true")
parser.add_argument("--include-flag", default=[], type=str, action='append',
help='the flags used to select subsets of [gpsstart, gpsstop] for analysis. \
Can be repeated to take the intersection of multiple flags. \
DEFAULT=[] (analyze all time in [gpsstart, gpsstop]).')
parser.add_argument("--exclude-flag", default=[], type=str, action='append',
help='the same as --include-flag, except we only retain times that are \
outside of these flags instead of inside them')
parser.add_argument("--win", default=60, type=int,
help="estimate PSDs separately in sequential windows of this duration. \
DEFAULT=60")
parser.add_argument("--seglen", default=4, type=int,
help='the length of segments used to estimate the PSD via an averaging procedure (specify in seconds). \
NOTE: if we do not obtain an integer number of segments based on --seglen, --overlap, gpsstart, and gpsstop, \
we will raise a ValueError. DEFAULT=4')
parser.add_argument("--overlap", default=2, type=float,
help='the amount of time overlapped for segments used to estimate the PSD (specify in seconds). \
NOTE: if we do not obtain an integer number of segments based on --seglen, --overlap, gpsstart, and gpsstop, \
we will raise a ValueError. DEFAULT=2')
parser.add_argument("--tukey-alpha", default=0.50, type=float,
help='the Tukey "alpha" value used for windowing the DFT. \
DEFAULT=0.50')
parser.add_argument('--universe', default='vanilla', type=str,
help='DEFAULT=vanilla')
parser.add_argument('--exe', default='compute-psd', type=str,
help='specify the explicit path to the executable. \
DEFAULT=compute-psd')
parser.add_argument('--accounting-group', default=utils.DEFAULT_ACCOUNTING_GROUP, type=str)
parser.add_argument('--accounting-group-user', default=getpass.getuser(), type=str,
help='DEFAULT='+getpass.getuser())
parser.add_argument('--retry', default=utils.DEFAULT_RETRY, type=int)
parser.add_argument('--psd-suffix', default='csv.gz', type=str)
parser.add_argument("-o", "--output-dir", default='.', type=str)
parser.add_argument("-t", "--tag", default="", type=str)
parser.add_argument('-s', '--condor-submit', default=False, action='store_true',
help='submit the DAG to condor')
args = parser.parse_args()
stride = args.gpsstop - args.gpsstart
assert args.channel[0]==args.frametype[0], 'I do not believe you want a channel and frametype \
from different IFOs\n\tchannel : %s\n\tframetype : %s'%(args.channel, args.frametype)
assert args.seglen > args.overlap, '--seglen must be larger than --overlap'
if args.tag:
filetag = "_"+args.tag
else:
filetag = ""
args.output_dir = os.path.abspath(args.output_dir)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
logdir = os.path.join(args.output_dir, 'log')
if not os.path.exists(logdir):
os.makedirs(logdir)
args.verbose |= args.Verbose
#-------------------------------------------------
### query segments to define individual runs
### ensure we have proper coverage
segments = [[args.gpsstart, args.gpsstop]]
segments = datafind.include_flags(segments, args.include_flag, args.gpsstart, stride, verbose=args.verbose)
segments = datafind.exclude_flags(segments, args.exclude_flag, args.gpsstart, stride, verbose=args.verbose)
### check to make sure we have livetime left, etc
assert len(segments), 'no remaining livetime after filtering by flags!'
lvtm = utils.livetime(segments) ### amount of time requested within segments
#------------------------
### write sub file
subname = "%s/compute-psd%s-%d-%d.sub"%(args.output_dir, filetag, args.gpsstart, stride)
if args.verbose:
print( "writing : "+subname )
with open(subname, 'w') as f:
f.write(utils.compute_psd_sub%{\
'universe' : args.universe,
'exe' : os.path.abspath(find_executable(args.exe)),
'channel' : args.channel,
'frametype' : args.frametype,
'accounting_group' : args.accounting_group,
'accounting_group_user' : args.accounting_group_user,
'tag' : "--tag "+args.tag if args.tag else '',
'filetag' : filetag,
'start' : args.gpsstart,
'dur' : stride,
'seglen' : args.seglen,
'overlap' : args.overlap,
'tukey_alpha' : args.tukey_alpha,
'suffix' : args.psd_suffix,
})
### iterate over segments and define compute-psd jobs for each
dagname = subname.replace('.sub', '.dag')
if args.verbose:
print( "writing : "+dagname )
with open(dagname, 'w') as f:
covered = 0 ### amount of time that's covered by a PSD estimate
for segstart, segstop in segments:
segdur = segstop - segstart
if args.verbose:
print( "scheduling jobs for %d -- %d"%(segstart, segstop) )
s = (segstart/args.win)*args.win ### line-up start with integer number of windows. Needed to guarantee files will line up later -> integer division!
if s < segstart: ### mostly likely case, but we need to check just in case
s += args.win
while s+args.win < segstop:
f.write(utils.compute_psd_dag%{\
'jobid' : '%d'%s,
'sub' : subname,
'gpsstart' : s,
'gpsstop' : s+args.win,
'retry' : args.retry,
'outdir' : args.output_dir,
})
s += args.win
covered += args.win
#-------------------------------------------------
if args.verbose: ### report amount of time covered
print( 'requested : %d sec'%stride )
print( 'within segments : %d sec'%lvtm )
print( 'covered by PSD : %d sec'%covered )
### submit
if args.condor_submit:
if args.verbose:
print( 'submitting : '+dagname )
import subprocess as sp
sp.Popen(['condor_submit_dag', dagname]).wait()
elif args.verbose:
print( 'you can now submit : '+dagname )
| [
"[email protected]"
] | ||
42c5f9db30567097bc2bfb0f4424be748d2301fc | 0b5be4b9162c19cf0d98972e52ce80aa8af47f0a | /Thread/thread/concurrent_futures.py | 5b0040e31097304271f9bf34a179dee1e60395c7 | [] | no_license | Air-Zhuang/Test35 | 374c974a2a7693fff21be81278c1bb59a050f7ee | d9f92b7a025c91b7503f02afc896ac769f818a84 | refs/heads/master | 2021-06-19T12:36:13.383878 | 2019-09-21T08:02:43 | 2019-09-21T08:02:43 | 147,629,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,004 | py | '''
用于线程池和进程池编程(顶层的包,高度封装)
主线程中可以获取某一个线程的状态或者某一个任务的状态,以及返回值
当一个线程完成的时候我们主线程能立即知道
futures可以让多线程和多进程编码接口一致
'''
from concurrent.futures import ThreadPoolExecutor,as_completed,wait
import time
def get_html(times):
time.sleep(times)
print("get page {} success".format(times))
return str(times) #使用线程池可以获取返回值
'''基本用法'''
exector=ThreadPoolExecutor(max_workers=2) #创造最大进程数为2的线程池
task1=exector.submit(get_html,(0.5)) #传参必须这么写,不知道原因
task2=exector.submit(get_html,(0.3))
task3=exector.submit(get_html,(0.4))
print("task3任务已取消:",task3.cancel()) #取消任务(任务必须还未开始执行)
print("task1任务已完成:",task1.done()) #判断任务是否已执行完(立即执行,不会被上面的代码阻塞)
time.sleep(1)
print("task1任务已完成:",task1.done())
print("task1返回值:",task1.result()) #可以获取任务的返回值
print()
'''获取已经完成的task的返回'''
urls=[2,1,3]
all_task=[exector.submit(get_html,(i)) for i in urls]
wait(all_task) #等待某个任务执行完成,必须传iterable
print("main")
for i in as_completed(all_task):
res=i.result()
print("返回值为:",res)
print()
'''通过executor获取已经完成的task的返回'''
for i in exector.map(get_html,urls):
print("返回值为:", i)
print()
'''with'''
def fib(n):
if n<2:
return 1
return fib(n-1)+fib(n-2)
with ThreadPoolExecutor(3) as exector:
all_task=[exector.submit(fib,(num)) for num in range(25,35)]
start_time=time.time()
for i in as_completed(all_task):
res = i.result()
print("exe result:{}".format(res))
print(time.time()-start_time)
print()
| [
"[email protected]"
] | |
c97c300158c4f94cf5638626ee2d67678df0fbee | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/network/v20200401/get_virtual_network_gateway_learned_routes.py | dc0c3a5ca849b2e3f6452ce8aa86df4484b07682 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 2,485 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetVirtualNetworkGatewayLearnedRoutesResult',
'AwaitableGetVirtualNetworkGatewayLearnedRoutesResult',
'get_virtual_network_gateway_learned_routes',
]
@pulumi.output_type
class GetVirtualNetworkGatewayLearnedRoutesResult:
"""
List of virtual network gateway routes.
"""
def __init__(__self__, value=None):
if value and not isinstance(value, list):
raise TypeError("Expected argument 'value' to be a list")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def value(self) -> Optional[Sequence['outputs.GatewayRouteResponse']]:
"""
List of gateway routes.
"""
return pulumi.get(self, "value")
class AwaitableGetVirtualNetworkGatewayLearnedRoutesResult(GetVirtualNetworkGatewayLearnedRoutesResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVirtualNetworkGatewayLearnedRoutesResult(
value=self.value)
def get_virtual_network_gateway_learned_routes(resource_group_name: Optional[str] = None,
virtual_network_gateway_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkGatewayLearnedRoutesResult:
"""
List of virtual network gateway routes.
:param str resource_group_name: The name of the resource group.
:param str virtual_network_gateway_name: The name of the virtual network gateway.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['virtualNetworkGatewayName'] = virtual_network_gateway_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20200401:getVirtualNetworkGatewayLearnedRoutes', __args__, opts=opts, typ=GetVirtualNetworkGatewayLearnedRoutesResult).value
return AwaitableGetVirtualNetworkGatewayLearnedRoutesResult(
value=__ret__.value)
| [
"[email protected]"
] | |
c52d224e402e87d8d7bbd7f8372abf8d931c4167 | 02338bb8111fc1aa88e830ac09a11664720eb2d4 | /tmp/azure_rm_jobversion.py | 544592c6f9d2166c6ff192b0e8a5c5bd17aecd3a | [] | no_license | Fred-sun/fred_yaml | a49977b0e8505c7447df23dd80c7fef1be70e6bc | 295ca4cd2b59b8d2758f06eb7fd79920327ea524 | refs/heads/master | 2023-04-28T05:51:56.599488 | 2023-04-25T13:52:10 | 2023-04-25T13:52:10 | 131,376,340 | 0 | 1 | null | 2020-07-06T14:22:46 | 2018-04-28T05:34:49 | TSQL | UTF-8 | Python | false | false | 7,350 | py | #!/usr/bin/python
#
# Copyright (c) 2020 GuopengLin, (@t-glin)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_jobversion
version_added: '2.9'
short_description: Manage Azure JobVersion instance.
description:
- 'Create, update and delete instance of Azure JobVersion.'
options:
resource_group_name:
description:
- >-
The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
required: true
type: str
server_name:
description:
- The name of the server.
required: true
type: str
job_agent_name:
description:
- The name of the job agent.
required: true
type: str
job_name:
description:
- The name of the job.
required: true
type: str
job_version:
description:
- The version of the job to get.
required: true
type: integer
state:
description:
- Assert the state of the JobVersion.
- >-
Use C(present) to create or update an JobVersion and C(absent) to delete
it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
author:
- GuopengLin (@t-glin)
'''
EXAMPLES = '''
'''
RETURN = '''
id:
description:
- Resource ID.
returned: always
type: str
sample: null
name:
description:
- Resource name.
returned: always
type: str
sample: null
type:
description:
- Resource type.
returned: always
type: str
sample: null
'''
import time
import json
import re
from ansible.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
from copy import deepcopy
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.sql import SqlManagementClient
from msrestazure.azure_operation import AzureOperationPoller
from msrest.polling import LROPoller
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
NoAction, Create, Update, Delete = range(4)
class AzureRMJobVersion(AzureRMModuleBaseExt):
def __init__(self):
self.module_arg_spec = dict(
resource_group_name=dict(
type='str',
required=True
),
server_name=dict(
type='str',
required=True
),
job_agent_name=dict(
type='str',
required=True
),
job_name=dict(
type='str',
required=True
),
job_version=dict(
type='integer',
required=True
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.resource_group_name = None
self.server_name = None
self.job_agent_name = None
self.job_name = None
self.job_version = None
self.body = {}
self.results = dict(changed=False)
self.mgmt_client = None
self.state = None
self.to_do = Actions.NoAction
super(AzureRMJobVersion, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()):
if hasattr(self, key):
setattr(self, key, kwargs[key])
elif kwargs[key] is not None:
self.body[key] = kwargs[key]
self.inflate_parameters(self.module_arg_spec, self.body, 0)
old_response = None
response = None
self.mgmt_client = self.get_mgmt_svc_client(SqlManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager,
api_version='2017-03-01-preview')
old_response = self.get_resource()
if not old_response:
if self.state == 'present':
self.to_do = Actions.Create
else:
if self.state == 'absent':
self.to_do = Actions.Delete
else:
modifiers = {}
self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
self.results['modifiers'] = modifiers
self.results['compare'] = []
if not self.default_compare(modifiers, self.body, old_response, '', self.results):
self.to_do = Actions.Update
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
self.results['changed'] = True
if self.check_mode:
return self.results
response = self.create_update_resource()
elif self.to_do == Actions.Delete:
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_resource()
else:
self.results['changed'] = False
response = old_response
return self.results
def create_update_resource(self):
try:
if self.to_do == Actions.Create:
response = self.mgmt_client.job_versions.create()
else:
response = self.mgmt_client.job_versions.update()
if isinstance(response, AzureOperationPoller) or isinstance(response, LROPoller):
response = self.get_poller_result(response)
except CloudError as exc:
self.log('Error attempting to create the JobVersion instance.')
self.fail('Error creating the JobVersion instance: {0}'.format(str(exc)))
return response.as_dict()
def delete_resource(self):
try:
response = self.mgmt_client.job_versions.delete()
except CloudError as e:
self.log('Error attempting to delete the JobVersion instance.')
self.fail('Error deleting the JobVersion instance: {0}'.format(str(e)))
return True
def get_resource(self):
try:
response = self.mgmt_client.job_versions.get(resource_group_name=self.resource_group_name,
server_name=self.server_name,
job_agent_name=self.job_agent_name,
job_name=self.job_name,
job_version=self.job_version)
except CloudError as e:
return False
return response.as_dict()
def main():
AzureRMJobVersion()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
d4160e38a5fe5b321cdff170039b55d5691b1787 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_2692487_0/Python/Sibi/osmos.py | 358f3037cb7d86476c1f1042cd07a622eda406f5 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,084 | py | from math import log
def solve(mySize,sizes):
if len(sizes) == 0:
return 0
while sizes[0] < mySize:
mySize = mySize + sizes.pop(0)
if len(sizes) == 0:
return 0
if sizes[0] < 2*mySize-1:
return 1+solve(2*mySize-1,sizes)
for insertions in range(1,100):
if mySize*(2**insertions)-(2**insertions)+1 > sizes[0]:
break
#insertions = log((sizes[0]-1.0)/(mySize-1.0))/log(2.0)
#insertions = int(insertions)
if insertions >= len(sizes):
return len(sizes)
else:
return min(len(sizes),insertions+solve(mySize*2**insertions-2**insertions+1,sizes))
iFile = open("A-small-attempt2.in","r")
oFile = open("output.txt","w")
cases = int(iFile.readline().strip())
for i in range(cases):
line1 = [int(a) for a in iFile.readline().strip().split()]
mySize = line1[0]
sizes = [int(a) for a in iFile.readline().strip().split()]
sizes.sort()
if mySize == 1:
minSolution = len(sizes)
else:
minSolution = solve(mySize,sizes)
output = str(minSolution)
oFile.write("Case #"+str(i+1)+": "+output+"\n")
| [
"[email protected]"
] | |
16b9d3fbc328cf5c773764022eb697014b9ad4bb | ccc1bb07a8fbf0f7ef7dc6f8fa5ce4d215dba847 | /Scripts/sims4communitylib/utils/sims/common_age_utils.py | ea831e676a7ea2d6b81436c44be87b3c2bc0888c | [
"CC-BY-4.0"
] | permissive | JaidenBettencourt/Sims4CommunityLibrary | 9ab9b6b2173c4cbf0813a774261ff617d556f109 | 6818fee38499b66748d3118037b6dbfe17fb571a | refs/heads/master | 2022-12-28T04:09:34.034010 | 2020-09-29T20:34:01 | 2020-09-29T20:34:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,786 | py | """
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
from pprint import pformat
from typing import Union
from sims.sim_info import SimInfo
from sims.sim_info_types import Age
from sims4communitylib.exceptions.common_exceptions_handler import CommonExceptionHandler
from sims4communitylib.modinfo import ModInfo
class CommonAgeUtils:
"""Utilities for manipulating Ages of Sims.
"""
@staticmethod
@CommonExceptionHandler.catch_exceptions(ModInfo.get_identity(), fallback_return=None)
def get_age(sim_info: SimInfo) -> Union[Age, None]:
"""get_age(sim_info)
Retrieve the Age of a Sim.
:param sim_info: The Sim to get the Age of.
:type sim_info: SimInfo
:return: The Age of the Sim or None if a problem occurs.
:rtype: Union[Age, None]
"""
if sim_info is None:
return None
if hasattr(sim_info, '_base') and hasattr(sim_info._base, 'age'):
return sim_info._base.age
if hasattr(sim_info, 'age'):
# noinspection PyPropertyAccess
return sim_info.age
if hasattr(sim_info, 'sim_info') and hasattr(sim_info.sim_info, '_base') and hasattr(sim_info.sim_info._base, 'age'):
return sim_info.sim_info._base.age
if hasattr(sim_info, 'sim_info') and hasattr(sim_info.sim_info, 'age'):
return sim_info.sim_info.age
return None
@staticmethod
def set_age(sim_info: SimInfo, age: Union[Age, int]) -> bool:
"""set_age(sim_info, age)
Set the Age of a Sim.
:param sim_info: The Sim to set the Age of.
:type sim_info: SimInfo
:param age: The Age to set the Sim to.
:type age: Union[Age, int]
:return: True, if the Age was set successfully. False, if not.
:rtype: bool
"""
try:
sim_info.apply_age(age)
return True
except Exception as ex:
CommonExceptionHandler.log_exception(ModInfo.get_identity(), 'Failed to set age of sim {} to {}.'.format(pformat(sim_info), age), exception=ex)
return False
@staticmethod
def are_same_age(sim_info: SimInfo, other_sim_info: SimInfo) -> bool:
"""are_same_age(sim_info, other_sim_info)
Determine if two Sims are the same Age.
:param sim_info: The Sim to check.
:type sim_info: SimInfo
:param other_sim_info: The other Sim to compare to.
:type other_sim_info: SimInfo
:return: True, if both Sims are the same Age.
:rtype: bool
"""
return CommonAgeUtils.get_age(sim_info) == CommonAgeUtils.get_age(other_sim_info)
@staticmethod
def is_younger_than(sim_info: SimInfo, age: Union[Age, int], or_equal: bool=False) -> bool:
"""is_younger_than(sim_info, age, or_equal=False)
Determine if a Sim is younger than the specified Age.
:param sim_info: The Sim to check.
:type sim_info: SimInfo
:param age: The age to check with.
:type age: Union[Age, int]
:param or_equal: If True, the age check will be younger than or equal to. If False, the age check will be younger than.
:type or_equal: bool
:return: True, if the Sim is younger than the specified Age or equal to the specified age if `or_equal` is True. False, if not.
:rtype: bool
"""
sim_age = CommonAgeUtils.get_age(sim_info)
if or_equal:
return sim_age <= age
return sim_age < age
@staticmethod
def is_older_than(sim_info: SimInfo, age: Union[Age, int], or_equal: bool=False) -> bool:
"""is_older_than(sim_info, age, or_equal=False)
Determine if a Sim is older than the specified Age.
:param sim_info: The Sim to check.
:type sim_info: SimInfo
:param age: The age to check with.
:type age: Union[Age, int]
:param or_equal: If True, the age check will be older than or equal to. If False, the Age check will be older than.
:type or_equal: bool
:return: True, if the Sim is older than the specified Age or equal to the specified Age if `or_equal` is True. False, if not.
:rtype: bool
"""
sim_age = CommonAgeUtils.get_age(sim_info)
if or_equal:
return sim_age >= age
return sim_age > age
@staticmethod
def is_baby_age(age: Union[Age, int]) -> bool:
"""is_baby_age(age)
Determine if an Age is a Baby.
:param age: The age to check.
:type age: Union[Age, int]
:return: True, if it is. False, if it is not.
:rtype: bool
"""
return age == Age.BABY
@staticmethod
def is_toddler_age(age: Union[Age, int]) -> bool:
"""is_toddler_age(age)
Determine if an Age is a Toddler.
:param age: The age to check.
:type age: Union[Age, int]
:return: True, if it is. False, if it is not.
:rtype: bool
"""
return age == Age.TODDLER
@staticmethod
def is_child_age(age: Union[Age, int]) -> bool:
"""is_child_age(age)
Determine if an Age is a Child.
:param age: The age to check.
:type age: Union[Age, int]
:return: True, if it is. False, if it is not.
:rtype: bool
"""
return age == Age.CHILD
@staticmethod
def is_teen_age(age: Union[Age, int]) -> bool:
"""is_teen_age(age)
Determine if an Age is a Teen.
:param age: The age to check.
:type age: Union[Age, int]
:return: True, if it is. False, if it is not.
:rtype: bool
"""
return age == Age.TEEN
@staticmethod
def is_adult_age(age: Union[Age, int]) -> bool:
"""is_adult_age(age)
Determine if an Age is a Young Adult or an Adult.
:param age: The age to check.
:type age: Union[Age, int]
:return: True, if it is. False, if it is not.
:rtype: bool
"""
return CommonAgeUtils.is_young_adult_age(age) or CommonAgeUtils.is_mature_adult_age(age)
@staticmethod
def is_young_adult_age(age: Union[Age, int]) -> bool:
"""is_young_adult_age(age)
Determine if an Age is a Young Adult.
:param age: The age to check.
:type age: Union[Age, int]
:return: True, if it is. False, if it is not.
:rtype: bool
"""
return age == Age.YOUNGADULT
@staticmethod
def is_mature_adult_age(age: Union[Age, int]) -> bool:
"""is_mature_adult_age(age)
Determine if an Age is an Adult.
:param age: The age to check.
:type age: Union[Age, int]
:return: True, if it is. False, if it is not.
:rtype: bool
"""
return age == Age.ADULT
@staticmethod
def is_elder_age(age: Union[Age, int]) -> bool:
"""is_elder_age(age)
Determine if an Age is an Elder.
:param age: The age to check.
:type age: Union[Age, int]
:return: True, if it is. False, if it is not.
:rtype: bool
"""
return age == Age.ELDER
@staticmethod
def is_baby_or_toddler_age(age: Age) -> bool:
"""is_baby_or_toddler_age(age)
Determine if an age is Baby or Toddler.
:param age: The age to check.
:type age: Age
:return: True, if it is. False, if it is not.
:rtype: bool
"""
return CommonAgeUtils.is_baby_age(age) or CommonAgeUtils.is_toddler_age(age)
@staticmethod
def is_baby_toddler_or_child_age(age: Age) -> bool:
"""is_baby_toddler_or_child_age(age)
Determine if an age is Baby, Toddler, or Child.
:param age: The age to check.
:type age: Age
:return: True, if it is. False, if it is not.
:rtype: bool
"""
return CommonAgeUtils.is_baby_age(age) or CommonAgeUtils.is_toddler_age(age) or CommonAgeUtils.is_child_age(age)
@staticmethod
def is_toddler_or_child_age(age: Age) -> bool:
"""is_toddler_or_child_age(age)
Determine if an age is Toddler or Child.
:param age: The age to check.
:type age: Age
:return: True, if it is. False, if it is not.
:rtype: bool
"""
return CommonAgeUtils.is_toddler_age(age) or CommonAgeUtils.is_child_age(age)
@staticmethod
def is_child_or_teen_age(age: Age) -> bool:
"""is_child_or_teen_age(age)
Determine if an age is Child or Teen.
:param age: The age to check.
:type age: Age
:return: True, if it is. False, if it is not.
:rtype: bool
"""
return CommonAgeUtils.is_child_age(age) or CommonAgeUtils.is_teen_age(age)
@staticmethod
def is_teen_or_young_adult_age(age: Age) -> bool:
"""is_teen_or_young_adult_age(age)
Determine if an age is Teen or Young Adult.
:param age: The age to check.
:type age: Age
:return: True, if it is. False, if it is not.
:rtype: bool
"""
return CommonAgeUtils.is_teen_age(age) or CommonAgeUtils.is_young_adult_age(age)
@staticmethod
def is_teen_or_adult_age(age: Age) -> bool:
"""is_teen_or_adult_age(age)
Determine if an age is Teen, Young Adult, or Adult.
:param age: The age to check.
:type age: Age
:return: True, if it is. False, if it is not.
:rtype: bool
"""
return CommonAgeUtils.is_teen_age(age) or CommonAgeUtils.is_adult_age(age)
@staticmethod
def is_teen_adult_or_elder_age(age: Age) -> bool:
"""is_teen_adult_or_elder_age(age)
Determine if an age is Teen, Young Adult, Adult, or Elder.
:param age: The age to check.
:type age: Age
:return: True, if it is. False, if it is not.
:rtype: bool
"""
return CommonAgeUtils.is_teen_age(age) or CommonAgeUtils.is_adult_age(age) or CommonAgeUtils.is_elder_age(age)
@staticmethod
def is_adult_or_elder_age(age: Age) -> bool:
"""is_adult_or_elder_age(age)
Determine if an age is Young Adult, Adult, or Elder.
:param age: The age to check.
:type age: Age
:return: True, if it is. False, if it is not.
:rtype: bool
"""
return CommonAgeUtils.is_adult_age(age) or CommonAgeUtils.is_elder_age(age)
@staticmethod
def is_mature_adult_or_elder_age(age: Age) -> bool:
"""is_mature_adult_or_elder_age(age)
Determine if an age is Adult or Elder.
:param age: The age to check.
:type age: Age
:return: True, if it is. False, if it is not.
:rtype: bool
"""
return CommonAgeUtils.is_mature_adult_age(age) or CommonAgeUtils.is_elder_age(age)
@staticmethod
def is_baby(sim_info: SimInfo) -> bool:
"""is_baby(sim_info)
Determine if a sim is a Baby.
:param sim_info: The Sim to check.
:type sim_info: SimInfo
:return: True, if the Sim is. False, if the Sim is not.
:rtype: bool
"""
return CommonAgeUtils.is_baby_age(CommonAgeUtils.get_age(sim_info))
@staticmethod
def is_toddler(sim_info: SimInfo) -> bool:
"""is_toddler(sim_info)
Determine if a sim is a Toddler.
:param sim_info: The Sim to check.
:type sim_info: SimInfo
:return: True, if the Sim is. False, if the Sim is not.
:rtype: bool
"""
return CommonAgeUtils.is_toddler_age(CommonAgeUtils.get_age(sim_info))
@staticmethod
def is_child(sim_info: SimInfo) -> bool:
"""is_child(sim_info)
Determine if a sim is a Child.
:param sim_info: The Sim to check.
:type sim_info: SimInfo
:return: True, if the Sim is. False, if the Sim is not.
:rtype: bool
"""
return CommonAgeUtils.is_child_age(CommonAgeUtils.get_age(sim_info))
@staticmethod
def is_teen(sim_info: SimInfo) -> bool:
"""is_teen(sim_info)
Determine if a sim is a Teen.
:param sim_info: The Sim to check.
:type sim_info: SimInfo
:return: True, if the Sim is. False, if the Sim is not.
:rtype: bool
"""
return CommonAgeUtils.is_teen_age(CommonAgeUtils.get_age(sim_info))
@staticmethod
def is_young_adult(sim_info: SimInfo) -> bool:
"""is_young_adult(sim_info)
Determine if a sim is an Young Adult.
.. note:: This function does not determine whether they are an Adult or not. Use "is_adult" to check for both.
:param sim_info: The Sim to check.
:type sim_info: SimInfo
:return: True, if the Sim is. False, if the Sim is not.
:rtype: bool
"""
return CommonAgeUtils.is_young_adult_age(CommonAgeUtils.get_age(sim_info))
@staticmethod
def is_mature_adult(sim_info: SimInfo) -> bool:
"""is_mature_adult(sim_info)
Determine if a sim is an Adult.
.. note:: This function does not determine whether they are a Young Adult or not. Use 'is_adult' to check for both.
:param sim_info: The Sim to check.
:type sim_info: SimInfo
:return: True, if the Sim is. False, if the Sim is not.
:rtype: bool
"""
return CommonAgeUtils.is_mature_adult_age(CommonAgeUtils.get_age(sim_info))
@staticmethod
def is_elder(sim_info: SimInfo) -> bool:
"""is_elder(sim_info)
Determine if a sim is an Elder.
:param sim_info: The Sim to check.
:type sim_info: SimInfo
:return: True, if the Sim is. False, if the Sim is not.
:rtype: bool
"""
return CommonAgeUtils.is_elder_age(CommonAgeUtils.get_age(sim_info))
@staticmethod
def is_adult(sim_info: SimInfo) -> bool:
"""is_adult(sim_info)
Determine if a sim is either a Young Adult or an Adult.
:param sim_info: The Sim to check.
:type sim_info: SimInfo
:return: True, if the Sim is. False, if the Sim is not.
:rtype: bool
"""
return CommonAgeUtils.is_adult_age(CommonAgeUtils.get_age(sim_info))
@staticmethod
def is_baby_or_toddler(sim_info: SimInfo) -> bool:
"""is_baby_or_toddler(sim_info)
Determine if a sim is a Baby or a Toddler.
:param sim_info: The Sim to check.
:type sim_info: SimInfo
:return: True, if the Sim is. False, if the Sim is not.
:rtype: bool
"""
return CommonAgeUtils.is_baby_or_toddler_age(CommonAgeUtils.get_age(sim_info))
@staticmethod
def is_toddler_or_child(sim_info: SimInfo) -> bool:
"""is_toddler_or_child(sim_info)
Determine if a sim is a Toddler or a Child.
:param sim_info: The Sim to check.
:type sim_info: SimInfo
:return: True, if the Sim is. False, if the Sim is not.
:rtype: bool
"""
return CommonAgeUtils.is_toddler_or_child_age(CommonAgeUtils.get_age(sim_info))
@staticmethod
def is_baby_toddler_or_child(sim_info: SimInfo) -> bool:
"""is_baby_toddler_or_child(sim_info)
Determine if a sim is a Baby, a Toddler, or a Child.
:param sim_info: The Sim to check.
:type sim_info: SimInfo
:return: True, if the Sim is. False, if the Sim is not.
:rtype: bool
"""
return CommonAgeUtils.is_baby_toddler_or_child_age(CommonAgeUtils.get_age(sim_info))
@staticmethod
def is_child_or_teen(sim_info: SimInfo) -> bool:
"""is_child_or_teen(sim_info)
Determine if a sim is a Child or a Teen.
:param sim_info: The Sim to check.
:type sim_info: SimInfo
:return: True, if the Sim is. False, if the Sim is not.
:rtype: bool
"""
return CommonAgeUtils.is_child_or_teen_age(CommonAgeUtils.get_age(sim_info))
@staticmethod
def is_teen_or_young_adult(sim_info: SimInfo) -> bool:
"""is_teen_or_young_adult(sim_info)
Determine if a sim is a Teen or a Young Adult.
:param sim_info: The Sim to check.
:type sim_info: SimInfo
:return: True, if the Sim is. False, if the Sim is not.
:rtype: bool
"""
return CommonAgeUtils.is_teen_or_young_adult_age(CommonAgeUtils.get_age(sim_info))
@staticmethod
def is_teen_or_adult(sim_info: SimInfo) -> bool:
"""is_teen_or_adult(sim_info)
Determine if a sim is a Teen, a Young Adult, or an Adult.
:param sim_info: The Sim to check.
:type sim_info: SimInfo
:return: True, if the Sim is. False, if the Sim is not.
:rtype: bool
"""
return CommonAgeUtils.is_teen_or_adult_age(CommonAgeUtils.get_age(sim_info))
@staticmethod
def is_teen_adult_or_elder(sim_info: SimInfo) -> bool:
"""is_teen_adult_or_elder(sim_info)
Determine if a sim is a Teen, a Young Adult, an Adult, or an Elder.
:param sim_info: The Sim to check.
:type sim_info: SimInfo
:return: True, if the Sim is. False, if the Sim is not.
:rtype: bool
"""
return CommonAgeUtils.is_teen_adult_or_elder_age(CommonAgeUtils.get_age(sim_info))
@staticmethod
def is_adult_or_elder(sim_info: SimInfo) -> bool:
"""is_adult_or_elder(sim_info)
Determine if a sim is a Young Adult, an Adult, or an Elder.
:param sim_info: The Sim to check.
:type sim_info: SimInfo
:return: True, if the Sim is. False, if the Sim is not.
:rtype: bool
"""
return CommonAgeUtils.is_adult_or_elder_age(CommonAgeUtils.get_age(sim_info))
@staticmethod
def is_mature_adult_or_elder(sim_info: SimInfo) -> bool:
"""is_mature_adult_or_elder(sim_info)
Determine if a sim is an Adult or an Elder.
:param sim_info: The Sim to check.
:type sim_info: SimInfo
:return: True, if the Sim is. False, if the Sim is not.
:rtype: bool
"""
return CommonAgeUtils.is_mature_adult_or_elder_age(CommonAgeUtils.get_age(sim_info))
# Obsolete Functionality
@staticmethod
def is_baby_child_or_toddler(sim_info: SimInfo) -> bool:
"""is_baby_child_or_toddler(sim_info)
.. warning:: Obsolete: Don't use this function. Use the :func:'~is_baby_toddler_or_child' function instead.
"""
return CommonAgeUtils.is_baby(sim_info) or CommonAgeUtils.is_toddler(sim_info) or CommonAgeUtils.is_child(sim_info)
| [
"[email protected]"
] | |
4711c9358a365a673a560438a3d01eaa7bc1cdcd | abd9537f8b90a990e195ded5f9fafdcc108d2a48 | /swea/d4/1861/1861_june.py | 8790efff9e44731cc0c47a060c8eb6dc902bcd0a | [] | no_license | ohdnf/algorithms | 127171744631406c1d08cc2583aa569a094fa2cd | 6f286753dab827facc436af4f2130f11dad2d44f | refs/heads/master | 2023-08-09T11:19:56.445351 | 2021-08-31T13:11:46 | 2021-08-31T13:11:46 | 236,180,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | import sys
sys.stdin = open('input.txt')
sys.stdin = open('n1000.txt')
dx = [0, 1, 0, -1]
dy = [1, 0, -1, 0]
t = int(input())
for test_case in range(1, t+1):
n = int(input())
room = [list(map(int, input().split())) for _ in range(n)]
v = [0] * (n**2 + 1)
for i in range(n):
for j in range(n):
for k in range(4):
nx = i + dx[k]
ny = j + dy[k]
if 0 <= nx < n and 0 <= ny < n and room[i][j] + 1 == room[nx][ny]:
v[room[i][j]] += 1
break
start = 0
move = max_move = 1
for i in range(n*n, -1, -1):
if v[i]:
move += 1
else:
if move >= max_move:
max_move = move
start = i+1
move = 1
print('#{} {} {}'.format(test_case, start, max_move))
| [
"[email protected]"
] | |
e410945fdc90dfec260540d9e96b5aa39d3d487e | 20f951bd927e4e5cde8ef7781813fcf0d51cc3ea | /fossir/modules/bootstrap/blueprint.py | cce0f666235dc8c6007c3badae3b258b1efdbb60 | [] | no_license | HodardCodeclub/SoftwareDevelopment | 60a0fbab045cb1802925d4dd5012d5b030c272e0 | 6300f2fae830c0c2c73fe0afd9c684383bce63e5 | refs/heads/master | 2021-01-20T00:30:02.800383 | 2018-04-27T09:28:25 | 2018-04-27T09:28:25 | 101,277,325 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 349 | py |
from __future__ import unicode_literals
from fossir.modules.bootstrap.controllers import RHBootstrap
from fossir.web.flask.wrappers import fossirBlueprint
_bp = fossirBlueprint('bootstrap', __name__, template_folder='templates', virtual_template_folder='bootstrap')
_bp.add_url_rule('/bootstrap', 'index', RHBootstrap, methods=('GET', 'POST'))
| [
"[email protected]"
] | |
856bafa4536d68bf54de8ad934805089bf2d0897 | 0bce7412d58675d6cc410fa7a81c294ede72154e | /Python3/0303. Range Sum Query - Immutable.py | 4d1cb2c5dbaf4b75edcda2762e7d6aa9aa227e01 | [] | no_license | yang4978/LeetCode | 9ddf010b0f1dda32cddc7e94c3f987509dea3214 | 6387d05b619d403414bad273fc3a7a2c58668db7 | refs/heads/master | 2022-01-15T04:21:54.739812 | 2021-12-28T12:28:28 | 2021-12-28T12:28:28 | 182,653,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | class NumArray:
def __init__(self, nums: List[int]):
self.temp = [0]
for i in nums:
self.temp.append(self.temp[-1]+i)
def sumRange(self, i: int, j: int) -> int:
return self.temp[j+1]-self.temp[i]
# Your NumArray object will be instantiated and called as such:
# obj = NumArray(nums)
# param_1 = obj.sumRange(i,j)
| [
"[email protected]"
] | |
d956771952eed80b045a0369c29b226eac1ad680 | a9f3ce38fa6bac4317da099788e5e58dadade2a3 | /karlooper/web/application.py | fd6a11591d410cabc640f67767af37e63bc7874a | [] | no_license | MoiraJune/karlooper | 2763e40b52fc37f81d23ba2766b4c392242402e6 | 89ef82cb96070360ebee2bcb398c972b1aef4e58 | refs/heads/master | 2020-06-25T10:19:24.405902 | 2017-08-04T11:09:39 | 2017-08-04T11:09:39 | 96,974,844 | 1 | 0 | null | 2017-08-04T09:05:17 | 2017-07-12T06:49:20 | Python | UTF-8 | Python | false | false | 24,623 | py | # -*-coding:utf-8-*-
"""
application
~~~~~~~~~~~
Use this model to initialize web application.
Usage
=====
>>> from karlooper.web import IOModel
>>> from karlooper.web.application import Application
>>> application = Application(handlers={}, settings={}, port=8080, log_conf="./config.log")
>>> application.run(io_model=IOModel.POLL)
server run on port: 8080
run with poll
>>> application = Application(handlers={}, settings={}, log_conf="./config.log")
>>> application.listen(8000)
>>> application.run(io_model=IOModel.POLL)
server run on port: 8000
run with poll
"""
import socket
import select
from karlooper.logger.logger import init_logger
from karlooper.web import IOModel
from karlooper.web.__async_core_server import EchoServer, asyncore
from karlooper.web.http_connection import HttpConnection
from karlooper.web.http_io_buffer import HttpIOBuffer
from karlooper.web.http_io_routine_pool import HttpIORoutinePool
from karlooper.http_parser.http_parser import HttpParser
from karlooper.config import get_cli_data, set_cli_data
from karlooper.config.config import SOCKET_RECEIVE_SIZE, DEFAULT_PORT, CLIENT_CONNECT_TO_SERVER_NUM
__author__ = '[email protected]'
class Application(object):
def __init__(self, handlers, settings=None, **kwargs):
"""
:param handlers: handlers mapping, dict type
:param settings: settings mapping, dict type
:param kwargs: options
"""
self.settings = settings
self.handlers = handlers
set_cli_data(self.settings)
set_cli_data(kwargs)
cli_data = get_cli_data()
self.port = int(cli_data.get("port", DEFAULT_PORT))
log_conf = self.settings.get("log_conf", None) if self.settings else kwargs.get("log_conf", None)
self.logger = init_logger(config_path=log_conf)
self.EOL1 = b'\n\n'
self.EOL2 = b'\n\r\n'
self.response = ""
def listen(self, port):
"""listen port
:param port: port that application listened
:return: None
"""
self.port = int(port)
def __run_epoll(self):
"""
run the application use epoll
"""
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(('0.0.0.0', self.port))
server_socket.listen(CLIENT_CONNECT_TO_SERVER_NUM) # the number of client that connect to server
        server_socket.setblocking(0)  # 0 = non-blocking (otherwise accept/recv would block)
server_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
epoll = select.epoll()
epoll.register(server_socket.fileno(), select.EPOLLIN)
try:
http_connection = HttpConnection()
http_io_buffer = HttpIOBuffer()
http_io_routine_pool = HttpIORoutinePool()
events_buf = []
while True:
events = epoll.poll(1) + events_buf
events_buf = []
for fileno, event in events:
try:
if fileno == server_socket.fileno(): # if request come
connection, address = server_socket.accept() # waiting income connection
                            connection.setblocking(0)  # non-blocking
epoll.register(connection.fileno(), select.EPOLLIN) # register socket read event to epoll
http_connection.add_connection(connection.fileno(), connection)
http_io_buffer.add_request(connection.fileno(), b'')
http_io_buffer.add_response(connection.fileno(), self.response)
elif event & select.EPOLLIN: # when data in os's read buffer area
http_parser = http_io_routine_pool.get(file_no=fileno)
if http_parser:
data = http_parser.parse()
if isinstance(data, str) or isinstance(data, unicode):
http_io_buffer.add_response(
fileno,
http_io_buffer.get_response(fileno) + data
)
epoll.modify(fileno, select.EPOLLOUT) # change file number to epoll out mode
http_io_routine_pool.remove(fileno)
else: # if coroutine
http_io_routine_pool.add(fileno, http_parser)
events_buf.append((fileno, event))
else:
http_request_buffer = http_connection.get_connection(fileno).recv(SOCKET_RECEIVE_SIZE)
http_io_buffer.add_request(
fileno,
http_io_buffer.get_request(fileno) + http_request_buffer
)
if self.EOL1 in http_io_buffer.get_request(fileno) \
or self.EOL2 in http_io_buffer.get_request(fileno):
request_data = http_io_buffer.get_request(fileno)[:-2] \
if http_io_buffer.get_request(fileno).endswith("\r\n") \
else http_io_buffer.get_request(fileno)
http_parser = HttpParser(
request_data,
self.handlers,
settings=self.settings
)
data = http_parser.parse()
if isinstance(data, str) or isinstance(data, unicode):
http_io_buffer.add_response(
fileno,
http_io_buffer.get_response(fileno) + data
)
epoll.modify(fileno, select.EPOLLOUT) # change file number to epoll out mode
http_io_routine_pool.remove(fileno)
else: # if coroutine
http_io_routine_pool.add(fileno, http_parser)
events_buf.append((fileno, event))
else:
self.logger.error("connection error in __run_epoll: %s", str(e))
http_connection.remove_connection(fileno)
http_io_buffer.remove_request(fileno)
http_io_buffer.remove_response(fileno)
http_io_routine_pool.remove(fileno)
epoll.unregister(fileno)
elif event & select.EPOLLOUT: # if out mode
bytes_written = http_connection.get_connection(fileno).send(
http_io_buffer.get_response(fileno)
)
http_io_buffer.add_response(fileno, http_io_buffer.get_response(fileno)[bytes_written:])
if len(http_io_buffer.get_response(fileno)) == 0: # if file sent
http_connection.get_connection(fileno).shutdown(socket.SHUT_RDWR)
epoll.modify(fileno, select.EPOLLHUP)
elif event & select.EPOLLHUP: # if message sent and file number in epoll is hup
epoll.unregister(fileno) # remove file number from epoll
http_connection.get_connection(fileno).close() # close connection
http_connection.remove_connection(fileno) # delete connection from connections dict
except Exception as e:
self.logger.info("error in __run_epoll: %s", str(e))
http_connection.remove_connection(fileno)
http_io_buffer.remove_request(fileno)
http_io_buffer.remove_response(fileno)
http_io_routine_pool.remove(fileno)
self.logger.info("fileno is: %s", str(fileno))
epoll.close()
epoll = select.epoll()
epoll.register(server_socket.fileno(), select.EPOLLIN)
finally:
epoll.unregister(server_socket.fileno())
epoll.close()
server_socket.close()
def __run_kqueue(self):
"""
run server use kqueue
"""
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.bind(('0.0.0.0', self.port))
server_socket.listen(CLIENT_CONNECT_TO_SERVER_NUM)
kq = select.kqueue()
http_connection = HttpConnection()
http_io_buffer = HttpIOBuffer()
http_io_routine_pool = HttpIORoutinePool()
index = 1
events = [
select.kevent(server_socket.fileno(), select.KQ_FILTER_READ, select.KQ_EV_ADD),
select.kevent(server_socket.fileno(), select.KQ_FILTER_WRITE, select.KQ_EV_ADD)
]
events_buf = []
while True:
try:
event_list = kq.control(events, 128, 0.0001) + events_buf
events_buf = []
except select.error as e:
self.logger.error("error in __run_kqueue: %s", str(e))
break
if event_list:
for each in event_list:
if each.ident == server_socket.fileno():
index += 1
conn, addr = server_socket.accept()
http_connection.add_connection(index, conn)
events.append(
select.kevent(
http_connection.get_connection(index).fileno(),
select.KQ_FILTER_READ,
select.KQ_EV_ADD,
udata=index
)
)
else:
try:
if each.udata >= 1 and each.filter == select.KQ_FILTER_READ:
http_parser = http_io_routine_pool.get(file_no=each.udata)
if http_parser:
data = http_parser.parse()
if isinstance(data, str) or isinstance(data, unicode):
http_io_routine_pool.remove(each.udata)
http_io_buffer.add_response(each.udata, data)
events.append(
select.kevent(
http_connection.get_connection(each.udata).fileno(),
select.KQ_FILTER_WRITE,
select.KQ_EV_ADD,
udata=each.udata
)
)
events.remove(select.kevent(
http_connection.get_connection(each.udata).fileno(),
select.KQ_FILTER_READ,
select.KQ_EV_ADD,
udata=each.udata)
)
else: # if coroutine
http_io_routine_pool.add(each.udata, http_parser)
events_buf.append(each)
else:
conn = http_connection.get_connection(each.udata)
request_data = conn.recv(SOCKET_RECEIVE_SIZE)
request_data = request_data[:-2] if request_data.endswith("\r\n") else request_data
http_parser = HttpParser(
request_data,
handlers=self.handlers,
settings=self.settings
)
data = http_parser.parse()
if isinstance(data, str) or isinstance(data, unicode):
http_io_buffer.add_response(each.udata, data)
events.append(
select.kevent(
http_connection.get_connection(each.udata).fileno(),
select.KQ_FILTER_WRITE,
select.KQ_EV_ADD,
udata=each.udata
)
)
events.remove(select.kevent(
http_connection.get_connection(each.udata).fileno(),
select.KQ_FILTER_READ,
select.KQ_EV_ADD,
udata=each.udata)
)
else: # if coroutine
http_io_routine_pool.add(each.udata, http_parser)
events_buf.append(each)
elif each.udata >= 1 and each.filter == select.KQ_FILTER_WRITE:
conn = http_connection.get_connection(each.udata)
data = http_io_buffer.get_response(each.udata)
conn.send(data)
events.remove(select.kevent(
http_connection.get_connection(each.udata).fileno(),
select.KQ_FILTER_WRITE,
select.KQ_EV_ADD,
udata=each.udata)
)
conn.close()
http_connection.remove_connection(each.udata)
except Exception as e:
self.logger.info("error in __run_kqueue event list: %s", str(e))
self.logger.info("each filter: %s", each.filter)
self.__remove_event(events, each)
http_connection.remove_connection(each.udata)
http_io_buffer.remove_request(each.udata)
http_io_buffer.remove_response(each.udata)
http_io_routine_pool.remove(each.udata)
kq.close()
kq = select.kqueue()
server_socket.close()
def __run_poll(self):
"""
run server use poll, I will modify __run_poll and __run_epoll in the future
"""
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(('0.0.0.0', self.port))
server_socket.listen(CLIENT_CONNECT_TO_SERVER_NUM) # the number of client that connect to server
        server_socket.setblocking(0)  # 0 = non-blocking (otherwise accept/recv would block)
server_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
poll = select.poll()
poll.register(server_socket.fileno(), select.POLLIN)
try:
http_connection = HttpConnection()
http_io_buffer = HttpIOBuffer()
http_io_routine_pool = HttpIORoutinePool()
events_buf = []
while True:
events = poll.poll(1) + events_buf
events_buf = []
for fileno, event in events:
try:
if fileno == server_socket.fileno(): # if request come
connection, address = server_socket.accept() # waiting income connection
                            connection.setblocking(0)  # non-blocking
poll.register(connection.fileno(), select.POLLIN) # register socket read event to poll
http_connection.add_connection(connection.fileno(), connection)
http_io_buffer.add_request(connection.fileno(), b'')
http_io_buffer.add_response(connection.fileno(), self.response)
elif event & select.POLLIN: # when data in os's read buffer area
http_parser = http_io_routine_pool.get(file_no=fileno)
if http_parser:
data = http_parser.parse()
if isinstance(data, str) or isinstance(data, unicode):
http_io_buffer.add_response(
fileno,
http_io_buffer.get_response(fileno) + data
)
poll.modify(fileno, select.POLLOUT) # change file number to epoll out mode
http_io_routine_pool.remove(fileno)
else: # if coroutine
http_io_routine_pool.add(fileno, http_parser)
events_buf.append((fileno, event))
else:
http_request_buffer = http_connection.get_connection(fileno).recv(SOCKET_RECEIVE_SIZE)
http_io_buffer.add_request(
fileno,
http_io_buffer.get_request(fileno) + http_request_buffer
)
if self.EOL1 in http_io_buffer.get_request(fileno) \
or self.EOL2 in http_io_buffer.get_request(fileno):
request_data = http_io_buffer.get_request(fileno)[:-2] \
if http_io_buffer.get_request(fileno).endswith("\r\n") \
else http_io_buffer.get_request(fileno)
http_parser = HttpParser(
request_data,
self.handlers,
settings=self.settings
)
data = http_parser.parse()
if isinstance(data, str) or isinstance(data, unicode):
http_io_buffer.add_response(
fileno,
http_io_buffer.get_response(fileno) + data
)
poll.modify(fileno, select.POLLOUT) # change file number to epoll out mode
http_io_routine_pool.remove(fileno)
else: # if coroutine
http_io_routine_pool.add(fileno, http_parser)
events_buf.append((fileno, event))
else:
self.logger.error("connection error in __run_epoll: %s", str(e))
http_connection.remove_connection(fileno)
http_io_buffer.remove_request(fileno)
http_io_buffer.remove_response(fileno)
http_io_routine_pool.remove(fileno)
poll.unregister(fileno)
elif event & select.POLLOUT: # if out mode
bytes_written = http_connection.get_connection(fileno).send(
http_io_buffer.get_response(fileno)
)
http_io_buffer.add_response(fileno, http_io_buffer.get_response(fileno)[bytes_written:])
if len(http_io_buffer.get_response(fileno)) == 0: # if file sent
http_connection.get_connection(fileno).shutdown(socket.SHUT_RDWR)
poll.modify(fileno, select.POLLHUP)
elif event & select.POLLHUP: # if message sent and file number in poll is hup
poll.unregister(fileno) # remove file number from poll
http_connection.get_connection(fileno).close() # close connection
http_connection.remove_connection(fileno) # delete connection from connections dict
except Exception as e:
self.logger.info("error in __run_poll: %s", str(e))
http_connection.remove_connection(fileno)
http_io_buffer.remove_request(fileno)
http_io_buffer.remove_response(fileno)
http_io_routine_pool.remove(fileno)
self.logger.info("fileno is: %s", str(fileno))
poll.unregister(fileno)
finally:
poll.unregister(server_socket.fileno())
poll.close()
server_socket.close()
def __run_async_io(self):
"""
run server use asyncore
"""
EchoServer('0.0.0.0', self.port, self.handlers, self.settings)
asyncore.loop()
def __remove_event(self, events, each):
"""remove event from events
:param events: the list contain some events
:param each: the event will be removed
:return: None
"""
self.logger.warning("remove event with udata: %s", str(each.udata))
for event in events:
if event.ident == each.ident:
events.remove(event)
break
def run(self, io_model=None):
"""run the web server
:param io_model: os io model, EPOLL 0 KQUEUE 1 POLL 2
:return: None
"""
print("server run on port: %d" % self.port)
self.logger.info("server run on port: %d" % self.port)
if io_model:
if io_model == IOModel.EPOLL and hasattr(select, "epoll"):
print("run with epoll")
self.logger.info("run with epoll")
self.__run_epoll()
elif io_model == IOModel.KQUEUE and hasattr(select, "kqueue"):
print("run with kqueue")
self.logger.info("run with kqueue")
self.__run_kqueue()
elif io_model == IOModel.POLL and hasattr(select, "poll"):
print("run with poll")
self.logger.info("run with poll")
self.__run_poll()
else:
if hasattr(select, "epoll"):
print("run with epoll")
self.logger.info("run with epoll")
self.__run_epoll()
elif hasattr(select, "kqueue"):
print("run with kqueue")
self.logger.info("run with kqueue")
self.__run_kqueue()
elif hasattr(select, "poll"):
print("run with poll")
self.logger.info("run with poll")
self.__run_poll()
else:
print("run with asyncore")
self.logger.info("run with asyncore")
self.__run_async_io()
print("server start failed!")
self.logger.info("server start failed!")
| [
"[email protected]"
] | |
b93689c0be7a720edd4a7d4908073df64f921dc6 | 29a4c1e436bc90deaaf7711e468154597fc379b7 | /modules/ieee/doc/ulpdist.py | ac366398b04284aadd5b1cb7da038ae261d73daf | [
"BSL-1.0"
] | permissive | brycelelbach/nt2 | 31bdde2338ebcaa24bb76f542bd0778a620f8e7c | 73d7e8dd390fa4c8d251c6451acdae65def70e0b | refs/heads/master | 2021-01-17T12:41:35.021457 | 2011-04-03T17:37:15 | 2011-04-03T17:37:15 | 1,263,345 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,870 | py | [ ## this file was manually modified by jt
{
'functor' : {
'arity' : '2',
'call_types' : [],
'ret_arity' : '0',
'rturn' : {
'default' : 'typename boost::result_of<nt2::meta::arithmetic(T)>::type',
},
'simd_types' : ['real_'],
'type_defs' : [],
'types' : ['real_', 'unsigned_int_', 'signed_int_'],
},
'info' : 'manually modified',
'unit' : {
'global_header' : {
'first_stamp' : 'modified by jt the 04/12/2010',
'included' : [],
'no_ulp' : 'True',
'notes' : [],
'stamp' : 'modified by jt the 12/12/2010',
},
'ranges' : {
'real_' : [['T(-10)', 'T(10)'], ['T(-10)', 'T(10)']],
'signed_int_' : [['-100', '100'], ['-100', '100']],
'unsigned_int_' : [['0', '100'], ['0', '100']],
},
'specific_values' : {
'default' : {
},
'real_' : {
'nt2::Inf<T>()' : 'nt2::Zero<r_t>()',
'nt2::Minf<T>()' : 'nt2::Zero<r_t>()',
'nt2::Mone<T>()' : 'nt2::Zero<r_t>()',
'nt2::Nan<T>()' : 'nt2::Zero<r_t>()',
'nt2::One<T>()' : 'nt2::Zero<r_t>()',
'nt2::Zero<T>()' : 'nt2::Zero<r_t>()',
},
'signed_int_' : {
'nt2::Mone<T>()' : 'nt2::Zero<r_t>()',
'nt2::One<T>()' : 'nt2::Zero<r_t>()',
'nt2::Zero<T>()' : 'nt2::Zero<r_t>()',
},
'unsigned_int_' : {
'nt2::One<T>()' : 'nt2::Zero<r_t>()',
'nt2::Zero<T>()' : 'nt2::Zero<r_t>()',
},
},
'verif_test' : {
},
},
'version' : '0.1',
},
] | [
"[email protected]"
] | |
91e45e105497e90a01f63258bc61dd9638245813 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc121/B/4971858.py | 85a9a317171327639376a274c3b93a09195fc306 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | N,M,C=list(map(int,input().split()))
B=list(map(int,input().split()))
a=[]
for i in range(N):
a.append(list(map(int,input().split())))
cnt=0
for k in range(N):
sum=0
for j in range(M):
sum+=a[k][j]*B[j]
if sum+C > 0:
cnt+=1
print(cnt) | [
"[email protected]"
] | |
7871e98f8480ddaec7ab0d3d64ff3ecdf9d6e751 | 4ca44b7bdb470fcbbd60c2868706dbd42b1984c9 | /20.11.23/백준_1018.py | 125a30305d44671fe2572d9e6f4d2a4da5de720e | [] | no_license | titiman1013/Algorithm | 3b3d14b3e2f0cbc4859029eb73ad959ec8778629 | 8a67e36931c42422779a4c90859b665ee468255b | refs/heads/master | 2023-06-29T17:04:40.015311 | 2021-07-06T01:37:29 | 2021-07-06T01:37:29 | 242,510,483 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,138 | py | import sys; sys.stdin = open('text3.txt', 'r')
# def check(x, y):
# temp = 0
# color = ''
# for p in range(8):
# for q in range(8):
# if p == 0 and q == 0:
# color = arr[x + p][y + q]
# continue
# if arr[x + p][y + q] == color:
# temp += 1
# if color == 'W':
# color = 'B'
# else:
# color = 'W'
# if temp > res:
# return False
# else:
# color = arr[x + p][y + q]
# continue
# if color == 'W':
# color = 'B'
# else:
# color = 'W'
# return temp
def check(x, y, color):
temp = 0
for p in range(8):
for q in range(8):
if p == 0 and q == 0:
continue
if arr[x + p][y + q] == color:
temp += 1
if color == 'W':
color = 'B'
else:
color = 'W'
if temp > res:
return False
else:
color = arr[x + p][y + q]
continue
if color == 'W':
color = 'B'
else:
color = 'W'
return temp
for tc in range(6):
N, M = map(int, input().split())
arr = [list(input()) for _ in range(N)]
res = 10000000000
for i in range(N - 8 + 1):
for j in range(M - 8 + 1):
ischeck = check(i, j, 'W')
if ischeck == False:
pass
else:
if ischeck < res:
if arr[i][j] == 'W':
res = ischeck
else:
res = ischeck + 1
ischeck2 = check(i, j, 'B')
if ischeck2 == False:
continue
else:
if ischeck2 < res:
if arr[i][j] == 'B':
res = ischeck2
else:
res = ischeck2 + 1
print(tc, res) | [
"[email protected]"
] | |
ed3f0747a03be29e372e99f9cf90afa6a0bcb387 | 19f698ab74cba74ae52c780f5986d273fb319308 | /SWExpertAcademy/D5/1242.py | e82ef68270ba4d0834822e4878c2c2d888764f6f | [] | no_license | naye0ng/Algorithm | 15023f1070eb7cc5faca9cf7154af2ecffab92c2 | 1e8848e3e2574b01dc239212ea084b0a4837bc03 | refs/heads/master | 2021-06-25T14:18:46.117411 | 2020-10-16T10:47:37 | 2020-10-16T10:47:37 | 149,326,399 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,505 | py | """
1242.암호코드 스캔
"""
import sys
sys.stdin = open('input.txt','r')
match = [[3,2,1,1],[2,2,2,1],[2,1,2,2],[1,4,1,1],[1,1,3,2],[1,2,3,1],[1,1,1,4],[1,3,1,2],[1,2,1,3],[3,1,1,2]]
T= int(input())
for test_case in range(1, 1+T):
N, M = map(int, input().split())
    # Skip duplicate code rows while reading the input
empty = str(0)*M
arr = [0]*N
n = -1
for _ in range(N) :
local = input()
if local != empty :
if n== -1 or arr[n] != local :
n+=1
arr[n] = local
n +=1
arr = arr[:n]
    # Convert each hex digit to 4 binary digits
for x in range(n) :
arr[x] = arr[x].replace('0', '0000')
arr[x] = arr[x].replace('1', '0001')
arr[x] = arr[x].replace('2', '0010')
arr[x] = arr[x].replace('3', '0011')
arr[x] = arr[x].replace('4', '0100')
arr[x] = arr[x].replace('5', '0101')
arr[x] = arr[x].replace('6', '0110')
arr[x] = arr[x].replace('7', '0111')
arr[x] = arr[x].replace('8', '1000')
arr[x] = arr[x].replace('9', '1001')
arr[x] = arr[x].replace('A', '1010')
arr[x] = arr[x].replace('B', '1011')
arr[x] = arr[x].replace('C', '1100')
arr[x] = arr[x].replace('D', '1101')
arr[x] = arr[x].replace('E', '1110')
arr[x] = arr[x].replace('F', '1111')
patt = []
maxPattern = 0
    # Locate the encoded patterns
for x in range(n) :
end, start = 0, 0
for y in range(len(arr[x])-1,-1,-1) :
if end == 0 and arr[x][y] == '1':
end = y+1
elif start == 0 and end != 0 and arr[x][y] == '0' :
start = y+1
                # Even when a 0 is found, keep searching toward the front if the length is not a multiple of 56
if (end - start)%56 :
start = 0
else :
lengthP = (end - start)//56
an = arr[x][start:end]
                    # Validate the pattern: the last character is always 1
                    # Validate the pattern: the leading '0's take up at most lengthP characters
is_pattern = True
for i in range(0,len(an),7*lengthP) :
if '1' in an[i :i+lengthP] or an[i+lengthP*7-1] !='1' :
is_pattern = False
break
if is_pattern :
if maxPattern < lengthP :
maxPattern = lengthP
patt.append([lengthP, an])
end = 0
start = 0
                    # Keep moving forward!
else :
start = 0
    # Build the pattern dictionary for every width up to maxPattern
dictmatch = {}
for i in range(1,maxPattern+1) :
for j in range(10) :
dictmatch[str(0)*match[j][0]*i+str(1)*match[j][1]*i+str(0)*match[j][2]*i+str(1)*match[j][3]*i] = str(j)
    # Pattern list with duplicates removed
Pattern = []
for p in patt :
pn = ''
for k in range(0,p[0]*56-1,7*p[0]) :
pn += dictmatch[p[1][k:k+7*p[0]]]
if pn not in Pattern :
Pattern.append(pn)
    # Check whether each pattern passes the checksum
result = 0
for i in range(len(Pattern)) :
pn = list(map(int,Pattern[i].replace('', ' ').split()))
if ((pn[0]+pn[2]+pn[4]+pn[6])*3+(pn[1]+pn[3]+pn[5])+pn[7])%10 == 0:
result += sum(pn)
print('#{} {}'.format(test_case, result))
| [
"[email protected]"
] | |
3bfc14d1d230a18045d9e8d9fb084c3c5c9a87a0 | 3fe1b6f36bfd02156f606cf90797d69b18dd19d2 | /creme/utils/inspect.py | bf12fcf5404537a35e4d08489d98d2d98889f465 | [
"BSD-3-Clause"
] | permissive | mihir-thakkar-ai/creme | a19a1975bb462a1a93046b6ea55830e88846cb88 | 008b0c1beb26b36b448fc3d04537e02e66d402b3 | refs/heads/master | 2022-12-18T01:15:18.132117 | 2020-09-15T20:17:16 | 2020-09-15T20:17:16 | 296,288,773 | 0 | 0 | BSD-3-Clause | 2020-09-17T10:04:27 | 2020-09-17T10:04:26 | null | UTF-8 | Python | false | false | 1,404 | py | """Utilities for inspecting a model's type.
Sometimes we need to check if a model can perform regression, classification, etc. However, for
some models the model's type is only known at runtime. For instance, we can't do
`isinstance(pipeline, base.Regressor)` or `isinstance(wrapper, base.Regressor)`. This submodule
thus provides utilities for determining an arbitrary model's type.
"""
from creme import base
from creme import compose
# TODO: maybe all of this could be done by monkeypatching isintance for pipelines?
__all__ = [
'extract_relevant',
'isclassifier',
'isregressor',
'ismoclassifier',
'ismoregressor'
]
def extract_relevant(model: base.Estimator):
"""Extracts the relevant part of a model.
Parameters:
model
"""
if isinstance(model, compose.Pipeline):
return extract_relevant(list(model.steps.values())[-1]) # look at last step
return model
def isclassifier(model):
return isinstance(extract_relevant(model), base.Classifier)
def ismoclassifier(model):
return isinstance(extract_relevant(model), base.MultiOutputClassifier)
def isregressor(model):
return isinstance(extract_relevant(model), base.Regressor)
def istransformer(model):
return isinstance(extract_relevant(model), base.Transformer)
def ismoregressor(model):
return isinstance(extract_relevant(model), base.MultiOutputRegressor)
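
# A minimal usage sketch (illustrative; it assumes creme's `preprocessing` and
# `linear_model` modules and that pipeline steps are composed with the `|` operator):
if __name__ == '__main__':
    from creme import linear_model, preprocessing

    model = preprocessing.StandardScaler() | linear_model.LinearRegression()
    print(isregressor(model))   # True: the last step of the pipeline is a Regressor
    print(isclassifier(model))  # False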
| [
"[email protected]"
] | |
58794e3389ada30651487ebcafdf441f1dd0d6f3 | 297497957c531d81ba286bc91253fbbb78b4d8be | /testing/web-platform/tests/tools/lint/tests/base.py | f624276e3be4c16d8acd1226e8f4e128812cbd2e | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | marco-c/gecko-dev-comments-removed | 7a9dd34045b07e6b22f0c636c0a836b9e639f9d3 | 61942784fb157763e65608e5a29b3729b0aa66fa | refs/heads/master | 2023-08-09T18:55:25.895853 | 2023-08-01T00:40:39 | 2023-08-01T00:40:39 | 211,297,481 | 0 | 0 | NOASSERTION | 2019-09-29T01:27:49 | 2019-09-27T10:44:24 | C++ | UTF-8 | Python | false | false | 295 | py |
def check_errors(errors):
for e in errors:
error_type, description, path, line_number = e
assert isinstance(error_type, str)
assert isinstance(description, str)
assert isinstance(path, str)
assert line_number is None or isinstance(line_number, int)
| [
"[email protected]"
] | |
5547d118a16dcd0b4cebc4a30404d27ad74d3fe2 | fca6a986e735843b667e3714b11cafaed0f390e8 | /fastai2/text/models/core.py | 235083d04a808132f5d60a270d60480a07014007 | [
"Apache-2.0"
] | permissive | mbrukman/fastai2 | 2c631b515a13738800b5bcce781be6dac807368a | 404383912503b69b244e175f3b26a06b532ee4bd | refs/heads/master | 2020-11-27T17:59:33.125318 | 2019-12-21T08:22:48 | 2019-12-21T08:22:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,013 | py | #AUTOGENERATED! DO NOT EDIT! File to edit: dev/33_text.models.core.ipynb (unless otherwise specified).
__all__ = ['LinearDecoder', 'SequentialRNN', 'get_language_model', 'SentenceEncoder', 'masked_concat_pool',
'PoolingLinearClassifier', 'get_text_classifier']
#Cell
from ...data.all import *
from ..core import *
from .awdlstm import *
#Cell
_model_meta = {AWD_LSTM: {'hid_name':'emb_sz', 'url':URLs.WT103_FWD, 'url_bwd':URLs.WT103_BWD,
'config_lm':awd_lstm_lm_config, 'split_lm': awd_lstm_lm_split,
'config_clas':awd_lstm_clas_config, 'split_clas': awd_lstm_clas_split},
AWD_QRNN: {'hid_name':'emb_sz',
'config_lm':awd_qrnn_lm_config, 'split_lm': awd_lstm_lm_split,
'config_clas':awd_qrnn_clas_config, 'split_clas': awd_lstm_clas_split},}
# Transformer: {'hid_name':'d_model', 'url':URLs.OPENAI_TRANSFORMER,
# 'config_lm':tfmer_lm_config, 'split_lm': tfmer_lm_split,
# 'config_clas':tfmer_clas_config, 'split_clas': tfmer_clas_split},
# TransformerXL: {'hid_name':'d_model',
# 'config_lm':tfmerXL_lm_config, 'split_lm': tfmerXL_lm_split,
# 'config_clas':tfmerXL_clas_config, 'split_clas': tfmerXL_clas_split}}
#Cell
class LinearDecoder(Module):
"To go on top of a RNNCore module and create a Language Model."
initrange=0.1
def __init__(self, n_out, n_hid, output_p=0.1, tie_encoder=None, bias=True):
self.decoder = nn.Linear(n_hid, n_out, bias=bias)
self.decoder.weight.data.uniform_(-self.initrange, self.initrange)
self.output_dp = RNNDropout(output_p)
if bias: self.decoder.bias.data.zero_()
if tie_encoder: self.decoder.weight = tie_encoder.weight
def forward(self, input):
raw_outputs, outputs = input
decoded = self.decoder(self.output_dp(outputs[-1]))
return decoded, raw_outputs, outputs
#Cell
class SequentialRNN(nn.Sequential):
"A sequential module that passes the reset call to its children."
def reset(self):
for c in self.children(): getattr(c, 'reset', noop)()
#Cell
def get_language_model(arch, vocab_sz, config=None, drop_mult=1.):
"Create a language model from `arch` and its `config`."
meta = _model_meta[arch]
config = ifnone(config, meta['config_lm']).copy()
for k in config.keys():
if k.endswith('_p'): config[k] *= drop_mult
tie_weights,output_p,out_bias = map(config.pop, ['tie_weights', 'output_p', 'out_bias'])
init = config.pop('init') if 'init' in config else None
encoder = arch(vocab_sz, **config)
enc = encoder.encoder if tie_weights else None
decoder = LinearDecoder(vocab_sz, config[meta['hid_name']], output_p, tie_encoder=enc, bias=out_bias)
model = SequentialRNN(encoder, decoder)
return model if init is None else model.apply(init)
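
# Illustrative call (assuming the AWD_LSTM architecture exported by .awdlstm above):
#   lm = get_language_model(AWD_LSTM, vocab_sz=10000, drop_mult=0.5)
# This returns a SequentialRNN(encoder, LinearDecoder); the decoder weight is tied to
# the encoder embedding whenever the config sets `tie_weights`.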
#Cell
def _pad_tensor(t, bs, val=0.):
if t.size(0) < bs: return torch.cat([t, val + t.new_zeros(bs-t.size(0), *t.shape[1:])])
return t
#Cell
class SentenceEncoder(Module):
"Create an encoder over `module` that can process a full sentence."
def __init__(self, bptt, module, pad_idx=1): store_attr(self, 'bptt,module,pad_idx')
def _concat(self, arrs, bs):
return [torch.cat([_pad_tensor(l[si],bs) for l in arrs], dim=1) for si in range(len(arrs[0]))]
def reset(self): getattr(self.module, 'reset', noop)()
def forward(self, input):
bs,sl = input.size()
self.reset()
raw_outputs,outputs,masks = [],[],[]
for i in range(0, sl, self.bptt):
r,o = self.module(input[:,i: min(i+self.bptt, sl)])
masks.append(input[:,i: min(i+self.bptt, sl)] == self.pad_idx)
raw_outputs.append(r)
outputs.append(o)
return self._concat(raw_outputs, bs),self._concat(outputs, bs),torch.cat(masks,dim=1)
#Cell
def masked_concat_pool(outputs, mask):
"Pool `MultiBatchEncoder` outputs into one vector [last_hidden, max_pool, avg_pool]"
output = outputs[-1]
lens = output.size(1) - mask.long().sum(dim=1)
avg_pool = output.masked_fill(mask[:, :, None], 0).sum(dim=1)
avg_pool.div_(lens.type(avg_pool.dtype)[:,None])
max_pool = output.masked_fill(mask[:,:,None], -float('inf')).max(dim=1)[0]
x = torch.cat([output[torch.arange(0, output.size(0)),lens-1], max_pool, avg_pool], 1) #Concat pooling.
return x
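
# Shape sketch (illustrative): with `outputs[-1]` of shape [bs, seq_len, n_hid] and a
# boolean `mask` of shape [bs, seq_len] marking padding, each pooled piece (last hidden
# state, max pool, average pool) is [bs, n_hid], so the concatenated result is
# [bs, 3 * n_hid] -- which is why `get_text_classifier` sizes its first layer as
# `config[meta['hid_name']] * 3`.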
#Cell
class PoolingLinearClassifier(Module):
"Create a linear classifier with pooling"
def __init__(self, dims, ps):
mod_layers = []
if len(ps) != len(dims)-1: raise ValueError("Number of layers and dropout values do not match.")
acts = [nn.ReLU(inplace=True)] * (len(dims) - 2) + [None]
layers = [LinBnDrop(i, o, p=p, act=a) for i,o,p,a in zip(dims[:-1], dims[1:], ps, acts)]
self.layers = nn.Sequential(*layers)
def forward(self, input):
raw,out,mask = input
x = masked_concat_pool(out, mask)
x = self.layers(x)
return x, raw, out
#Cell
def get_text_classifier(arch, vocab_sz, n_class, seq_len=72, config=None, drop_mult=1., lin_ftrs=None,
ps=None, pad_idx=1):
"Create a text classifier from `arch` and its `config`, maybe `pretrained`"
meta = _model_meta[arch]
config = ifnone(config, meta['config_clas']).copy()
for k in config.keys():
if k.endswith('_p'): config[k] *= drop_mult
if lin_ftrs is None: lin_ftrs = [50]
if ps is None: ps = [0.1]*len(lin_ftrs)
layers = [config[meta['hid_name']] * 3] + lin_ftrs + [n_class]
ps = [config.pop('output_p')] + ps
init = config.pop('init') if 'init' in config else None
encoder = SentenceEncoder(seq_len, arch(vocab_sz, **config), pad_idx=pad_idx)
model = SequentialRNN(encoder, PoolingLinearClassifier(layers, ps))
return model if init is None else model.apply(init) | [
"[email protected]"
] | |
c98395864af6a107b993684c44803e2fb2b6fca7 | 2181d99f84f4f7556efb13ac203a533fc87f9acd | /tools/CodeGenerators/codegen/app/src/generated/Gui/ComboBoxes/LithologicUnitInBedComboBox.py | 9ed3aae608989a0c741cc39df512c59f76b25e5c | [] | no_license | BackupTheBerlios/profilelogger-svn | 0f80fd8f63c3b413dc06ecc6d2be623f8ae2cc8c | 5ba067205316b0955f0c8876dd8b0f10672abc0a | refs/heads/master | 2020-05-18T16:33:31.154612 | 2010-04-24T16:51:28 | 2010-04-24T16:51:28 | 40,822,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | from InBedComboBox import *
class LithologicUnitInBedComboBox(InBedComboBox):
def __init__(self, parent, managementDialogClass, finderClass):
DataSelectionComboBox.__init__(self,
parent,
LithologicUnitInBedManagementDialog,
LithologicUnitInBedFinder)
| [
"jolo@28dda339-3e7f-0410-9691-cab309f6cb01"
] | jolo@28dda339-3e7f-0410-9691-cab309f6cb01 |
98e8617fddd53b570991cb56f984fbf05598530a | 34a2046714261a5e42692ab7a656eec708395243 | /appcode/mri/results/create_nifti_from_raw_data.py | 893307757154dbb38935566262c95113f314919d | [] | no_license | royshau/thesis | a64893ca25981bd8fff394161829d0147934a324 | a02a8cfea9e00bd98289419eb9f7fb78c36c028e | refs/heads/master | 2021-06-03T19:24:21.110455 | 2020-07-22T13:30:59 | 2020-07-22T13:30:59 | 115,911,615 | 0 | 0 | null | 2018-01-01T10:16:46 | 2018-01-01T10:03:25 | Python | UTF-8 | Python | false | false | 6,904 | py | # !/home/ohadsh/Tools/anaconda/bin/python
import numpy as np
import os
from appcode.mri.k_space.k_space_data_set import KspaceDataSet
from appcode.mri.data.write_nifti_data import write_nifti_data
from appcode.mri.data.mri_data_base import MriDataBase
from common.files_IO.file_handler import FileHandler
from appcode.mri.k_space.utils import get_image_from_kspace
from appcode.mri.k_space.data_creator import get_rv_mask
file_names = ['k_space_real_gt', 'k_space_imag_gt', 'meta_data']
import argparse
predict_info = {'width': 256, 'height': 256, 'channels': 1, 'dtype': 'float32'}
predict_names = {'real': '000000.predict_real.bin', 'imag': '000000.predict_imag.bin'}
import matplotlib.pyplot as plt
META_KEYS = {'hash':0, 'slice': 1, 'bit_pix':2, 'aug':3, 'norm_factor':4}
MASKS_DIR = '/media/ohadsh/Data/ohadsh/work/matlab/thesis/'
def create_nifti_from_raw_data(data_dir, predict_path, output_path, data_base, batch_size, num_of_cases=-1,
tt='train', source='k_space', random_sampling_factor=None, cs_path=None):
"""
Assumption - predict on all examples exists
This script create nifti files from k-space raw data, original and predictions.
:param data_dir:
:param predict_path:
:param output_path:
:param data_base:
:param batch_size:
:param num_of_cases:
:param tt:
:param random_sampling_factor:
:param cs_path: compressed sensing predicted path
:return:
"""
db = MriDataBase(data_base)
f_predict = {}
cs_pred = None
for name_pred in ['real', 'imag']:
f_predict[name_pred] = FileHandler(path=os.path.join(predict_path, predict_names[name_pred]),
info=predict_info, read_or_write='read', name=name_pred, memmap=True)
if cs_path is not None:
cs_pred = FileHandler(path=cs_path, info=predict_info, read_or_write='read', name='CS', memmap=True)
# write_nifti_data(cs_pred.memmap.transpose(2, 1, 0), output_path='/tmp/', name='CS')
data_set = KspaceDataSet(data_dir, file_names, stack_size=batch_size, shuffle=False, data_base=data_base, memmap=True)
data_set_tt = getattr(data_set, tt)
meta_data = data_set_tt.files_obj['meta_data'].memmap
# Get all unique case hash
all_cases = np.unique(meta_data[:, META_KEYS['hash']])
all_cases = all_cases if num_of_cases == -1 else all_cases[:num_of_cases]
# For each case, create indices, build a nifty from real image and predict
done = 1
for case in all_cases:
try:
idx = get_case_idx(case, meta_data)
name = db.info['hash_to_case'][case]
print("Working on case : %s, number= (%d / %d)" % (name, done, num_of_cases))
ref = os.path.join(db.data_path, name, "IXI"+name+".nii.gz")
if not os.path.exists(ref):
ref = None
res_out_path = os.path.join(output_path, name)
if not os.path.exists(res_out_path):
os.makedirs(res_out_path)
# Data creation
org_real = data_set_tt.files_obj['k_space_real_gt'].memmap[idx]
org_imag = data_set_tt.files_obj['k_space_imag_gt'].memmap[idx]
data = get_image_from_kspace(org_real, org_imag).transpose(1, 2, 0)
# data = norm_data(data)
write_nifti_data(data, output_path=res_out_path, reference=ref, name=name)
# Predict from network
pred_real = f_predict['real'].memmap[idx]
pred_imag = f_predict['imag'].memmap[idx]
if source == 'k_space':
data = get_image_from_kspace(pred_real, pred_imag).transpose(2, 1, 0)
else:
data = 256*np.abs(pred_real+ 1j * pred_imag).transpose(2, 1, 0)
# data = norm_data(data)
write_nifti_data(data, output_path=res_out_path, reference=ref, name=name+"_predict")
# Zero Padding
if random_sampling_factor is not None:
mask = get_rv_mask(mask_main_dir=MASKS_DIR, factor=random_sampling_factor)
org_real_zero_padded = mask * org_real
org_imag_zero_padded = mask * org_imag
data = get_image_from_kspace(org_real_zero_padded, org_imag_zero_padded).transpose(1, 2, 0)
# data = norm_data(data)
write_nifti_data(data, output_path=res_out_path, reference=ref, name=name+"_zeroPadding")
# CS
if cs_pred is not None:
data = cs_pred.memmap[idx].transpose(2, 1, 0)
# data = norm_data(data)
write_nifti_data(data, output_path=res_out_path, reference=ref, name=name + "_CS")
done += 1
except:
print "BAD: (min, max) = (%d, %d)" % (idx.min(), idx.max())
continue
def get_case_idx(case_hash, meta_data):
""" Get case indices given cash hash and meta data memmap
:param case_hash:
:param meta_data:
:return:
"""
idx = np.where(meta_data[:, META_KEYS['hash']] == case_hash)[0]
slice_idx_rel = np.argsort(meta_data[idx, META_KEYS['slice']])
slice_idx_abs = idx[slice_idx_rel]
return slice_idx_abs
def norm_data(data):
"""
Normalize data
:param data:
:return:
"""
norm_factor = 1.0 / data.max()
return (data * norm_factor).astype('float32')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='TBD.')
parser.add_argument('--tt', dest='tt', choices=['train', 'test'], default='train', type=str, help='train / test')
parser.add_argument('--data_dir', dest='data_dir', default='/media/ohadsh/Data/ohadsh/work/data/T1/sagittal/', type=str, help='data directory')
parser.add_argument('--num_of_cases', dest='num_of_cases', type=int, default=-1, help='number of cases')
parser.add_argument('--batch_size', dest='batch_size', type=int, default=50, help='mini batch size')
parser.add_argument('--data_base', dest='data_base', type=str, default='IXI_T1', help='data base name - for file info')
parser.add_argument('--predict_path', dest='predict_path', type=str, help='run path')
parser.add_argument('--output_path', dest='output_path', default='./', type=str, help='out path')
parser.add_argument('--source', dest='source', default='k_space', type=str, help='source')
parser.add_argument('--random_sampling_factor', dest='random_sampling_factor', type=int, default=None,
help='Random sampling factor for zero padding')
parser.add_argument('--cs_path', dest='cs_path', default=None, type=str, help='CS path')
args = parser.parse_args()
create_nifti_from_raw_data(args.data_dir, args.predict_path, args.output_path,
args.data_base, args.batch_size, args.num_of_cases, args.tt, args.source,
args.random_sampling_factor, args.cs_path) | [
"[email protected]"
] | |
b65efc5b76e81a98e3d8dbd1d9eeb2f0c675189f | 3940b4a507789e1fbbaffeb200149aee215f655a | /lc/399.EvaluateDivision.py | c17e9249c1acf2d9f8603f76a20ff582cbf7453c | [] | no_license | akimi-yano/algorithm-practice | 15f52022ec79542d218c6f901a54396a62080445 | 1abc28919abb55b93d3879860ac9c1297d493d09 | refs/heads/master | 2023-06-11T13:17:56.971791 | 2023-06-10T05:17:56 | 2023-06-10T05:17:56 | 239,395,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,013 | py | # 399. Evaluate Division
# Medium
# 2633
# 209
# Add to List
# Share
# You are given equations in the format A / B = k, where A and B are variables represented as strings, and k is a real number (floating-point number). Given some queries, return the answers. If the answer does not exist, return -1.0.
# The input is always valid. You may assume that evaluating the queries will result in no division by zero and there is no contradiction.
# Example 1:
# Input: equations = [["a","b"],["b","c"]], values = [2.0,3.0], queries = [["a","c"],["b","a"],["a","e"],["a","a"],["x","x"]]
# Output: [6.00000,0.50000,-1.00000,1.00000,-1.00000]
# Explanation:
# Given: a / b = 2.0, b / c = 3.0
# queries are: a / c = ?, b / a = ?, a / e = ?, a / a = ?, x / x = ?
# return: [6.0, 0.5, -1.0, 1.0, -1.0 ]
# Example 2:
# Input: equations = [["a","b"],["b","c"],["bc","cd"]], values = [1.5,2.5,5.0], queries = [["a","c"],["c","b"],["bc","cd"],["cd","bc"]]
# Output: [3.75000,0.40000,5.00000,0.20000]
# Example 3:
# Input: equations = [["a","b"]], values = [0.5], queries = [["a","b"],["b","a"],["a","c"],["x","y"]]
# Output: [0.50000,2.00000,-1.00000,-1.00000]
# Constraints:
# 1 <= equations.length <= 20
# equations[i].length == 2
# 1 <= equations[i][0].length, equations[i][1].length <= 5
# values.length == equations.length
# 0.0 < values[i] <= 20.0
# 1 <= queries.length <= 20
# queries[i].length == 2
# 1 <= queries[i][0].length, queries[i][1].length <= 5
# equations[i][0], equations[i][1], queries[i][0], queries[i][1] consist of lower case English letters and digits.
# THIS SOLUTION WORKS !!!
'''
solved it as a graph problem
made an adj_list and process both e1->e2 and e2->e1 with flipped values
traverse the adj_list with seen set, if you find the val, return 1 ; if its not in adj_list, return -1,
keep multiplying the weights and return it if its positive value
'''
class Solution:
def calcEquation(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:
self.adj_list = {}
for i in range(len(equations)):
e1,e2 = equations[i]
ans = values[i]
if e1 not in self.adj_list:
self.adj_list[e1] = []
self.adj_list[e1].append((e2, ans))
if e2 not in self.adj_list:
self.adj_list[e2] = []
self.adj_list[e2].append((e1, 1/ans))
res = []
for q1, q2 in queries:
res.append(self.helper(q1, q2, set([])))
return res
def helper(self, cur, target, seen):
if cur in seen:
return -1
seen.add(cur)
if cur not in self.adj_list:
return -1
if cur == target:
return 1
for next_node, weight in self.adj_list[cur]:
temp = weight * self.helper(next_node, target, seen)
if temp > 0:
return temp
return -1
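
# Quick check against Example 1 from the problem statement (illustrative; outside the
# LeetCode harness you would also need `from typing import List` for the annotations):
#   equations = [["a","b"],["b","c"]], values = [2.0,3.0]
#   Solution().calcEquation(equations, values,
#                           [["a","c"],["b","a"],["a","e"],["a","a"],["x","x"]])
#   -> [6.0, 0.5, -1.0, 1.0, -1.0]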
| [
"[email protected]"
] | |
c2356c672e81c8d0028769668339da65ff1697aa | f47863b3a595cbe7ec1c02040e7214481e4f078a | /plugins/waf/webknight.py | d46383a64c36e0f5877762772612a77f656d3ac9 | [] | no_license | gobiggo/0bscan | fe020b8f6f325292bda2b1fec25e3c49a431f373 | 281cf7c5c2181907e6863adde27bd3977b4a3474 | refs/heads/master | 2020-04-10T20:33:55.008835 | 2018-11-17T10:05:41 | 2018-11-17T10:05:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | import re
from config import HTTP_HEADER
__product__ = "WebKnight Application Firewall (AQTRONIX)"
def detect(content, **kwargs):
headers = kwargs.get("headers", None)
status = kwargs.get("status", None)
detection_schema = (
re.compile(r"\bwebknight", re.I),
re.compile(r"webknight", re.I)
)
if status is not None:
if status == 999 and headers.get(HTTP_HEADER.SERVER, "") == "WebKnight":
return True
for detection in detection_schema:
if detection.search(headers.get(HTTP_HEADER.SERVER, "")) is not None:
return True
| [
"[email protected]"
] | |
4c18b17007a61eeb0415eb384a1b4980e476f0ba | 0d01d65ed67faf09b31b6333013393194b4a25d0 | /twitter.py | 1bfef6c0550af908369c3cbe09c1674970e8f41f | [] | no_license | AshithaL/twitter-streaming | 0d5b16c56c92810496f6b635b03024679cc2c10b | 993b6e87fd1d546dcdde5c12db7e49791a5f5890 | refs/heads/master | 2022-11-09T02:52:17.843457 | 2020-06-15T04:43:41 | 2020-06-15T04:43:41 | 270,915,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,221 | py | import socket
import sys
import requests
import requests_oauthlib
import json
from sql_connection import conn
# Replace the values below with yours
ACCESS_TOKEN = '1252513694992330753-YpQY1SlyBWIN66ngHXeM8hcZWvvTeZ'
ACCESS_SECRET = 'reoC4xZdgp3bqRPjTC2ptxn00vUPrftWlhprHOBIp29jA'
CONSUMER_KEY = 'eLsiPuE8adtsJUt8hr0iMku3b'
CONSUMER_SECRET = 'p03sqgt8V8TYZbueGzA3SQPZXI5xuhpU5DkPj4fOGyra8YTiXn'
auth_handler = requests_oauthlib.OAuth1(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_SECRET)
def get_tweets():
url = 'https://stream.twitter.com/1.1/statuses/filter.json'
query_data = [('locations', '-122.75,36.8,-121.75,37.8,-74,40,-73,41'),
('track', '#')]
query_url = url + '?' + '&'.join([str(t[0]) + '=' + str(t[1]) for t in query_data])
response = requests.get(query_url, auth=auth_handler, stream=True)
print(query_url, response)
return response
def send_tweets_to_spark(http_resp, tcp_connection):
for lines in http_resp.iter_lines():
try:
full_tweet = json.loads(lines)
words = full_tweet['text'].split(' ')
tweet = ''
for w in words:
if '#' in w:
i = "".join(w.split(' '))
tweet += i
break
time = full_tweet['created_at']
location = "".join(full_tweet["user"]["location"].encode("utf-8"))
if tweet is not '':
tweet_text = tweet.encode('utf-8') + '&%' + location + '&%' + time
print("Tweet Text: " + tweet_text)
tcp_connection.send(tweet_text + '\n')
conn.execute(
'INSERT INTO tweet (time, tweet, location) VALUES (%s,%s,%s,%s,%s)',
(str(time), tweet, str(location)))
conn.commit()
except:
e = sys.exc_info()[0]
print("Error: %s" % e)
TCP_IP = "localhost"
TCP_PORT = 9009
conn = None
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((TCP_IP, TCP_PORT))
s.listen(1)
print("Waiting for TCP connection...")
conn, addr = s.accept()
print("Connected... Starting getting tweets.")
resp = get_tweets()
send_tweets_to_spark(resp, conn) | [
"[email protected]"
] | |
66c5b2d003be821beb2127b0ffef1023020ef83a | 4b44a299bafbd4ca408ce1c89c9fe4a449632783 | /python3/14_Code_Quality/04_mocking/example_5/test_mymodule2.py | fee202b03daf17899388a56cf126c60e665c2088 | [] | no_license | umunusb1/PythonMaterial | ecd33d32b2de664eaaae5192be7c3f6d6bef1d67 | 1e0785c55ccb8f5b9df1978e1773365a29479ce0 | refs/heads/master | 2023-01-23T23:39:35.797800 | 2020-12-02T19:29:00 | 2020-12-02T19:29:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 347 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from mymodule import rm
from unittest import TestCase, mock
class RmTestCase(TestCase):
@mock.patch('mymodule.os')
def test_rm(self, mock_os):
rm("any path")
# test that rm called os.remove with the right parameters
mock_os.remove.assert_called_with("any path") | [
"[email protected]"
] | |
31f19af81a8c9456a85f2bb8d9ab67906b28f744 | a034d4ba39789e4a351112c46dd04a38180cd06c | /appengine/findit/findit_v2/model/atomic_failure.py | 8c09489dd8dc8824de2e446a083c0238f3c8698b | [
"BSD-3-Clause"
] | permissive | asdfghjjklllllaaa/infra | 050ad249ab44f264b4e2080aa9537ce74aafb022 | 8f63af54e46194cd29291813f2790ff6e986804d | refs/heads/master | 2023-01-10T21:55:44.811835 | 2019-07-01T14:03:32 | 2019-07-01T14:03:32 | 194,691,941 | 1 | 0 | BSD-3-Clause | 2023-01-07T07:12:37 | 2019-07-01T14:45:29 | Python | UTF-8 | Python | false | false | 3,725 | py | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from google.appengine.ext import ndb
from findit_v2.model.gitiles_commit import Culprit
class FileInFailureLog(ndb.Model):
"""Class for a file mentioned in failure log."""
# normalized file path.
path = ndb.StringProperty(indexed=False)
# Mentioned line numbers of the file in failure log.
line_numbers = ndb.IntegerProperty(repeated=True, indexed=False)
class AtomicFailure(ndb.Model):
"""Base Class for an atom failure.
Atom failure means failures that cannot be further divided.
- In compile failure atom failure is a failed compile target.
- In test failure atom failure is a failed test.
Atom failures in the same build have the same parent.
"""
# Full step name.
step_ui_name = ndb.StringProperty()
# Id of the build in which this atom failure occurred the first time in
# a sequence of consecutive failed builds.
# For example, if a test passed in build 100, and failed in builds 101 - 105,
# then for atom failures of builds 101 - 105, their first_failed_build_id
# will all be id of build 101.
# First_failed_build_id can also be used to find the analysis on the
# failure: analysis only runs for the first time failures, so using the
# first_failed_build_id can get to the analysis.
first_failed_build_id = ndb.IntegerProperty()
# Id of the build in which this atom run (targets or test) was a pass and
# since the next build, it kept not passing (can failed, not run, or end
# with other status).
last_passed_build_id = ndb.IntegerProperty()
# Id of the first build forming the group.
# Whether or how to group failures differs from project to project.
# So this value could be empty.
failure_group_build_id = ndb.IntegerProperty()
# Key to the culprit commit found by rerun based analysis.
# There should be only one culprit for each failure.
culprit_commit_key = ndb.KeyProperty(Culprit)
# Key to the suspected commit found by heuristic analysis.
# There could be multiple suspects found for each failure.
suspect_commit_key = ndb.KeyProperty(Culprit, repeated=True)
# Optional information for heuristic analysis.
# Mentioned files in failure log for the failure.
files = ndb.LocalStructuredProperty(FileInFailureLog, repeated=True)
@property
def build_id(self):
"""Gets the id of the build that this failure belongs to."""
return self.key.parent().id()
@classmethod
def Create(cls,
failed_build_key,
step_ui_name,
first_failed_build_id=None,
last_passed_build_id=None,
failure_group_build_id=None,
files=None): # pragma: no cover
instance = cls(step_ui_name=step_ui_name, parent=failed_build_key)
files_objs = []
if files:
for path, line_numbers in files.iteritems():
files_objs.append(
FileInFailureLog(path=path, line_numbers=line_numbers))
instance.files = files_objs
instance.first_failed_build_id = first_failed_build_id
instance.last_passed_build_id = last_passed_build_id
instance.failure_group_build_id = failure_group_build_id
return instance
def GetFailureIdentifier(self):
"""Returns the identifier for the failure within its step.
Returns:
(list): information to identify a failure.
- For compile failures, it'll be the output_targets.
- For test failures, it'll be the [test_name].
"""
raise NotImplementedError
def GetMergedFailure(self):
"""Gets the most up-to-date merged_failure for the current failure."""
raise NotImplementedError
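
# Illustrative (hypothetical) sketch of how a concrete subclass fills in the abstract
# methods above for compile failures; the real subclasses live in other findit_v2
# model modules and may differ:
#
#   class CompileFailure(AtomicFailure):
#     output_targets = ndb.StringProperty(repeated=True)
#
#     def GetFailureIdentifier(self):
#       return self.output_targets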
| [
"[email protected]"
] | |
176f55c02c04c05f69692175fb1e2ad43a67c3e1 | ac39baffc572b72ddd4d25617014a51522ee30a8 | /challenge29-34/simpledu/forms.py | dcdb80fff200e1000ecdcd852c6560cf019f5e08 | [] | no_license | Yao-Phoenix/challenge | 01d72a63eb6c144bb59cd4d5f658e170c8ad0092 | d5ce1659f47cbe5295f65b7ac05ca25c79955f00 | refs/heads/master | 2020-09-24T17:42:02.380190 | 2020-02-17T03:15:25 | 2020-02-17T03:15:25 | 225,810,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,259 | py | #!/usr/bin/env python3
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField
from wtforms.validators import Length, Email, EqualTo, DataRequired, URL, NumberRange,Regexp
from simpledu.models import db, User, Course, Live
from wtforms import ValidationError, TextAreaField, IntegerField
class RegisterForm(FlaskForm):
#username = StringField('用户名', validators=[DataRequired(), Regexp(r'^[0_9a_zA_Z]{3,24}$', message='用户名只能包含数字和字母, 长度在3到24之间')])
username = StringField('用户名', validators=[DataRequired(), Length(3, 24)])
# Length(3, 24)])
email = StringField('邮箱', validators=[DataRequired(), Email()])
password = PasswordField('密码', validators=[DataRequired(), Length(6, 24)])
repeat_password = PasswordField(
'重复密码', validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('提交')
def create_user(self):
user = User()
self.populate_obj(user)
user.username = self.username.data
user.email = self.email.data
user.password = self.password.data
db.session.add(user)
db.session.commit()
return user
def validate_username(self, field):
if not field.data.isalnum():
raise ValidationError('用户名只能包含数字和字母')
if User.query.filter_by(username=field.data).first():
raise ValidationError('用户名已经存在')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first():
raise ValidationError('邮箱已经存在')
class LoginForm(FlaskForm):
username = StringField('用户名', validators=[DataRequired(), Length(3, 24)])
password = PasswordField('密码', validators=[DataRequired(), Length(6, 24)])
remember_me = BooleanField('记住我')
submit = SubmitField('提交')
    def validate_email(self, field):
if not User.query.filter_by(email=field.data).first():
raise ValidationError('邮箱未注册')
def validate_password(self, field):
user = User.query.filter_by(username=self.username.data).first()
if user and not user.check_password(field.data):
raise ValidationError('密码错误')
class CourseForm(FlaskForm):
name = StringField('课程名称', validators=[DataRequired(), Length(5, 32)])
description = TextAreaField(
'课程简介', validators=[DataRequired(), Length(20, 256)])
image_url = StringField('封面图片', validators=[DataRequired(), URL()])
author_id = IntegerField('作者ID', validators=[DataRequired(), NumberRange(
min=1, message='无效的用户ID')])
submit = SubmitField('提交')
def validate_author_id(self, field):
if not User.query.get(field.data):
raise ValidationError('用户不存在')
def create_course(self):
course = Course()
        # Populate the course object with the form data
print('--------------------------------')
print(self.populate_obj.__doc__)
self.populate_obj(course)
db.session.add(course)
db.session.commit()
return course
def update_course(self, course):
self.populate_obj(course)
db.session.add(course)
db.session.commit()
return course
class LiveForm(FlaskForm):
name = StringField('直播名称', validators=[DataRequired(), Length(1, 256)])
user_id = IntegerField('用户ID', validators=[DataRequired(), NumberRange(min=1, message=('无效的用户ID'))])
submit = SubmitField('提交')
def validate_user_id(self, field):
if not User.query.get(self.user_id.data):
raise ValidationError('用户不存在')
def create_live(self):
live = Live()
self.populate_obj(live)
db.session.add(live)
db.session.commit()
return live
def update_live(self, live):
self.populate_obj(live)
db.session.add(live)
db.session.commit()
return live
class MessageForm(FlaskForm):
text = StringField('发送后台消息', validators=[DataRequired(), Length(1, 256)])
submit = SubmitField('提交')
| [
"[email protected]"
] | |
b06f3ce756d293fc54760884cb39ee8ec0e66023 | 52877e2b60ed675eb16ea66c7398127294a313d3 | /t2t_bert/utils/tensor2tensor/models/transformer.py | 1968d1ca7843d41a518b60482ff1e30df1f5e8ee | [
"Apache-2.0"
] | permissive | yyht/BERT | 0dc82ea8e141cad4774e638dd7d44f781d77b6c3 | 480c909e0835a455606e829310ff949c9dd23549 | refs/heads/master | 2023-04-07T03:32:28.123608 | 2021-02-17T02:15:58 | 2021-02-17T02:15:58 | 162,232,730 | 37 | 12 | Apache-2.0 | 2022-11-21T21:15:04 | 2018-12-18T05:02:27 | Python | UTF-8 | Python | false | false | 104,406 | py | # coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer model from "Attention Is All You Need".
The Transformer model consists of an encoder and a decoder. Both are stacks
of self-attention layers followed by feed-forward layers. This model yields
good results on a number of problems, especially in NLP and machine translation.
See "Attention Is All You Need" (https://arxiv.org/abs/1706.03762) for the full
description of the model and the results obtained with its early version.
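
A typical way to train this model (illustrative; the exact flags depend on your setup)
is through the t2t-trainer binary, for example:
  t2t-trainer --model=transformer --hparams_set=transformer_base --problem=translate_ende_wmt32k --data_dir=$DATA_DIR --output_dir=$OUTPUT_DIR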
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range # pylint: disable=redefined-builtin
from tensor2tensor.data_generators import librispeech
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import modalities
from tensor2tensor.layers import transformer_layers
from tensor2tensor.layers import transformer_memory
from tensor2tensor.utils import beam_search
from tensor2tensor.utils import expert_utils
from tensor2tensor.utils import mlperf_log
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
import tensorflow as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.ops import inplace_ops
from tensorflow.python.util import nest
# pylint: enable=g-direct-tensorflow-import
# Alias some commonly reused layers, here and elsewhere.
transformer_prepare_encoder = transformer_layers.transformer_prepare_encoder
transformer_encoder = transformer_layers.transformer_encoder
transformer_ffn_layer = transformer_layers.transformer_ffn_layer
def transformer_encode(encoder_function, inputs, target_space, hparams,
attention_weights=None, features=None, losses=None,
prepare_encoder_fn=None, **kwargs):
"""Encode transformer inputs.
Args:
encoder_function: the encoder function
inputs: Transformer inputs [batch_size, input_length, 1, hidden_dim] which
will be flattened along the two spatial dimensions.
target_space: scalar, target space ID.
hparams: hyperparameters for model.
attention_weights: weight to store attention to.
features: optionally pass the entire features dictionary as well. This is
needed now for "packed" datasets.
losses: optional list onto which to append extra training losses
prepare_encoder_fn: optional, alternative to transformer_prepare_encoder.
**kwargs: additional arguments to pass to encoder_function
Returns:
Tuple of:
encoder_output: Encoder representation.
[batch_size, input_length, hidden_dim]
encoder_decoder_attention_bias: Bias and mask weights for
encoder-decoder attention. [batch_size, input_length]
"""
inputs = common_layers.flatten4d3d(inputs)
if not prepare_encoder_fn:
prepare_encoder_fn = transformer_prepare_encoder
encoder_input, self_attention_bias, encoder_decoder_attention_bias = (
prepare_encoder_fn(
inputs, target_space, hparams, features=features))
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT,
value=hparams.layer_prepostprocess_dropout,
hparams=hparams)
encoder_input = tf.nn.dropout(encoder_input,
1.0 - hparams.layer_prepostprocess_dropout)
attn_bias_for_padding = None
# Otherwise the encoder will just use encoder_self_attention_bias.
if hparams.unidirectional_encoder:
attn_bias_for_padding = encoder_decoder_attention_bias
encoder_output = encoder_function(
encoder_input,
self_attention_bias,
hparams,
nonpadding=features_to_nonpadding(features, "inputs"),
save_weights_to=attention_weights,
make_image_summary=not common_layers.is_xla_compiled(),
losses=losses,
attn_bias_for_padding=attn_bias_for_padding,
**kwargs)
return encoder_output, encoder_decoder_attention_bias
def transformer_decode(decoder_function,
decoder_input,
encoder_output,
encoder_decoder_attention_bias,
decoder_self_attention_bias,
hparams,
attention_weights=None,
cache=None,
decode_loop_step=None,
nonpadding=None,
losses=None,
**kwargs):
"""Decode Transformer outputs from encoder representation.
Args:
decoder_function: the decoder function
decoder_input: inputs to bottom of the model. [batch_size, decoder_length,
hidden_dim]
encoder_output: Encoder representation. [batch_size, input_length,
hidden_dim]
encoder_decoder_attention_bias: Bias and mask weights for encoder-decoder
attention. [batch_size, input_length]
decoder_self_attention_bias: Bias and mask weights for decoder
self-attention. [batch_size, decoder_length]
hparams: hyperparameters for model.
attention_weights: weight to store attention to.
cache: dict, containing tensors which are the results of previous
attentions, used for fast decoding.
decode_loop_step: An integer, step number of the decoding loop. Only used
for inference on TPU.
nonpadding: optional Tensor with shape [batch_size, decoder_length]
losses: optional list onto which to append extra training losses
**kwargs: additional arguments to pass to decoder_function
Returns:
Final decoder representation. [batch_size, decoder_length, hidden_dim]
"""
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT,
value=hparams.layer_prepostprocess_dropout,
hparams=hparams)
decoder_input = tf.nn.dropout(decoder_input,
1.0 - hparams.layer_prepostprocess_dropout)
decoder_output = decoder_function(
decoder_input,
encoder_output,
decoder_self_attention_bias,
encoder_decoder_attention_bias,
hparams,
cache=cache,
decode_loop_step=decode_loop_step,
nonpadding=nonpadding,
save_weights_to=attention_weights,
losses=losses,
**kwargs)
if (common_layers.is_xla_compiled() and
hparams.mode == tf.estimator.ModeKeys.TRAIN):
# TPU does not react kindly to extra dimensions.
# TODO(noam): remove this once TPU is more forgiving of extra dims.
return decoder_output
else:
# Expand since t2t expects 4d tensors.
return tf.expand_dims(decoder_output, axis=2)
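# Illustrative sketch (not part of the original file): how transformer_encode
# and transformer_decode are typically chained inside a model body. The hparams
# object is assumed to come from one of the registered hparams sets below
# (e.g. transformer_base()); shapes are the main contract.
#
#   inputs  = ...  # [batch_size, input_length, 1, hidden_dim]
#   targets = ...  # [batch_size, target_length, 1, hidden_dim]
#   encoder_output, enc_dec_bias = transformer_encode(
#       transformer_encoder, inputs, target_space_id, hparams)
#   decoder_input, dec_self_bias = transformer_prepare_decoder(
#       common_layers.flatten4d3d(targets), hparams)
#   decoder_output = transformer_decode(
#       transformer_decoder, decoder_input, encoder_output, enc_dec_bias,
#       dec_self_bias, hparams)
#   # decoder_output: [batch_size, target_length, 1, hidden_dim] off TPU.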
@registry.register_model
class Transformer(t2t_model.T2TModel):
"""Attention net. See file docstring."""
def __init__(self, *args, **kwargs):
super(Transformer, self).__init__(*args, **kwargs)
self.attention_weights = {} # For visualizing attention heads.
self.recurrent_memory_by_layer = None # Override to enable recurrent memory
self._encoder_function = transformer_encoder
self._decoder_function = transformer_decoder
self._init_cache_fn = _init_transformer_cache
self._prepare_encoder_fn = transformer_prepare_encoder
self._prepare_decoder_fn = transformer_prepare_decoder
def encode(self, inputs, target_space, hparams, features=None, losses=None):
"""Encode transformer inputs, see transformer_encode."""
return transformer_encode(
self._encoder_function, inputs, target_space, hparams,
attention_weights=self.attention_weights,
features=features, losses=losses,
prepare_encoder_fn=self._prepare_encoder_fn)
def decode(self,
decoder_input,
encoder_output,
encoder_decoder_attention_bias,
decoder_self_attention_bias,
hparams,
cache=None,
decode_loop_step=None,
nonpadding=None,
losses=None,
**kwargs):
"""Decode Transformer outputs, see transformer_decode."""
return transformer_decode(
self._decoder_function, decoder_input, encoder_output,
encoder_decoder_attention_bias, decoder_self_attention_bias,
hparams, attention_weights=self.attention_weights, cache=cache,
decode_loop_step=decode_loop_step, nonpadding=nonpadding, losses=losses,
**kwargs)
def body(self, features):
"""Transformer main model_fn.
Args:
features: Map of features to the model. Should contain the following:
"inputs": Transformer inputs. [batch_size, input_length, 1,
hidden_dim].
"targets": Target decoder outputs. [batch_size, decoder_length, 1,
hidden_dim]
"target_space_id": A scalar int from data_generators.problem.SpaceID.
Returns:
Final decoder representation. [batch_size, decoder_length, hidden_dim]
"""
hparams = self._hparams
losses = []
if self.has_input:
inputs = self._prepare_inputs_for_body(features)
target_space = features["target_space_id"]
encoder_output, encoder_decoder_attention_bias = self.encode(
inputs, target_space, hparams, features=features, losses=losses)
else:
encoder_output, encoder_decoder_attention_bias = (None, None)
targets = features["targets"]
targets_shape = common_layers.shape_list(targets)
targets = common_layers.flatten4d3d(targets)
decoder_input, decoder_self_attention_bias = self._prepare_decoder_fn(
targets, hparams, features=features)
# Not all subclasses of Transformer support keyword arguments related to
# recurrent memory, so only pass these arguments if memory is enabled.
decode_kwargs = {}
if self.recurrent_memory_by_layer is not None:
# TODO(kitaev): The chunk_number feature currently has the same shape as
# "targets", but this is only for the purposes of sharing sharding code.
# In fact every token within an example must have the same chunk number.
chunk_number_each_token = tf.squeeze(features["chunk_number"], (-1, -2))
chunk_number_each_example = chunk_number_each_token[:, 0]
# Uncomment the code below to verify that tokens within a batch share the
# same chunk number:
# with tf.control_dependencies([
# tf.assert_equal(chunk_number_each_token,
# chunk_number_each_example[:, None])
# ]):
# chunk_number_each_example = tf.identity(chunk_number_each_example)
decode_kwargs = dict(
recurrent_memory_by_layer=self.recurrent_memory_by_layer,
chunk_number=chunk_number_each_example,
)
decoder_output = self.decode(
decoder_input,
encoder_output,
encoder_decoder_attention_bias,
decoder_self_attention_bias,
hparams,
nonpadding=features_to_nonpadding(features, "targets"),
losses=losses,
**decode_kwargs
)
expected_attentions = features.get("expected_attentions")
if expected_attentions is not None:
attention_loss = common_attention.encoder_decoder_attention_loss(
expected_attentions, self.attention_weights,
hparams.expected_attention_loss_type,
hparams.expected_attention_loss_multiplier)
return decoder_output, {"attention_loss": attention_loss}
ret = tf.reshape(decoder_output, targets_shape)
if losses:
return ret, {"extra_loss": tf.add_n(losses)}
else:
return ret
def _prepare_inputs_for_body(self, features):
"""Prepare inputs for body.
Args:
features: Map of string to model features. Should contain
"inputs": Transformer inputs. [batch_size, input_length, 1,
hidden_dim].
Returns:
Inputs which will be passed to the model. [batch_size, input_length, 1,
hidden_dim]
"""
return features["inputs"]
def _greedy_infer(self, features, decode_length, use_tpu=False):
"""Fast version of greedy decoding.
Args:
      features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
use_tpu: A bool. Whether to build the inference graph for TPU.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": decoding log probs from the beam search,
None if using greedy decoding (beam_size=1)
}
Raises:
NotImplementedError: If there are multiple data shards.
"""
# For real-valued modalities use the slow decode path for now.
if (self._target_modality_is_real or
self._hparams.self_attention_type != "dot_product"):
return super(Transformer, self)._greedy_infer(features, decode_length)
with tf.variable_scope(self.name):
if use_tpu:
return self._fast_decode_tpu(features, decode_length)
return self._fast_decode(features, decode_length)
def _beam_decode(self,
features,
decode_length,
beam_size,
top_beams,
alpha,
use_tpu=False):
"""Beam search decoding.
Args:
      features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
alpha: Float that controls the length penalty. larger the alpha, stronger
the preference for longer translations.
use_tpu: A bool, whether to do beam decode on TPU.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": decoding log probs from the beam search,
None if using greedy decoding (beam_size=1)
}
"""
if (self._hparams.self_attention_type not in [
"dot_product", "dot_product_relative"
]):
# Caching is not guaranteed to work with attention types other than
# dot_product.
# TODO(petershaw): Support fast decoding when using relative
# position representations, i.e. "dot_product_relative" attention.
return self._beam_decode_slow(features, decode_length, beam_size,
top_beams, alpha, use_tpu)
with tf.variable_scope(self.name):
if use_tpu:
return self._fast_decode_tpu(features, decode_length, beam_size,
top_beams, alpha)
return self._fast_decode(features, decode_length, beam_size, top_beams,
alpha)
def _prepare_inputs_for_decode(self, features):
"""Prepare inputs for decoding.
Args:
features: A map of string to model features.
Returns:
Inputs after fixing shape and applying modality.
"""
dp = self._data_parallelism
hparams = self._hparams
inputs = features["inputs"]
# TODO(llion): Clean up this reshaping logic.
inputs = tf.expand_dims(inputs, axis=1)
if len(inputs.shape) < 5:
inputs = tf.expand_dims(inputs, axis=4)
s = common_layers.shape_list(inputs)
inputs = tf.reshape(inputs, [s[0] * s[1], s[2], s[3], s[4]])
# _shard_features called to ensure that the variable names match
inputs = self._shard_features({"inputs": inputs})["inputs"]
input_modality = self._problem_hparams.modality["inputs"]
input_vocab_size = self._problem_hparams.vocab_size["inputs"]
if input_vocab_size is not None and hasattr(hparams, "vocab_divisor"):
input_vocab_size += (-input_vocab_size) % hparams.vocab_divisor
modality_name = hparams.name.get("inputs",
modalities.get_name(input_modality))(
hparams, input_vocab_size)
with tf.variable_scope(modality_name):
bottom = hparams.bottom.get("inputs",
modalities.get_bottom(input_modality))
inputs = dp(bottom, inputs, hparams, input_vocab_size)
return inputs
def _fast_decode_tpu(self,
features,
decode_length,
beam_size=1,
top_beams=1,
alpha=1.0):
"""Fast decoding.
Implements both greedy and beam search decoding on TPU, uses beam search
iff beam_size > 1, otherwise beam search related arguments are ignored.
Args:
features: A map of string to model features.
decode_length: An integer, how many additional timesteps to decode.
beam_size: An integer, number of beams.
top_beams: An integer, how many of the beams to return.
alpha: A float that controls the length penalty. Larger the alpha,
stronger the preference for longer translations.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": decoding log probs from the beam search,
None if using greedy decoding (beam_size=1)
}.
Raises:
NotImplementedError: If there are multiple data shards.
"""
if self._num_datashards != 1:
raise NotImplementedError("Fast decoding only supports a single shard.")
if "targets_segmentation" in features:
raise NotImplementedError(
"Decoding not supported on packed datasets "
" If you want to decode from a dataset, use the non-packed version"
" of the dataset when decoding.")
dp = self._data_parallelism
hparams = self._hparams
target_modality = self._problem_hparams.modality["targets"]
target_vocab_size = self._problem_hparams.vocab_size["targets"]
if target_vocab_size is not None and hasattr(hparams, "vocab_divisor"):
target_vocab_size += (-target_vocab_size) % hparams.vocab_divisor
if self.has_input:
inputs_shape = common_layers.shape_list(features["inputs"])
if target_modality == modalities.ModalityType.CLASS_LABEL:
decode_length = 1
else:
decode_length = (
inputs_shape[1] + features.get("decode_length", decode_length))
batch_size = inputs_shape[0]
inputs = self._prepare_inputs_for_decode(features)
with tf.variable_scope("body"):
encoder_output, encoder_decoder_attention_bias = dp(
self.encode,
inputs,
features["target_space_id"],
hparams,
features=features)
encoder_output = encoder_output[0]
encoder_decoder_attention_bias = encoder_decoder_attention_bias[0]
partial_targets = None
else:
# The problem has no inputs.
encoder_output = None
encoder_decoder_attention_bias = None
# Prepare partial targets.
# In either features["inputs"] or features["targets"].
# We force the outputs to begin with these sequences.
partial_targets = features.get("inputs")
if partial_targets is None:
partial_targets = features["targets"]
assert partial_targets is not None
partial_targets = common_layers.expand_squeeze_to_nd(partial_targets, 2)
partial_targets = tf.to_int64(partial_targets)
partial_targets_shape = common_layers.shape_list(partial_targets)
partial_targets_length = partial_targets_shape[1]
decode_length = (
partial_targets_length + features.get("decode_length", decode_length))
batch_size = partial_targets_shape[0]
if hparams.pos == "timing":
positional_encoding = common_attention.get_timing_signal_1d(
decode_length + 1, hparams.hidden_size)
elif hparams.pos == "emb":
positional_encoding = common_attention.add_positional_embedding(
tf.zeros([1, decode_length + 1, hparams.hidden_size]),
hparams.max_length, "body/targets_positional_embedding", None)
else:
positional_encoding = None
def preprocess_targets(targets, i):
"""Performs preprocessing steps on the targets to prepare for the decoder.
This includes:
- Embedding the ids.
- Flattening to 3D tensor.
- Optionally adding timing signals.
Args:
targets: A tensor, inputs ids to the decoder. [batch_size, 1].
i: An integer, Step number of the decoding loop.
Returns:
A tensor, processed targets [batch_size, 1, hidden_dim].
"""
# _shard_features called to ensure that the variable names match
targets = self._shard_features({"targets": targets})["targets"]
modality_name = hparams.name.get(
"targets",
modalities.get_name(target_modality))(hparams, target_vocab_size)
with tf.variable_scope(modality_name):
bottom = hparams.bottom.get(
"targets", modalities.get_targets_bottom(target_modality))
targets = dp(bottom, targets, hparams, target_vocab_size)[0]
targets = common_layers.flatten4d3d(targets)
      # GO embeddings are all zero; this is because transformer_prepare_decoder
      # shifts the targets along by one for the input, which pads with zeros.
      # If the modality already maps GO to the zero embedding, this is not
      # needed.
targets = tf.cond(
tf.equal(i, 0), lambda: tf.zeros_like(targets), lambda: targets)
if positional_encoding is not None:
positional_encoding_shape = positional_encoding.shape.as_list()
targets += tf.slice(
positional_encoding, [0, i, 0],
[positional_encoding_shape[0], 1, positional_encoding_shape[2]])
return targets
decoder_self_attention_bias = (
common_attention.attention_bias_lower_triangle(decode_length))
if hparams.proximity_bias:
decoder_self_attention_bias += common_attention.attention_bias_proximal(
decode_length)
def symbols_to_logits_tpu_fn(ids, i, cache):
"""Go from ids to logits for next symbol on TPU.
Args:
ids: A tensor, symbol IDs.
i: An integer, step number of the decoding loop. Only used for inference
on TPU.
cache: A dict, containing tensors which are the results of previous
attentions, used for fast decoding.
Returns:
ret: A tensor, computed logits.
cache: A dict, containing tensors which are the results of previous
attentions, used for fast decoding.
"""
ids = ids[:, -1:]
targets = tf.expand_dims(tf.expand_dims(ids, axis=2), axis=3)
targets = preprocess_targets(targets, i)
bias_shape = decoder_self_attention_bias.shape.as_list()
bias = tf.slice(decoder_self_attention_bias, [0, 0, i, 0],
[bias_shape[0], bias_shape[1], 1, bias_shape[3]])
with tf.variable_scope("body"):
body_outputs = dp(
self.decode,
targets,
cache.get("encoder_output"),
cache.get("encoder_decoder_attention_bias"),
bias,
hparams,
cache,
i,
nonpadding=features_to_nonpadding(features, "targets"))
modality_name = hparams.name.get(
"targets",
modalities.get_name(target_modality))(hparams, target_vocab_size)
with tf.variable_scope(modality_name):
top = hparams.top.get("targets",
modalities.get_top(target_modality))
logits = dp(top, body_outputs, None, hparams, target_vocab_size)[0]
ret = tf.squeeze(logits, axis=[1, 2, 3])
if partial_targets is not None:
# If the position is within the given partial targets, we alter the
# logits to always return those values.
# A faster approach would be to process the partial targets in one
# iteration in order to fill the corresponding parts of the cache.
# This would require broader changes, though.
vocab_size = tf.shape(ret)[1]
def forced_logits():
return tf.one_hot(
tf.tile(
tf.slice(partial_targets, [0, i],
[partial_targets.shape.as_list()[0], 1]),
[beam_size]), vocab_size, 0.0, -1e9)
ret = tf.cond(
tf.less(i, partial_targets_length), forced_logits, lambda: ret)
return ret, cache
eos_id = self.get_decode_end_id() or beam_search.EOS_ID
ret = fast_decode_tpu(
encoder_output=encoder_output,
encoder_decoder_attention_bias=encoder_decoder_attention_bias,
symbols_to_logits_fn=symbols_to_logits_tpu_fn,
hparams=hparams,
decode_length=decode_length,
vocab_size=target_vocab_size,
init_cache_fn=self._init_cache_fn,
beam_size=beam_size,
top_beams=top_beams,
alpha=alpha,
batch_size=batch_size,
force_decode_length=self._decode_hparams.force_decode_length,
eos_id=eos_id)
if partial_targets is not None:
if beam_size <= 1 or top_beams <= 1:
ret["outputs"] = ret["outputs"][:, partial_targets_length:]
else:
ret["outputs"] = ret["outputs"][:, :, partial_targets_length:]
return ret
def get_decode_start_id(self):
"""Returns the id of the first decoder input symbol.
The default case maps None to a vector of 0's for transformer. This method
can be overridden to return a different id by a model wanting to use a
different decoder start symbol. The id returned by this method is used to
index the embedding matrix, and retrieve the vector that will be used as the
first input to the decoder
"""
return None
def get_decode_end_id(self):
"""Returns the id of the output symbol that terminates decoding.
This method can be overridden by a different model. The id returned by this
method is used to check if the generation is complete during decoding.
"""
return None
def _fast_decode(self,
features,
decode_length,
beam_size=1,
top_beams=1,
alpha=1.0):
"""Fast decoding.
Implements both greedy and beam search decoding, uses beam search iff
beam_size > 1, otherwise beam search related arguments are ignored.
Args:
features: a map of string to model features.
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
alpha: Float that controls the length penalty. larger the alpha, stronger
the preference for longer translations.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": decoding log probs from the beam search,
None if using greedy decoding (beam_size=1)
}
Raises:
NotImplementedError: If there are multiple data shards.
"""
if self._num_datashards != 1:
raise NotImplementedError("Fast decoding only supports a single shard.")
dp = self._data_parallelism
hparams = self._hparams
target_modality = self._problem_hparams.modality["targets"]
target_vocab_size = self._problem_hparams.vocab_size["targets"]
if target_vocab_size is not None and hasattr(hparams, "vocab_divisor"):
target_vocab_size += (-target_vocab_size) % hparams.vocab_divisor
if "targets_segmentation" in features:
raise NotImplementedError(
"Decoding not supported on packed datasets "
" If you want to decode from a dataset, use the non-packed version"
" of the dataset when decoding.")
if self.has_input:
inputs_shape = common_layers.shape_list(features["inputs"])
if target_modality == modalities.ModalityType.CLASS_LABEL:
decode_length = 1
else:
decode_length = (
inputs_shape[1] + features.get("decode_length", decode_length))
batch_size = inputs_shape[0]
inputs = self._prepare_inputs_for_decode(features)
with tf.variable_scope("body"):
encoder_output, encoder_decoder_attention_bias = dp(
self.encode,
inputs,
features["target_space_id"],
hparams,
features=features)
encoder_output = encoder_output[0]
encoder_decoder_attention_bias = encoder_decoder_attention_bias[0]
partial_targets = features.get("partial_targets")
else:
# The problem has no inputs.
encoder_output = None
encoder_decoder_attention_bias = None
# Prepare partial targets.
# In either features["inputs"] or features["targets"].
# We force the outputs to begin with these sequences.
partial_targets = features.get("inputs")
if partial_targets is None:
partial_targets = features["targets"]
assert partial_targets is not None
if partial_targets is not None:
partial_targets = common_layers.expand_squeeze_to_nd(partial_targets, 2)
partial_targets = tf.to_int64(partial_targets)
partial_targets_shape = common_layers.shape_list(partial_targets)
partial_targets_length = partial_targets_shape[1]
decode_length = (
partial_targets_length + features.get("decode_length", decode_length))
batch_size = partial_targets_shape[0]
if hparams.pos == "timing":
positional_encoding = common_attention.get_timing_signal_1d(
decode_length + 1, hparams.hidden_size)
elif hparams.pos == "emb":
positional_encoding = common_attention.add_positional_embedding(
tf.zeros([1, decode_length, hparams.hidden_size]), hparams.max_length,
"body/targets_positional_embedding", None)
else:
positional_encoding = None
def preprocess_targets(targets, i):
"""Performs preprocessing steps on the targets to prepare for the decoder.
This includes:
- Embedding the ids.
- Flattening to 3D tensor.
- Optionally adding timing signals.
Args:
targets: inputs ids to the decoder. [batch_size, 1]
i: scalar, Step number of the decoding loop.
Returns:
Processed targets [batch_size, 1, hidden_dim]
"""
# _shard_features called to ensure that the variable names match
targets = self._shard_features({"targets": targets})["targets"]
modality_name = hparams.name.get(
"targets",
modalities.get_name(target_modality))(hparams, target_vocab_size)
with tf.variable_scope(modality_name):
bottom = hparams.bottom.get(
"targets", modalities.get_targets_bottom(target_modality))
targets = dp(bottom, targets, hparams, target_vocab_size)[0]
targets = common_layers.flatten4d3d(targets)
      # GO embeddings are all zero; this is because transformer_prepare_decoder
      # shifts the targets along by one for the input, which pads with zeros.
      # If the modality already maps GO to the zero embedding, this is not
      # needed.
if not self.get_decode_start_id():
targets = tf.cond(
tf.equal(i, 0), lambda: tf.zeros_like(targets), lambda: targets)
if positional_encoding is not None:
targets += positional_encoding[:, i:i + 1]
return targets
decoder_self_attention_bias = (
common_attention.attention_bias_lower_triangle(decode_length))
if hparams.proximity_bias:
decoder_self_attention_bias += common_attention.attention_bias_proximal(
decode_length)
# Create tensors for encoder-decoder attention history
att_cache = {"attention_history": {}}
num_layers = hparams.num_decoder_layers or hparams.num_hidden_layers
if encoder_output is not None:
att_batch_size, enc_seq_length = common_layers.shape_list(
encoder_output)[0:2]
for layer in range(num_layers):
att_cache["attention_history"]["layer_%d" % layer] = tf.zeros(
[att_batch_size, hparams.num_heads, 0, enc_seq_length])
def update_decoder_attention_history(cache):
"""Save attention weights in cache, e.g., for vizualization."""
for k in [x for x in self.attention_weights
if "decoder" in x and "self" not in x and "logits" not in x]:
idx = k.find("layer_")
if idx < 0:
continue
# Get layer number from the string name.
layer_nbr = k[idx + 6:]
idx = 0
while idx + 1 < len(layer_nbr) and layer_nbr[:idx + 1].isdigit():
idx += 1
layer_nbr = "layer_%d" % int(layer_nbr[:idx])
if layer_nbr in cache["attention_history"]:
cache["attention_history"][layer_nbr] = tf.concat(
[cache["attention_history"][layer_nbr],
self.attention_weights[k]],
axis=2)
def symbols_to_logits_fn(ids, i, cache):
"""Go from ids to logits for next symbol."""
ids = ids[:, -1:]
targets = tf.expand_dims(tf.expand_dims(ids, axis=2), axis=3)
targets = preprocess_targets(targets, i)
bias = decoder_self_attention_bias[:, :, i:i + 1, :i + 1]
with tf.variable_scope("body"):
body_outputs = dp(
self.decode,
targets,
cache.get("encoder_output"),
cache.get("encoder_decoder_attention_bias"),
bias,
hparams,
cache,
nonpadding=features_to_nonpadding(features, "targets"))
update_decoder_attention_history(cache)
modality_name = hparams.name.get(
"targets",
modalities.get_name(target_modality))(hparams, target_vocab_size)
with tf.variable_scope(modality_name):
top = hparams.top.get("targets", modalities.get_top(target_modality))
logits = dp(top, body_outputs, None, hparams, target_vocab_size)[0]
ret = tf.squeeze(logits, axis=[1, 2, 3])
if partial_targets is not None:
# If the position is within the given partial targets, we alter the
# logits to always return those values.
# A faster approach would be to process the partial targets in one
# iteration in order to fill the corresponding parts of the cache.
# This would require broader changes, though.
vocab_size = tf.shape(ret)[1]
def forced_logits():
return tf.one_hot(
tf.tile(partial_targets[:, i], [beam_size]), vocab_size, 0.0,
-1e9)
ret = tf.cond(
tf.less(i, partial_targets_length), forced_logits, lambda: ret)
return ret, cache
sos_id = self.get_decode_start_id() or 0
eos_id = self.get_decode_end_id() or beam_search.EOS_ID
ret = fast_decode(
encoder_output=encoder_output,
encoder_decoder_attention_bias=encoder_decoder_attention_bias,
symbols_to_logits_fn=symbols_to_logits_fn,
hparams=hparams,
decode_length=decode_length,
vocab_size=target_vocab_size,
init_cache_fn=self._init_cache_fn,
beam_size=beam_size,
top_beams=top_beams,
alpha=alpha,
batch_size=batch_size,
force_decode_length=self._decode_hparams.force_decode_length,
sos_id=sos_id,
eos_id=eos_id,
cache=att_cache)
if partial_targets is not None:
if beam_size <= 1 or top_beams <= 1:
ret["outputs"] = ret["outputs"][:, partial_targets_length:]
else:
ret["outputs"] = ret["outputs"][:, :, partial_targets_length:]
return ret
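# Illustrative note (not from the original source): the forced_logits closures
# above use tf.one_hot with on_value=0.0 and off_value=-1e9 to produce a
# log-probability-like row that pins decoding to the partial target at step i:
#
#   tf.one_hot([2], 4, 0.0, -1e9)
#   # -> [[-1e9, -1e9, 0.0, -1e9]]  (argmax is 2, so token id 2 is forced)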
def _init_transformer_cache(cache, hparams, batch_size, attention_init_length,
encoder_output, encoder_decoder_attention_bias,
scope_prefix):
"""Create the initial cache for Transformer fast decoding."""
key_channels = hparams.attention_key_channels or hparams.hidden_size
value_channels = hparams.attention_value_channels or hparams.hidden_size
num_layers = hparams.num_decoder_layers or hparams.num_hidden_layers
vars_3d_num_heads = (
hparams.num_heads if hparams.get("attention_variables_3d") else 0)
if cache is None:
cache = {}
cache.update({
"layer_%d" % layer: { # pylint: disable=g-complex-comprehension
"k":
common_attention.split_heads(
tf.zeros([batch_size,
attention_init_length,
key_channels]), hparams.num_heads),
"v":
common_attention.split_heads(
tf.zeros([batch_size,
attention_init_length,
value_channels]), hparams.num_heads),
} for layer in range(num_layers)
})
  # If `ffn_layer` is "dense_relu_dense" or "conv_hidden_relu", then the cache
  # key "f" won't be used, which means that the shape of `cache["f"]` won't be
  # changed to `[beam_size * batch_size, decode_length, hparams.hidden_size]`,
  # and applying the `nest.map_structure` reshape function to it may cause an
  # error.
if hparams.ffn_layer not in ["dense_relu_dense", "conv_hidden_relu"]:
for layer in range(num_layers):
cache["layer_%d" % layer]["f"] = tf.zeros(
[batch_size, 0, hparams.hidden_size])
if encoder_output is not None:
for layer in range(num_layers):
layer_name = "layer_%d" % layer
with tf.variable_scope(
"%sdecoder/%s/encdec_attention/multihead_attention" %
(scope_prefix, layer_name)):
k_encdec = common_attention.compute_attention_component(
encoder_output,
key_channels,
name="k",
vars_3d_num_heads=vars_3d_num_heads)
k_encdec = common_attention.split_heads(k_encdec, hparams.num_heads)
v_encdec = common_attention.compute_attention_component(
encoder_output,
value_channels,
name="v",
vars_3d_num_heads=vars_3d_num_heads)
v_encdec = common_attention.split_heads(v_encdec, hparams.num_heads)
cache[layer_name]["k_encdec"] = k_encdec
cache[layer_name]["v_encdec"] = v_encdec
cache["encoder_output"] = encoder_output
cache["encoder_decoder_attention_bias"] = encoder_decoder_attention_bias
return cache
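# Illustrative sketch of the cache layout built above (assumed example:
# hparams.num_heads=8 and key/value channels=512, i.e. 64 channels per head):
#
#   cache = {
#       "layer_0": {
#           "k": ...,  # [batch, 8, attention_init_length, 64] via split_heads
#           "v": ...,  # [batch, 8, attention_init_length, 64]
#           # plus "k_encdec"/"v_encdec" when encoder_output is not None
#       },
#       ...
#       "encoder_output": ...,
#       "encoder_decoder_attention_bias": ...,
#   }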
def fast_decode_tpu(encoder_output,
encoder_decoder_attention_bias,
symbols_to_logits_fn,
hparams,
decode_length,
vocab_size,
init_cache_fn=_init_transformer_cache,
beam_size=1,
top_beams=1,
alpha=1.0,
sos_id=0,
eos_id=beam_search.EOS_ID,
batch_size=None,
force_decode_length=False,
scope_prefix="body/",
use_top_k_with_unique=True):
"""Given encoder output and a symbols to logits function, does fast decoding.
Implements both greedy and beam search decoding for TPU, uses beam search iff
beam_size > 1, otherwise beam search related arguments are ignored.
Args:
encoder_output: A tensor, output from encoder.
encoder_decoder_attention_bias: A tensor, bias for use in encoder-decoder
attention.
symbols_to_logits_fn: Incremental decoding, function mapping triple `(ids,
step, cache)` to symbol logits.
hparams: Run hyperparameters.
decode_length: An integer, how many additional timesteps to decode.
vocab_size: Output vocabulary size.
init_cache_fn: Function that returns the initial cache dict.
beam_size: An integer, number of beams.
top_beams: An integer, how many of the beams to return.
alpha: A float that controls the length penalty. Larger the alpha, stronger
the preference for longer translations.
sos_id: Start-of-sequence symbol.
eos_id: End-of-sequence symbol.
batch_size: An integer, must be passed if there is no input.
force_decode_length: A bool, whether to force the full decode length, or if
False, stop when all beams hit eos_id.
scope_prefix: str, prefix for decoder layer variable scopes.
use_top_k_with_unique: bool, whether to use a fast (but decreased precision)
top_k during beam search.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if top_beams == 1 or
[batch_size, top_beams, <= decode_length] otherwise
"scores": decoding log probs from the beam search,
None if using greedy decoding (beam_size=1)
}.
Raises:
NotImplementedError: If beam size > 1 with partial targets.
"""
if encoder_output is not None:
batch_size = common_layers.shape_list(encoder_output)[0]
cache = init_cache_fn(None, hparams, batch_size, decode_length,
encoder_output, encoder_decoder_attention_bias,
scope_prefix)
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_SEQ_BEAM_SEARCH,
value={
"vocab_size": vocab_size,
"batch_size": batch_size,
"beam_size": beam_size,
"alpha": alpha,
"max_decode_length": decode_length
},
hparams=hparams)
if beam_size > 1: # Beam Search
initial_ids = sos_id * tf.ones([batch_size], dtype=tf.int32)
decoded_ids, scores, _ = beam_search.beam_search(
symbols_to_logits_fn,
initial_ids,
beam_size,
decode_length,
vocab_size,
alpha,
states=cache,
eos_id=eos_id,
stop_early=(top_beams == 1),
use_tpu=True,
use_top_k_with_unique=use_top_k_with_unique)
if top_beams == 1:
decoded_ids = decoded_ids[:, 0, 1:]
scores = scores[:, 0]
else:
decoded_ids = decoded_ids[:, :top_beams, 1:]
scores = scores[:, :top_beams]
else: # Greedy
def inner_loop(i, hit_eos, next_id, decoded_ids, cache, log_prob):
"""One step of greedy decoding."""
logits, cache = symbols_to_logits_fn(next_id, i, cache)
log_probs = common_layers.log_prob_from_logits(logits)
temperature = getattr(hparams, "sampling_temp", 0.0)
keep_top = getattr(hparams, "sampling_keep_top_k", -1)
if hparams.sampling_method == "argmax":
temperature = 0.0
next_id = common_layers.sample_with_temperature(
logits, temperature, keep_top)
log_prob_indices = tf.stack([tf.range(tf.to_int64(batch_size)), next_id],
axis=1)
log_prob += tf.gather_nd(
log_probs, log_prob_indices) * (1 - tf.to_float(hit_eos))
# Note(thangluong): we purposely update hit_eos after aggregating log_prob
# There is a subtle detail here that we want to include log_probs up to
# (and inclusive of) the first eos generated, but not subsequent tokens.
hit_eos |= tf.equal(next_id, eos_id)
next_id = tf.expand_dims(next_id, axis=1)
decoded_ids = tf.transpose(decoded_ids)
decoded_ids = inplace_ops.alias_inplace_update(
decoded_ids, i, tf.squeeze(next_id, axis=1))
decoded_ids = tf.transpose(decoded_ids)
return i + 1, hit_eos, next_id, decoded_ids, cache, log_prob
def is_not_finished(i, hit_eos, *_):
finished = i >= decode_length
if not force_decode_length:
finished |= tf.reduce_all(hit_eos)
return tf.logical_not(finished)
decoded_ids = tf.zeros([batch_size, decode_length], dtype=tf.int64)
hit_eos = tf.fill([batch_size], False)
next_id = sos_id * tf.ones([batch_size, 1], dtype=tf.int64)
initial_log_prob = tf.zeros([batch_size], dtype=tf.float32)
def compute_cache_shape_invariants(tensor):
return tf.TensorShape(tensor.shape.as_list())
_, _, _, decoded_ids, _, log_prob = tf.while_loop(
is_not_finished,
inner_loop, [
tf.constant(0), hit_eos, next_id, decoded_ids, cache,
initial_log_prob
],
shape_invariants=[
tf.TensorShape([]),
tf.TensorShape([batch_size]),
tf.TensorShape([batch_size, 1]),
tf.TensorShape([batch_size, decode_length]),
nest.map_structure(compute_cache_shape_invariants, cache),
tf.TensorShape([batch_size]),
])
scores = log_prob
return {"outputs": decoded_ids, "scores": scores}
def fast_decode(encoder_output,
encoder_decoder_attention_bias,
symbols_to_logits_fn,
hparams,
decode_length,
vocab_size,
init_cache_fn=_init_transformer_cache,
beam_size=1,
top_beams=1,
alpha=1.0,
sos_id=0,
eos_id=beam_search.EOS_ID,
batch_size=None,
force_decode_length=False,
scope_prefix="body/",
cache=None):
"""Given encoder output and a symbols to logits function, does fast decoding.
Implements both greedy and beam search decoding, uses beam search iff
beam_size > 1, otherwise beam search related arguments are ignored.
Args:
encoder_output: Output from encoder.
encoder_decoder_attention_bias: a bias tensor for use in encoder-decoder
attention
symbols_to_logits_fn: Incremental decoding; function mapping triple `(ids,
step, cache)` to symbol logits.
hparams: run hyperparameters
decode_length: an integer. How many additional timesteps to decode.
vocab_size: Output vocabulary size.
init_cache_fn: Function that returns the initial cache dict.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
alpha: Float that controls the length penalty. larger the alpha, stronger
the preference for longer translations.
    sos_id: Start-of-sequence symbol.
eos_id: End-of-sequence symbol in beam search.
batch_size: an integer scalar - must be passed if there is no input
force_decode_length: bool, whether to force the full decode length, or if
False, stop when all beams hit eos_id.
scope_prefix: str, prefix for decoder layer variable scopes.
cache: cache dictionary for additional predictions.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if top_beams == 1 or
[batch_size, top_beams, <= decode_length] otherwise
"scores": decoding log probs from the beam search,
None if using greedy decoding (beam_size=1)
}
Raises:
NotImplementedError: If beam size > 1 with partial targets.
"""
if encoder_output is not None:
batch_size = common_layers.shape_list(encoder_output)[0]
cache = init_cache_fn(
cache=cache,
hparams=hparams,
batch_size=batch_size,
attention_init_length=0,
encoder_output=encoder_output,
encoder_decoder_attention_bias=encoder_decoder_attention_bias,
scope_prefix=scope_prefix)
if beam_size > 1: # Beam Search
initial_ids = sos_id * tf.ones([batch_size], dtype=tf.int32)
decoded_ids, scores, cache = beam_search.beam_search(
symbols_to_logits_fn,
initial_ids,
beam_size,
decode_length,
vocab_size,
alpha,
states=cache,
eos_id=eos_id,
stop_early=(top_beams == 1))
if top_beams == 1:
decoded_ids = decoded_ids[:, 0, 1:]
scores = scores[:, 0]
else:
decoded_ids = decoded_ids[:, :top_beams, 1:]
scores = scores[:, :top_beams]
else: # Greedy
def inner_loop(i, hit_eos, next_id, decoded_ids, cache, log_prob):
"""One step of greedy decoding."""
logits, cache = symbols_to_logits_fn(next_id, i, cache)
log_probs = common_layers.log_prob_from_logits(logits)
temperature = getattr(hparams, "sampling_temp", 0.0)
keep_top = getattr(hparams, "sampling_keep_top_k", -1)
if hparams.sampling_method == "argmax":
temperature = 0.0
next_id = common_layers.sample_with_temperature(
logits, temperature, keep_top)
log_prob_indices = tf.stack([tf.range(tf.to_int64(batch_size)), next_id],
axis=1)
log_prob += tf.gather_nd(
log_probs, log_prob_indices) * (1 - tf.to_float(hit_eos))
# Note(thangluong): we purposely update hit_eos after aggregating log_prob
# There is a subtle detail here that we want to include log_probs up to
# (and inclusive of) the first eos generated, but not subsequent tokens.
hit_eos |= tf.equal(next_id, eos_id)
next_id = tf.expand_dims(next_id, axis=1)
decoded_ids = tf.concat([decoded_ids, next_id], axis=1)
return i + 1, hit_eos, next_id, decoded_ids, cache, log_prob
def is_not_finished(i, hit_eos, *_):
finished = i >= decode_length
if not force_decode_length:
finished |= tf.reduce_all(hit_eos)
return tf.logical_not(finished)
decoded_ids = tf.zeros([batch_size, 0], dtype=tf.int64)
hit_eos = tf.fill([batch_size], False)
next_id = sos_id * tf.ones([batch_size, 1], dtype=tf.int64)
initial_log_prob = tf.zeros([batch_size], dtype=tf.float32)
_, _, _, decoded_ids, cache, log_prob = tf.while_loop(
is_not_finished,
inner_loop, [
tf.constant(0), hit_eos, next_id, decoded_ids, cache,
initial_log_prob
],
shape_invariants=[
tf.TensorShape([]),
tf.TensorShape([None]),
tf.TensorShape([None, None]),
tf.TensorShape([None, None]),
nest.map_structure(beam_search.get_state_shape_invariants, cache),
tf.TensorShape([None]),
])
scores = log_prob
return {"outputs": decoded_ids, "scores": scores, "cache": cache}
@registry.register_model
class TransformerScorer(Transformer):
"""Transformer model, but only scores in PREDICT mode.
Checkpoints between Transformer and TransformerScorer are interchangeable.
"""
def __init__(self, *args, **kwargs):
super(TransformerScorer, self).__init__(*args, **kwargs)
self._name = "transformer"
self._base_name = "transformer"
def infer(self,
features=None,
decode_length=50,
beam_size=1,
top_beams=1,
alpha=0.0,
use_tpu=False):
"""Returns the targets and their log probabilities."""
del decode_length, beam_size, top_beams, alpha, use_tpu
assert features is not None
# Run the model
self.hparams.force_full_predict = True
with tf.variable_scope(self.name):
logits, _ = self.model_fn(features)
assert len(logits.shape) == 5 # [batch, time, 1, 1, vocab]
logits = tf.squeeze(logits, [2, 3])
# Compute the log probabilities
log_probs = common_layers.log_prob_from_logits(logits)
targets = features["targets"]
assert len(targets.shape) == 4 # [batch, time, 1, 1]
targets = tf.squeeze(targets, [2, 3])
# Slice out the log_probs of the targets
log_probs = common_layers.index_last_dim_with_indices(log_probs, targets)
# Sum over time to get the log_prob of the sequence
scores = tf.reduce_sum(log_probs, axis=1)
return {"outputs": targets, "scores": scores}
@registry.register_model
class TransformerEncoder(t2t_model.T2TModel):
"""Transformer, encoder only."""
def body(self, features):
hparams = self._hparams
inputs = features["inputs"]
target_space = features["target_space_id"]
inputs = common_layers.flatten4d3d(inputs)
(encoder_input, encoder_self_attention_bias, _) = (
transformer_prepare_encoder(inputs, target_space, hparams))
encoder_input = tf.nn.dropout(encoder_input,
1.0 - hparams.layer_prepostprocess_dropout)
encoder_output = transformer_encoder(
encoder_input,
encoder_self_attention_bias,
hparams,
nonpadding=features_to_nonpadding(features, "inputs"))
encoder_output = tf.expand_dims(encoder_output, 2)
return encoder_output
@registry.register_model
class TransformerRegressor(TransformerEncoder):
"""Transformer inheriting from Encoder, for the regression problem.
Final result is a tensor that has a shape of (?, 1, 1, 1).
"""
def top(self, body_output, features):
"""Computes single scalar value from body_output."""
with tf.variable_scope("reg_top_ffn"):
x = body_output
x = tf.reduce_mean(x, axis=[1, 2], keepdims=True)
res = tf.layers.dense(x, 1, name="model_top")
return res
def features_to_nonpadding(features, inputs_or_targets="inputs"):
key = inputs_or_targets + "_segmentation"
if features and key in features:
return tf.minimum(tf.to_float(features[key]), 1.0)
return None
def transformer_prepare_decoder(targets, hparams, features=None, pad=None):
"""Prepare one shard of the model for the decoder.
Args:
targets: a Tensor.
hparams: run hyperparameters
features: optionally pass the entire features dictionary as well. This is
needed now for "packed" datasets.
pad: vector to use for padding when shifting targets right
Returns:
decoder_input: a Tensor, bottom of decoder stack
decoder_self_attention_bias: a bias tensor for use in decoder self-attention
"""
if hparams.causal_decoder_self_attention:
# Causal attention.
if hparams.prepend_mode == "prepend_inputs_full_attention":
decoder_self_attention_bias = (
common_attention.attention_bias_prepend_inputs_full_attention(
common_attention.embedding_to_padding(targets)))
else:
decoder_self_attention_bias = (
common_attention.attention_bias_lower_triangle(
common_layers.shape_list(targets)[1]))
else:
# Full attention.
decoder_padding = common_attention.embedding_to_padding(targets)
decoder_self_attention_bias = (
common_attention.attention_bias_ignore_padding(decoder_padding))
if features and "targets_segmentation" in features:
# "Packed" dataset - keep the examples from seeing each other.
targets_segmentation = features["targets_segmentation"]
targets_position = features["targets_position"]
decoder_self_attention_bias += common_attention.attention_bias_same_segment(
targets_segmentation, targets_segmentation)
else:
targets_position = None
if hparams.proximity_bias:
decoder_self_attention_bias += common_attention.attention_bias_proximal(
common_layers.shape_list(targets)[1])
decoder_input = common_layers.shift_right_3d(targets, pad)
if hparams.pos == "timing":
if targets_position is not None:
decoder_input = common_attention.add_timing_signal_1d_given_position(
decoder_input, targets_position)
else:
decoder_input = common_attention.add_timing_signal_1d(decoder_input)
elif hparams.pos == "emb":
decoder_input = common_attention.add_positional_embedding(
decoder_input, hparams.max_length, "targets_positional_embedding",
targets_position)
if hparams.activation_dtype == "bfloat16":
decoder_self_attention_bias = tf.cast(decoder_self_attention_bias,
tf.bfloat16)
return (decoder_input, decoder_self_attention_bias)
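# Worked example (illustrative, not from the original source): with pad=None,
# shift_right_3d prepends a zero ("GO") frame and drops the last frame, so a
# target sequence of embeddings [t1, t2, t3] becomes decoder input [0, t1, t2].
# With causal_decoder_self_attention=True the returned bias is lower-triangular:
#
#   bias[i, j] =  0    if j <= i   (position i may attend to j)
#   bias[i, j] = -1e9  if j >  i   (future positions are masked)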
def transformer_decoder_layer(decoder_input,
decoder_self_attention_bias,
layer_idx,
hparams,
encoder_output=None,
encoder_decoder_attention_bias=None,
cache=None,
decode_loop_step=None,
nonpadding=None,
save_weights_to=None,
make_image_summary=False,
losses=None,
layer_collection=None,
recurrent_memory_by_layer=None,
chunk_number=None):
"""A single transformer decoder layer."""
x = decoder_input
layer = layer_idx
layer_name = "layer_%d" % layer
layer_cache = cache[layer_name] if cache is not None else None
attention_dropout_broadcast_dims = (
common_layers.comma_separated_string_to_integer_list(
getattr(hparams, "attention_dropout_broadcast_dims", "")))
if recurrent_memory_by_layer is not None:
recurrent_memory = recurrent_memory_by_layer[layer_name]
else:
recurrent_memory = None
if layer < hparams.get("num_area_layers", 0):
max_area_width = hparams.get("max_area_width", 1)
max_area_height = hparams.get("max_area_height", 1)
memory_height = hparams.get("max_area_height", 1)
else:
max_area_width = 1
max_area_height = 1
memory_height = 1
with tf.variable_scope(layer_name):
with tf.variable_scope("self_attention"):
y = common_attention.multihead_attention(
common_layers.layer_preprocess(
x, hparams, layer_collection=layer_collection),
None,
decoder_self_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=hparams.self_attention_type,
max_relative_position=hparams.max_relative_position,
heads_share_relative_embedding=(
hparams.heads_share_relative_embedding),
add_relative_to_values=hparams.add_relative_to_values,
save_weights_to=save_weights_to,
cache=layer_cache,
make_image_summary=make_image_summary,
dropout_broadcast_dims=attention_dropout_broadcast_dims,
max_length=hparams.get("max_length"),
decode_loop_step=decode_loop_step,
vars_3d=hparams.get("attention_variables_3d"),
activation_dtype=hparams.get("activation_dtype", "float32"),
weight_dtype=hparams.get("weight_dtype", "float32"),
layer_collection=layer_collection,
recurrent_memory=recurrent_memory,
chunk_number=chunk_number,
hard_attention_k=hparams.get("hard_attention_k", 0),
gumbel_noise_weight=hparams.get("gumbel_noise_weight", 0.0),
max_area_width=max_area_width,
max_area_height=max_area_height,
memory_height=memory_height,
area_key_mode=hparams.get("area_key_mode", "none"),
area_value_mode=hparams.get("area_value_mode", "none"),
training=(hparams.get(
"mode",
tf.estimator.ModeKeys.TRAIN) == tf.estimator.ModeKeys.TRAIN))
x = common_layers.layer_postprocess(x, y, hparams)
if encoder_output is not None:
with tf.variable_scope("encdec_attention"):
y = common_attention.multihead_attention(
common_layers.layer_preprocess(
x, hparams, layer_collection=layer_collection),
encoder_output,
encoder_decoder_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
max_relative_position=hparams.max_relative_position,
heads_share_relative_embedding=(
hparams.heads_share_relative_embedding),
add_relative_to_values=hparams.add_relative_to_values,
save_weights_to=save_weights_to,
cache=layer_cache,
make_image_summary=make_image_summary,
dropout_broadcast_dims=attention_dropout_broadcast_dims,
max_length=hparams.get("max_length"),
vars_3d=hparams.get("attention_variables_3d"),
activation_dtype=hparams.get("activation_dtype", "float32"),
weight_dtype=hparams.get("weight_dtype", "float32"),
layer_collection=layer_collection,
hard_attention_k=hparams.get("hard_attention_k", 0),
gumbel_noise_weight=hparams.get("gumbel_noise_weight", 0.0),
max_area_width=max_area_width,
max_area_height=max_area_height,
memory_height=memory_height,
area_key_mode=hparams.get("area_key_mode", "none"),
area_value_mode=hparams.get("area_value_mode", "none"),
training=(hparams.get(
"mode",
tf.estimator.ModeKeys.TRAIN) == tf.estimator.ModeKeys.TRAIN))
x = common_layers.layer_postprocess(x, y, hparams)
with tf.variable_scope("ffn"):
y = transformer_ffn_layer(
common_layers.layer_preprocess(
x, hparams, layer_collection=layer_collection),
hparams,
conv_padding="LEFT",
nonpadding_mask=nonpadding,
losses=losses,
cache=layer_cache,
decode_loop_step=decode_loop_step,
layer_collection=layer_collection)
x = common_layers.layer_postprocess(x, y, hparams)
return x
def transformer_decoder(decoder_input,
encoder_output,
decoder_self_attention_bias,
encoder_decoder_attention_bias,
hparams,
cache=None,
decode_loop_step=None,
name="decoder",
nonpadding=None,
save_weights_to=None,
make_image_summary=True,
losses=None,
layer_collection=None,
recurrent_memory_by_layer=None,
chunk_number=None):
"""A stack of transformer layers.
Args:
decoder_input: a Tensor
encoder_output: a Tensor
decoder_self_attention_bias: bias Tensor for self-attention (see
common_attention.attention_bias())
encoder_decoder_attention_bias: bias Tensor for encoder-decoder attention
(see common_attention.attention_bias())
hparams: hyperparameters for model
cache: dict, containing tensors which are the results of previous
attentions, used for fast decoding.
decode_loop_step: An integer, step number of the decoding loop. Only used
for inference on TPU.
name: a string
nonpadding: optional Tensor with shape [batch_size, encoder_length]
indicating what positions are not padding. This is used to mask out
padding in convolutional layers. We generally only need this mask for
"packed" datasets, because for ordinary datasets, no padding is ever
followed by nonpadding.
save_weights_to: an optional dictionary to capture attention weights for
visualization; the weights tensor will be appended there under a string
key created from the variable scope (including name).
make_image_summary: Whether to make an attention image summary.
losses: optional list onto which to append extra training losses
layer_collection: A tensorflow_kfac.LayerCollection. Only used by the KFAC
optimizer. Default is None.
recurrent_memory_by_layer: Optional dict, mapping layer names to instances
of transformer_memory.RecurrentMemory. Default is None.
chunk_number: an optional integer Tensor with shape [batch] used to operate
the recurrent_memory.
Returns:
    y: a Tensor
"""
x = decoder_input
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_NUM_HIDDEN_LAYERS,
value=hparams.num_decoder_layers or hparams.num_hidden_layers,
hparams=hparams)
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_ATTENTION_DROPOUT,
value=hparams.attention_dropout,
hparams=hparams)
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_ATTENTION_DENSE,
value={
"use_bias": "false",
"num_heads": hparams.num_heads,
"hidden_size": hparams.hidden_size
},
hparams=hparams)
with tf.variable_scope(name):
for layer_idx in range(hparams.num_decoder_layers or
hparams.num_hidden_layers):
x = transformer_decoder_layer(
x,
decoder_self_attention_bias,
layer_idx,
hparams,
encoder_decoder_attention_bias=encoder_decoder_attention_bias,
encoder_output=encoder_output,
cache=cache,
decode_loop_step=decode_loop_step,
nonpadding=nonpadding,
save_weights_to=save_weights_to,
make_image_summary=make_image_summary,
losses=losses,
layer_collection=layer_collection,
recurrent_memory_by_layer=recurrent_memory_by_layer,
chunk_number=chunk_number
)
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_NORM,
value={"hidden_size": hparams.hidden_size})
return common_layers.layer_preprocess(
x, hparams, layer_collection=layer_collection)
@registry.register_model
class TransformerMemory(Transformer):
"""Transformer language model with memory across chunks."""
# TODO(kitaev): consider overriding set_mode to swap out recurrent memory when
# switching between training and evaluation.
def __init__(self, *args, **kwargs):
super(TransformerMemory, self).__init__(*args, **kwargs)
hparams = self._hparams
self.recurrent_memory_by_layer = {}
for layer in range(hparams.num_decoder_layers or hparams.num_hidden_layers):
layer_name = "layer_%d" % layer
if hparams.memory_type == "neural_memory":
memory = transformer_memory.TransformerMemory(
batch_size=int(hparams.batch_size / hparams.max_length),
key_depth=hparams.hidden_size,
val_depth=hparams.hidden_size,
memory_size=hparams.split_targets_chunk_length,
sharpen_factor=1.,
name=layer_name + "/recurrent_memory")
elif hparams.memory_type == "transformer_xl":
memory = transformer_memory.RecentTokensMemory(
layer_name + "/recurrent_memory", hparams)
else:
raise ValueError("Unsupported memory type: %s" % hparams.memory_type)
self.recurrent_memory_by_layer[layer_name] = memory
@property
def has_input(self):
if hasattr(self._hparams, "unconditional") and self._hparams.unconditional:
return False
return super(TransformerMemory, self).has_input
def _beam_decode(self, features, decode_length, beam_size, top_beams, alpha,
use_tpu=False):
"""Overriding beam search because for now only the slow version works with
memory
"""
return self._beam_decode_slow(features, decode_length, beam_size,
top_beams, alpha, use_tpu)
@registry.register_hparams
def transformer_base_v1():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.norm_type = "layer"
hparams.hidden_size = 512
hparams.batch_size = 4096
hparams.max_length = 256
hparams.clip_grad_norm = 0. # i.e. no gradient clipping
hparams.optimizer_adam_epsilon = 1e-9
hparams.learning_rate_schedule = "legacy"
hparams.learning_rate_decay_scheme = "noam"
hparams.learning_rate = 0.1
hparams.learning_rate_warmup_steps = 4000
hparams.initializer_gain = 1.0
hparams.num_hidden_layers = 6
hparams.initializer = "uniform_unit_scaling"
hparams.weight_decay = 0.0
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.98
hparams.num_sampled_classes = 0
hparams.label_smoothing = 0.1
hparams.shared_embedding_and_softmax_weights = True
hparams.symbol_modality_num_shards = 16
# Add new ones like this.
hparams.add_hparam("filter_size", 2048)
# Layer-related flags. If zero, these fall back on hparams.num_hidden_layers.
hparams.add_hparam("num_encoder_layers", 0)
hparams.add_hparam("num_decoder_layers", 0)
# Attention-related flags.
hparams.add_hparam("num_heads", 8)
hparams.add_hparam("attention_key_channels", 0)
hparams.add_hparam("attention_value_channels", 0)
hparams.add_hparam("ffn_layer", "dense_relu_dense")
hparams.add_hparam("parameter_attention_key_channels", 0)
hparams.add_hparam("parameter_attention_value_channels", 0)
# All hyperparameters ending in "dropout" are automatically set to 0.0
# when not in training mode.
hparams.add_hparam("attention_dropout", 0.0)
hparams.add_hparam("attention_dropout_broadcast_dims", "")
hparams.add_hparam("relu_dropout", 0.0)
hparams.add_hparam("relu_dropout_broadcast_dims", "")
hparams.add_hparam("pos", "timing") # timing, none
hparams.add_hparam("nbr_decoder_problems", 1)
hparams.add_hparam("proximity_bias", False)
hparams.add_hparam("causal_decoder_self_attention", True)
hparams.add_hparam("use_pad_remover", True)
hparams.add_hparam("self_attention_type", "dot_product")
hparams.add_hparam("conv_first_kernel", 3)
hparams.add_hparam("attention_variables_3d", False)
hparams.add_hparam("use_target_space_embedding", True)
# These parameters are only used when ffn_layer=="local_moe_tpu"
hparams.add_hparam("moe_overhead_train", 1.0)
hparams.add_hparam("moe_overhead_eval", 2.0)
hparams.moe_num_experts = 16
hparams.moe_loss_coef = 1e-3
# If specified, use this value instead of problem name in metrics.py.
# This is useful for programs that can automatically compare experiments side
# by side based on the same metric names.
hparams.add_hparam("overload_eval_metric_name", "")
# For making a transformer encoder unidirectional by using masked
# attention.
hparams.add_hparam("unidirectional_encoder", False)
# For hard attention.
hparams.add_hparam("hard_attention_k", 0)
hparams.add_hparam("gumbel_noise_weight", 0.0)
return hparams
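# Illustrative sketch (not from the original file): registered hparams sets can
# be fetched by name and tweaked before training, e.g.
#
#   hparams = registry.hparams("transformer_base")
#   hparams.num_hidden_layers = 4   # smaller model for quick experiments
#   hparams.batch_size = 1024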
@registry.register_hparams
def transformer_base_v2():
"""Set of hyperparameters."""
hparams = transformer_base_v1()
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
hparams.layer_prepostprocess_dropout = 0.1
hparams.attention_dropout = 0.1
hparams.relu_dropout = 0.1
hparams.learning_rate_warmup_steps = 8000
hparams.learning_rate = 0.2
return hparams
@registry.register_hparams
def transformer_base_vq_ada_32ex_packed():
"""Set of hyperparameters for lm1b packed following tpu params."""
hparams = transformer_base_v2()
expert_utils.update_hparams_for_vq_gating(hparams)
hparams.moe_num_experts = 32
hparams.gating_type = "vq"
# this gives us a batch size of 16 because each seq is len 256
hparams.batch_size = 5072
hparams.ffn_layer = "local_moe"
hparams.shared_embedding_and_softmax_weights = False
hparams.learning_rate_warmup_steps = 10000
# one epoch for languagemodel_lm1b32k_packed = 27200 steps w/ bsize 128
hparams.learning_rate_decay_steps = 27200
hparams.num_heads = 4
hparams.num_blocks = 1
hparams.moe_k = 1
hparams.num_decoder_layers = 6
hparams.label_smoothing = 0.
hparams.layer_prepostprocess_dropout = 0.1
hparams.layer_postprocess_sequence = "dan"
hparams.layer_preprocess_sequence = "none"
hparams.weight_decay = 1e-06
hparams.attention_dropout = 0.1
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "linear_warmup*rsqrt_decay*linear_decay"
hparams.activation_dtype = "float32"
hparams.learning_rate = 0.1
hparams.learning_rate_constant = 1.0
return hparams
@registry.register_hparams
def transformer_topk_16_packed():
hparams = transformer_base_vq_ada_32ex_packed()
hparams.gating_type = "topk"
hparams.moe_num_experts = 16
hparams.moe_k = 2
return hparams
@registry.register_hparams
def transformer_base_vq1_16_nb1_packed_nda_b01_scales():
"""Set of hyperparameters."""
hparams = transformer_base_vq_ada_32ex_packed()
hparams.use_scales = int(True)
hparams.moe_num_experts = 16
hparams.moe_k = 1
hparams.beta = 0.1
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
hparams.ema = False
return hparams
@registry.register_hparams
def transformer_base_vq1_16_nb1_packed_dan_b01_scales():
"""Set of hyperparameters."""
hparams = transformer_base_vq_ada_32ex_packed()
hparams.use_scales = int(True)
hparams.moe_num_experts = 16
hparams.moe_k = 1
hparams.beta = 0.1
hparams.ema = False
return hparams
@registry.register_hparams
def transformer_base_vq1_16_nb1_packed_nda_b01_scales_dialog():
"""Set of hyperparameters."""
hparams = transformer_base_vq1_16_nb1_packed_nda_b01_scales()
hparams.batch_size = 2048
hparams.max_length = 1024
hparams.filter_size = 3072
return hparams
@registry.register_hparams
def transformer_ada_lmpackedbase():
"""Set of hyperparameters."""
hparams = transformer_base_vq_ada_32ex_packed()
hparams.ffn_layer = "dense_relu_dense"
return hparams
@registry.register_hparams
def transformer_ada_lmpackedbase_dialog():
"""Set of hyperparameters."""
hparams = transformer_base_vq_ada_32ex_packed()
hparams.max_length = 1024
hparams.ffn_layer = "dense_relu_dense"
hparams.batch_size = 4096
return hparams
@registry.register_hparams
def transformer_ada_lmpackedbase_relative():
"""Set of hyperparameters."""
hparams = transformer_base_vq_ada_32ex_packed()
hparams.ffn_layer = "dense_relu_dense"
return hparams
@registry.register_hparams
def transformer_base_v3():
"""Base parameters for Transformer model."""
# Update parameters here, then occasionally cut a versioned set, e.g.
# transformer_base_v2.
hparams = transformer_base_v2()
hparams.optimizer_adam_beta2 = 0.997
# New way of specifying learning rate schedule.
# Equivalent to previous version.
hparams.learning_rate_schedule = (
"constant*linear_warmup*rsqrt_decay*rsqrt_hidden_size")
hparams.learning_rate_constant = 2.0
return hparams
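# For readers of the schedule string above, and assuming the standard tensor2tensor
# meaning of the factor names, the effective learning rate at a given step is
# approximately
#
#   lr(step) = learning_rate_constant
#              * min(1, step / warmup_steps)          # "linear_warmup"
#              * 1 / sqrt(max(step, warmup_steps))    # "rsqrt_decay"
#              * 1 / sqrt(hidden_size)                # "rsqrt_hidden_size"
#
# i.e. the Noam schedule from "Attention Is All You Need", scaled by the constant
# set just above.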
@registry.register_hparams
def transformer_base():
"""Base parameters for Transformer model."""
hparams = transformer_base_v3()
return hparams
@registry.register_hparams
def transformer_big():
"""HParams for transformer big model on WMT."""
hparams = transformer_base()
hparams.hidden_size = 1024
hparams.filter_size = 4096
# Reduce batch size to 2048 from 4096 to be able to train the model on a GPU
# with 12 GB memory. For example, NVIDIA TITAN V GPU.
hparams.batch_size = 2048
hparams.num_heads = 16
hparams.layer_prepostprocess_dropout = 0.3
return hparams
@registry.register_hparams
def transformer_tall():
"""Hparams for transformer on LM for pretraining/finetuning/mixing."""
hparams = transformer_base()
hparams.batch_size = 2048
hparams.hidden_size = 768
hparams.filter_size = 3072
hparams.num_hidden_layers = 12
hparams.num_heads = 12
hparams.label_smoothing = 0.0
hparams.max_length = 1024
hparams.eval_drop_long_sequences = True
hparams.multiproblem_mixing_schedule = "pretrain"
hparams.multiproblem_vocab_size = 65536
hparams.clip_grad_norm = 1.0
return hparams
@registry.register_hparams
def transformer_tall_finetune_tied():
"""Tied means fine-tune CNN/DM summarization as LM."""
hparams = transformer_tall()
hparams.multiproblem_max_input_length = 750
hparams.multiproblem_max_target_length = 100
hparams.multiproblem_schedule_max_examples = 0
hparams.learning_rate_schedule = ("linear_warmup*constant*cosdecay")
hparams.learning_rate_constant = 5e-5
hparams.learning_rate_warmup_steps = 100
# Set train steps to learning_rate_decay_steps or less
hparams.learning_rate_decay_steps = 80000
hparams.multiproblem_target_eval_only = True
hparams.multiproblem_reweight_label_loss = True
hparams.multiproblem_label_weight = 1.0
hparams.optimizer = "true_adam"
return hparams
@registry.register_hparams
def transformer_tall_train_tied():
"""Tied means train CNN/DM summarization as LM."""
hparams = transformer_tall()
hparams.multiproblem_max_input_length = 750
hparams.multiproblem_max_target_length = 100
hparams.multiproblem_schedule_max_examples = 0
hparams.learning_rate_schedule = ("linear_warmup*constant*cosdecay")
hparams.learning_rate_constant = 2e-4
hparams.learning_rate_warmup_steps = 8000
# Set train steps to learning_rate_decay_steps or less
hparams.learning_rate_decay_steps = 150000
hparams.multiproblem_target_eval_only = True
hparams.multiproblem_reweight_label_loss = True
hparams.multiproblem_label_weight = 1.0
hparams.optimizer = "true_adam"
return hparams
@registry.register_hparams
def transformer_tall_finetune_uniencdec():
"""Fine-tune CNN/DM with a unidirectional encoder and decoder."""
hparams = transformer_tall()
hparams.max_input_seq_length = 750
hparams.max_target_seq_length = 100
hparams.optimizer = "true_adam"
hparams.learning_rate_schedule = ("linear_warmup*constant*cosdecay")
hparams.learning_rate_decay_steps = 80000
hparams.learning_rate_constant = 5e-5
hparams.learning_rate_warmup_steps = 100
hparams.unidirectional_encoder = True
return hparams
@registry.register_hparams
def transformer_tall_train_uniencdec():
"""Train CNN/DM with a unidirectional encoder and decoder."""
hparams = transformer_tall()
hparams.max_input_seq_length = 750
hparams.max_target_seq_length = 100
hparams.optimizer = "true_adam"
hparams.learning_rate_schedule = ("linear_warmup*constant*cosdecay")
hparams.learning_rate_decay_steps = 150000
hparams.learning_rate_constant = 2e-4
hparams.unidirectional_encoder = True
return hparams
@registry.register_hparams
def transformer_tall_finetune_textclass():
"""Hparams for transformer on LM for finetuning on text class problems."""
hparams = transformer_tall()
hparams.learning_rate_constant = 6.25e-5
hparams.learning_rate_schedule = ("linear_warmup*constant*linear_decay")
hparams.multiproblem_schedule_max_examples = 0
hparams.multiproblem_target_eval_only = True
hparams.learning_rate_warmup_steps = 50
# Set train steps to learning_rate_decay_steps or less
hparams.learning_rate_decay_steps = 25000
hparams.multiproblem_reweight_label_loss = True
hparams.multiproblem_label_weight = 0.95
return hparams
@registry.register_hparams
def transformer_tall_pretrain_lm():
"""Hparams for transformer on LM pretraining (with 64k vocab)."""
hparams = transformer_tall()
hparams.learning_rate_constant = 2e-4
hparams.learning_rate_schedule = ("linear_warmup*constant*cosdecay")
hparams.optimizer = "adam_w"
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.999
hparams.optimizer_adam_epsilon = 1e-8
# Set max examples to something big when pretraining only the LM, definitely
# something an order of magnitude bigger than number of train steps.
hparams.multiproblem_schedule_max_examples = 5e8
# Set train steps to learning_rate_decay_steps or less
hparams.learning_rate_decay_steps = 5000000
return hparams
@registry.register_hparams
def transformer_tall_pretrain_lm_tpu_adafactor():
"""Hparams for transformer on LM pretraining (with 64k vocab) on TPU."""
hparams = transformer_tall_pretrain_lm()
update_hparams_for_tpu(hparams)
hparams.max_length = 1024
# For multi-problem on TPU we need it in absolute examples.
hparams.batch_size = 8
hparams.multiproblem_vocab_size = 2**16
return hparams
@registry.register_hparams
def transformer_tall_pretrain_lm_tpu_adafactor_large():
"""Hparams for transformer on LM pretraining on TPU, large model."""
hparams = transformer_tall_pretrain_lm_tpu_adafactor()
hparams.hidden_size = 1024
hparams.num_heads = 16
hparams.filter_size = 32768 # max fitting in 16G memory is 49152, batch 2
hparams.batch_size = 4
hparams.multiproblem_mixing_schedule = "constant"
# Task order: lm/en-de/en-fr/en-ro/de-en/fr-en/ro-en/cnndm/mnli/squad.
hparams.multiproblem_per_task_threshold = "320,80,160,1,80,160,2,20,10,5"
return hparams
@registry.register_hparams
def transformer_tall_pretrain_lm_tpu():
"""Hparams for transformer on LM pretraining on TPU with AdamW."""
hparams = transformer_tall_pretrain_lm_tpu_adafactor()
# Optimizer gets reset in update_hparams_for_tpu so we set it again here.
hparams.learning_rate_constant = 2e-4
hparams.learning_rate_schedule = ("linear_warmup * constant * cosdecay")
hparams.optimizer = "adam_w"
return hparams
@registry.register_hparams
def transformer_tall_big():
"""Hparams for transformer on LM+MNLI."""
hparams = transformer_tall()
hparams.num_hidden_layers = 18
return hparams
@registry.register_hparams
def transformer_big_single_gpu():
"""HParams for transformer big model for single GPU."""
hparams = transformer_big()
hparams.layer_prepostprocess_dropout = 0.1
hparams.learning_rate_warmup_steps = 16000
return hparams
@registry.register_hparams
def transformer_base_single_gpu():
"""HParams for transformer base model for single GPU."""
hparams = transformer_base()
hparams.batch_size = 1024
hparams.learning_rate_schedule = "constant*linear_warmup*rsqrt_decay"
hparams.learning_rate_constant = 0.1
hparams.learning_rate_warmup_steps = 16000
return hparams
@registry.register_hparams
def transformer_base_multistep8():
"""HParams for simulating 8 GPUs with MultistepAdam optimizer."""
hparams = transformer_base()
hparams.optimizer = "multistep_adam"
hparams.optimizer_multistep_accumulate_steps = 8
return hparams
@registry.register_hparams
def transformer_parsing_base():
"""HParams for parsing on WSJ only."""
hparams = transformer_base()
hparams.attention_dropout = 0.2
hparams.layer_prepostprocess_dropout = 0.2
hparams.max_length = 512
hparams.learning_rate_warmup_steps = 16000
hparams.hidden_size = 1024
hparams.learning_rate = 0.05
hparams.shared_embedding_and_softmax_weights = False
return hparams
@registry.register_hparams
def transformer_parsing_big():
"""HParams for parsing on WSJ semi-supervised."""
hparams = transformer_big()
hparams.max_length = 512
hparams.shared_source_target_embedding = False
hparams.learning_rate_warmup_steps = 4000
hparams.layer_prepostprocess_dropout = 0.1
hparams.batch_size = 2048
hparams.learning_rate = 0.05
return hparams
@registry.register_hparams
def transformer_parsing_ice():
"""HParams for parsing and tagging Icelandic text."""
hparams = transformer_base_single_gpu()
hparams.batch_size = 4096
hparams.shared_embedding_and_softmax_weights = False
return hparams
@registry.register_hparams
def transformer_tiny():
hparams = transformer_base()
hparams.num_hidden_layers = 2
hparams.hidden_size = 128
hparams.filter_size = 512
hparams.num_heads = 4
return hparams
@registry.register_hparams
def transformer_test():
hparams = transformer_base()
hparams.num_hidden_layers = 2
hparams.hidden_size = 16
hparams.filter_size = 8
hparams.num_heads = 2
return hparams
@registry.register_hparams
def transformer_small():
hparams = transformer_base()
hparams.num_hidden_layers = 2
hparams.hidden_size = 256
hparams.filter_size = 1024
hparams.num_heads = 4
return hparams
@registry.register_hparams
def transformer_l2():
hparams = transformer_base()
hparams.num_hidden_layers = 2
return hparams
@registry.register_hparams
def transformer_l4():
hparams = transformer_base()
hparams.num_hidden_layers = 4
return hparams
@registry.register_hparams
def transformer_l8():
hparams = transformer_base()
hparams.num_hidden_layers = 8
return hparams
@registry.register_hparams
def transformer_l10():
hparams = transformer_base()
hparams.num_hidden_layers = 10
return hparams
@registry.register_hparams
def transformer_h1():
hparams = transformer_base()
hparams.num_heads = 1
return hparams
@registry.register_hparams
def transformer_h4():
hparams = transformer_base()
hparams.num_heads = 4
return hparams
@registry.register_hparams
def transformer_h16():
hparams = transformer_base()
hparams.num_heads = 16
return hparams
@registry.register_hparams
def transformer_h32():
hparams = transformer_base()
hparams.num_heads = 32
return hparams
@registry.register_hparams
def transformer_k128():
hparams = transformer_base()
hparams.attention_key_channels = 128
return hparams
@registry.register_hparams
def transformer_k256():
hparams = transformer_base()
hparams.attention_key_channels = 256
return hparams
@registry.register_hparams
def transformer_ff1024():
hparams = transformer_base()
hparams.filter_size = 1024
return hparams
@registry.register_hparams
def transformer_ff4096():
hparams = transformer_base()
hparams.filter_size = 4096
return hparams
@registry.register_hparams
def transformer_dr0():
hparams = transformer_base()
hparams.layer_prepostprocess_dropout = 0.0
return hparams
@registry.register_hparams
def transformer_dr2():
hparams = transformer_base()
hparams.layer_prepostprocess_dropout = 0.2
return hparams
@registry.register_hparams
def transformer_ls0():
hparams = transformer_base()
hparams.label_smoothing = 0.0
return hparams
@registry.register_hparams
def transformer_ls2():
hparams = transformer_base()
hparams.label_smoothing = 0.2
return hparams
@registry.register_hparams
def transformer_hs256():
hparams = transformer_base()
hparams.hidden_size = 256
return hparams
@registry.register_hparams
def transformer_hs1024():
hparams = transformer_base()
hparams.hidden_size = 1024
return hparams
@registry.register_hparams
def transformer_big_dr1():
hparams = transformer_base()
hparams.hidden_size = 1024
hparams.filter_size = 4096
hparams.num_heads = 16
hparams.layer_prepostprocess_dropout = 0.1
return hparams
@registry.register_hparams
def transformer_big_enfr():
hparams = transformer_big_dr1()
hparams.shared_embedding_and_softmax_weights = False
hparams.filter_size = 8192
hparams.layer_prepostprocess_dropout = 0.1
return hparams
@registry.register_hparams
def transformer_big_enfr_tpu():
hparams = transformer_big_enfr()
# For performance, use fewer heads so that matrix dimensions are at least 128
hparams.num_heads = 8
update_hparams_for_tpu(hparams)
return hparams
@registry.register_hparams
def transformer_big_dr2():
hparams = transformer_big_dr1()
hparams.layer_prepostprocess_dropout = 0.2
return hparams
@registry.register_hparams
def transformer_parameter_attention_a():
hparams = transformer_base()
hparams.ffn_layer = "parameter_attention"
hparams.filter_size = 1536
return hparams
@registry.register_hparams
def transformer_parameter_attention_b():
hparams = transformer_base()
hparams.ffn_layer = "parameter_attention"
hparams.filter_size = 512
hparams.parameter_attention_key_channels = 1024
hparams.parameter_attention_value_channels = 1024
hparams.num_heads = 16
return hparams
@registry.register_hparams
def transformer_prepend_v2():
hparams = transformer_base_v2()
hparams.prepend_mode = "prepend_inputs_masked_attention"
hparams.max_length = 0
return hparams
@registry.register_hparams
def transformer_prepend_v1():
hparams = transformer_base_v1()
hparams.prepend_mode = "prepend_inputs_masked_attention"
hparams.max_length = 0
return hparams
@registry.register_hparams
def transformer_prepend():
return transformer_prepend_v2()
@registry.register_ranged_hparams
def transformer_base_range(rhp):
"""Small range of hyperparameters."""
# After starting from base, set intervals for some parameters.
rhp.set_float("learning_rate", 0.3, 3.0, scale=rhp.LOG_SCALE)
rhp.set_discrete("learning_rate_warmup_steps",
[1000, 2000, 4000, 8000, 16000])
rhp.set_float("initializer_gain", 0.5, 2.0)
rhp.set_float("optimizer_adam_beta1", 0.85, 0.95)
rhp.set_float("optimizer_adam_beta2", 0.97, 0.99)
rhp.set_float("weight_decay", 0.0, 1e-4)
@registry.register_hparams
def transformer_relative():
"""Use relative position embeddings instead of absolute position encodings."""
hparams = transformer_base()
hparams.pos = None
hparams.self_attention_type = "dot_product_relative"
hparams.max_relative_position = 20
return hparams
@registry.register_hparams
def transformer_relative_tiny():
hparams = transformer_relative()
hparams.num_hidden_layers = 2
hparams.hidden_size = 128
hparams.filter_size = 512
hparams.num_heads = 4
return hparams
@registry.register_hparams
def transformer_relative_big():
hparams = transformer_big()
hparams.pos = None
hparams.self_attention_type = "dot_product_relative"
hparams.max_relative_position = 20
return hparams
@registry.register_hparams
def transformer_timeseries():
hparams = transformer_small()
hparams.batch_size = 256
hparams.learning_rate_warmup_steps = 2000
return hparams
@registry.register_hparams
def transformer_mlperf_tpu():
"""HParams for Transformer model on TPU for MLPerf on TPU 2x2."""
hparams = transformer_base_v3()
hparams.mlperf_mode = True
hparams.symbol_modality_num_shards = 1
hparams.max_length = 256 # ignored when using "_packed" problems
hparams.batch_size = 2048 # per-chip batch size matches the reference model
hparams.hidden_size = 1024
hparams.filter_size = 4096
hparams.num_heads = 16
hparams.attention_dropout_broadcast_dims = "0,1" # batch, heads
hparams.relu_dropout_broadcast_dims = "1" # length
hparams.layer_prepostprocess_dropout_broadcast_dims = "1" # length
return hparams
def update_hparams_for_tpu(hparams):
"""Change hparams to be compatible with TPU training."""
# Adafactor uses less memory than Adam.
# switch to Adafactor with its recommended learning rate scheme.
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "rsqrt_decay"
hparams.learning_rate_warmup_steps = 10000
# Avoid an expensive concat on TPU.
# >1 shards helps with faster parameter distribution on multi-GPU machines
hparams.symbol_modality_num_shards = 1
# Adaptive batch sizes and sequence lengths are not supported on TPU.
# Instead, every batch has the same sequence length and the same batch size.
# Longer sequences are dropped and shorter ones are padded.
#
# It is therefore suggested to use a problem where examples have been combined
# to a longer length, e.g. the "_packed" problems.
#
# For problems with variable sequence lengths, this parameter controls the
# maximum sequence length. Shorter sequences are dropped and longer ones
# are padded.
#
# For problems with fixed sequence lengths - e.g. the "_packed" problems,
# this hyperparameter is ignored.
hparams.max_length = 64
# TPUs have less memory than GPUs, so decrease the batch size if it's too high
if hparams.batch_size > 2048:
hparams.batch_size = 2048
# Using noise broadcast in the dropout layers saves memory during training.
hparams.attention_dropout_broadcast_dims = "0,1" # batch, heads
hparams.relu_dropout_broadcast_dims = "1" # length
hparams.layer_prepostprocess_dropout_broadcast_dims = "1" # length
return hparams
@registry.register_hparams
def transformer_tpu():
"""HParams for Transformer model on TPU."""
hparams = transformer_base()
update_hparams_for_tpu(hparams)
return hparams
@registry.register_hparams
def transformer_timeseries_tpu():
"""HParams for running Transformer model on timeseries on TPU."""
hparams = transformer_timeseries()
update_hparams_for_tpu(hparams)
hparams.batch_size = 256 # revert to value set in transformer_timeseries
return hparams
@registry.register_hparams
def transformer_tpu_bf16_activation():
"""HParams for Transformer model with BF16 activation on TPU."""
hparams = transformer_tpu()
hparams.activation_dtype = "bfloat16"
return hparams
@registry.register_hparams
def transformer_fairseq_fp16_activation_big():
"""Hparams intended to mirror those used in arxiv.org/pdf/1806.00187.pdf."""
hparams = transformer_big()
hparams.activation_dtype = "float16"
hparams.batch_size = 3584
return hparams
@registry.register_hparams
def transformer_packed_tpu():
"""Deprecated alias for transformer_tpu()."""
return transformer_tpu()
@registry.register_hparams
def transformer_big_tpu():
hparams = transformer_big()
update_hparams_for_tpu(hparams)
return hparams
@registry.register_hparams
def transformer_tiny_tpu():
hparams = transformer_tiny()
update_hparams_for_tpu(hparams)
return hparams
@registry.register_ranged_hparams
def transformer_tiny_tpu_range(rhp):
"""Small range of hyperparameters."""
rhp.set_float("learning_rate", 0.3, 3.0, scale=rhp.LOG_SCALE)
rhp.set_float("weight_decay", 0.0, 2.0)
@registry.register_ranged_hparams
def transformer_tpu_range(rhp):
"""Small range of hyperparameters."""
# After starting from base, set intervals for some parameters.
rhp.set_float("learning_rate", 0.3, 3.0, scale=rhp.LOG_SCALE)
rhp.set_discrete("learning_rate_warmup_steps",
[1000, 2000, 4000, 8000, 16000])
rhp.set_float("initializer_gain", 0.5, 2.0)
rhp.set_float("optimizer_adam_beta1", 0.85, 0.95)
rhp.set_float("optimizer_adam_beta2", 0.97, 0.99)
rhp.set_float("weight_decay", 0.0, 2.0)
@registry.register_hparams
def transformer_small_tpu():
"""TPU-friendly version of transformer_small.
Returns:
an hparams object.
"""
hparams = transformer_small()
update_hparams_for_tpu(hparams)
return hparams
@registry.register_hparams
def transformer_clean():
"""No dropout, label smoothing, max_length."""
hparams = transformer_base_v2()
hparams.label_smoothing = 0.0
hparams.layer_prepostprocess_dropout = 0.0
hparams.attention_dropout = 0.0
hparams.relu_dropout = 0.0
hparams.max_length = 0
return hparams
@registry.register_hparams
def transformer_clean_big():
hparams = transformer_clean()
hparams.hidden_size = 1024
hparams.filter_size = 4096
return hparams
@registry.register_hparams
def transformer_clean_big_tpu():
hparams = transformer_clean_big()
update_hparams_for_tpu(hparams)
return hparams
@registry.register_hparams
def transformer_tpu_with_conv():
"""Cut down on the number of heads, and use convs instead."""
hparams = transformer_tpu()
hparams.num_heads = 4 # Heads are expensive on TPUs.
hparams.ffn_layer = "conv_relu_conv"
return hparams
@registry.register_hparams
def transformer_lm_tpu_0():
"""HParams for training languagemodel_lm1b8k on tpu. 92M Params."""
hparams = transformer_clean_big()
update_hparams_for_tpu(hparams)
hparams.num_heads = 4 # Heads are expensive on TPUs.
hparams.batch_size = 4096
hparams.shared_embedding_and_softmax_weights = False
hparams.layer_prepostprocess_dropout = 0.1
return hparams
@registry.register_hparams
def transformer_lm_tpu_1():
"""HParams for training languagemodel_lm1b8k on tpu. 335M Params."""
hparams = transformer_lm_tpu_0()
hparams.hidden_size = 2048
hparams.filter_size = 8192
return hparams
@registry.register_hparams
def transformer_librispeech_v1():
"""HParams for training ASR model on LibriSpeech V1."""
hparams = transformer_base()
hparams.num_heads = 4
hparams.filter_size = 1024
hparams.hidden_size = 256
hparams.num_encoder_layers = 5
hparams.num_decoder_layers = 3
hparams.learning_rate = 0.15
hparams.batch_size = 6000000
librispeech.set_librispeech_length_hparams(hparams)
return hparams
@registry.register_hparams
def transformer_librispeech_v2():
"""HParams for training ASR model on LibriSpeech V2."""
hparams = transformer_base()
hparams.max_length = 1240000
hparams.max_input_seq_length = 1550
hparams.max_target_seq_length = 350
hparams.batch_size = 16
hparams.num_decoder_layers = 4
hparams.num_encoder_layers = 6
hparams.hidden_size = 384
hparams.learning_rate = 0.15
hparams.daisy_chain_variables = False
hparams.filter_size = 1536
hparams.num_heads = 2
hparams.ffn_layer = "conv_relu_conv"
hparams.conv_first_kernel = 9
hparams.weight_decay = 0
hparams.layer_prepostprocess_dropout = 0.2
hparams.relu_dropout = 0.2
return hparams
@registry.register_hparams
def transformer_librispeech_tpu_v1():
"""HParams for training ASR model on Librispeech on TPU v1."""
hparams = transformer_librispeech_v1()
update_hparams_for_tpu(hparams)
hparams.batch_size = 16
librispeech.set_librispeech_length_hparams(hparams)
return hparams
@registry.register_hparams
def transformer_librispeech_tpu_v2():
"""HParams for training ASR model on Librispeech on TPU v2."""
hparams = transformer_librispeech_v2()
update_hparams_for_tpu(hparams)
hparams.batch_size = 16
librispeech.set_librispeech_length_hparams(hparams)
return hparams
@registry.register_hparams
def transformer_librispeech():
"""HParams for training ASR model on Librispeech."""
return transformer_librispeech_v2()
@registry.register_hparams
def transformer_librispeech_tpu():
"""HParams for training ASR model on Librispeech on TPU."""
return transformer_librispeech_tpu_v2()
@registry.register_hparams
def transformer_common_voice():
"""HParams for training ASR model on Mozilla Common Voice."""
return transformer_librispeech()
@registry.register_hparams
def transformer_common_voice_tpu():
"""HParams for training ASR model on Mozilla Common Voice on TPU."""
hparams = transformer_librispeech_tpu()
hparams.batch_size = 8
return hparams
@registry.register_hparams
def transformer_supervised_attention():
"""HParams for supervised attention problems."""
hparams = transformer_base()
# Attention loss type (KL-divergence or MSE).
hparams.add_hparam("expected_attention_loss_type", "kl_divergence")
# Multiplier to the encoder-decoder expected attention loss.
hparams.add_hparam("expected_attention_loss_multiplier", 1.0)
return hparams
@registry.register_hparams
def transformer_tpu_1b():
"""Hparams for machine translation with ~1.1B parameters."""
hparams = transformer_tpu()
hparams.hidden_size = 2048
hparams.filter_size = 8192
hparams.num_hidden_layers = 8
# smaller batch size to avoid OOM
hparams.batch_size = 1024
hparams.activation_dtype = "bfloat16"
hparams.weight_dtype = "bfloat16"
# maximize number of parameters relative to computation by not sharing.
hparams.shared_embedding_and_softmax_weights = False
return hparams
@registry.register_hparams
def transformer_wikitext103_l4k_v0():
"""HParams for training languagemodel_wikitext103_l4k."""
hparams = transformer_big()
# Adafactor uses less memory than Adam.
# switch to Adafactor with its recommended learning rate scheme.
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "rsqrt_decay"
hparams.learning_rate_warmup_steps = 10000
hparams.num_heads = 4
hparams.max_length = 4096
hparams.batch_size = 4096
hparams.shared_embedding_and_softmax_weights = False
hparams.num_hidden_layers = 8
hparams.attention_dropout = 0.1
hparams.layer_prepostprocess_dropout = 0.2
hparams.relu_dropout = 0.1
hparams.label_smoothing = 0.0
# Using noise broadcast in the dropout layers saves memory during training.
hparams.attention_dropout_broadcast_dims = "0,1" # batch, heads
hparams.relu_dropout_broadcast_dims = "1" # length
hparams.layer_prepostprocess_dropout_broadcast_dims = "1" # length
# Avoid an expensive concat on TPU.
# >1 shards helps with faster parameter distribution on multi-GPU machines
hparams.symbol_modality_num_shards = 1
return hparams
@registry.register_hparams
def transformer_wikitext103_l4k_memory_v0():
"""HParams for training languagemodel_wikitext103_l4k with memory."""
hparams = transformer_wikitext103_l4k_v0()
hparams.split_targets_chunk_length = 64
hparams.split_targets_max_chunks = 64
hparams.split_targets_strided_training = True
hparams.add_hparam("memory_type", "transformer_xl")
# The hparams specify batch size *before* chunking, but we want to have a
# consistent 4K batch size *after* chunking to fully utilize the hardware.
target_tokens_per_batch = 4096
hparams.batch_size = int(target_tokens_per_batch * (
hparams.max_length / hparams.split_targets_chunk_length)) # 262144
hparams.pos = None
hparams.self_attention_type = "dot_product_relative"
hparams.max_relative_position = 2 * hparams.split_targets_chunk_length
hparams.add_hparam("unconditional", True)
hparams.add_hparam("recurrent_memory_batch_size", 0) # 0 = try to guess
# By default, cache one chunk only (like Transformer-XL)
hparams.add_hparam("num_memory_items", hparams.split_targets_chunk_length)
return hparams
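# Worked example of the batch-size arithmetic above, using only values set in this
# function: each max_length = 4096 sequence is split into 4096 / 64 = 64 chunks of
# split_targets_chunk_length = 64, so batch_size = 4096 * 64 = 262144 pre-chunking
# tokens, which after chunking corresponds to the intended ~4096 tokens per step.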
@registry.register_hparams
def transformer_wikitext103_l16k_memory_v0():
"""HParams for training languagemodel_wikitext103_l16k with memory."""
hparams = transformer_wikitext103_l4k_memory_v0()
hparams.max_length = 16384
hparams.split_targets_chunk_length = 64
hparams.split_targets_max_chunks = int(
hparams.max_length / hparams.split_targets_chunk_length)
# The hparams specify batch size *before* chunking, but we want to have a
# consistent 4K batch size *after* chunking to fully utilize the hardware.
target_tokens_per_batch = 4096
hparams.batch_size = int(target_tokens_per_batch * (
hparams.max_length / hparams.split_targets_chunk_length))
hparams.max_relative_position = 2 * hparams.split_targets_chunk_length
return hparams
@registry.register_hparams
def transformer_cifar10_memory_v0():
"""HParams for training image_cifar10_plain_gen_flat_rev with memory."""
hparams = transformer_wikitext103_l4k_memory_v0()
hparams.num_hidden_layers = 6
hparams.max_length = 32 * 32 * 3
hparams.split_targets_chunk_length = 64 * 3
hparams.split_targets_max_chunks = int(
hparams.max_length / hparams.split_targets_chunk_length)
hparams.num_memory_items = 128 * 3
# Since this is an image problem, batch size refers to examples (not tokens)
target_images_per_batch = 4
hparams.batch_size = int(target_images_per_batch * (
hparams.max_length / hparams.split_targets_chunk_length))
# The recurrent memory needs to know the actual batch size (in sequences)
hparams.recurrent_memory_batch_size = hparams.batch_size
hparams.max_relative_position = (
hparams.num_memory_items + hparams.split_targets_chunk_length)
return hparams
@registry.register_hparams
def transformer_imagenet64_memory_v0():
"""HParams for training image_imagenet64_gen_flat_rev with memory."""
hparams = transformer_cifar10_memory_v0()
hparams.max_length = 64 * 64 * 3
hparams.split_targets_chunk_length = 64 * 3
hparams.split_targets_max_chunks = int(
hparams.max_length / hparams.split_targets_chunk_length)
hparams.num_memory_items = 128 * 3
# Since this is an image problem, batch size refers to examples (not tokens)
target_images_per_batch = 2
hparams.batch_size = int(target_images_per_batch * (
hparams.max_length / hparams.split_targets_chunk_length))
# The recurrent memory needs to know the actual batch size (in sequences)
hparams.recurrent_memory_batch_size = hparams.batch_size
hparams.max_relative_position = 3072
return hparams
| [
"[email protected]"
] | |
ae0708cc0342891c5fb01dace708ffbc954432d3 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /021_module_collection/namedtuple/_exercises/namedtuple_002_Other Ways to Specify Field Names_template.py | 49045eb80045bf9453e1f2c3957cb3e8c0bb1e29 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,796 | py | # from collections ____ n_t_
#
# # Other Ways to Specify Field Names
# # There are a number of ways we can specify the field names for the named tuple:
# # we can provide a sequence of strings containing each property name
# # we can provide a single string with property names separated by whitespace or a comma
#
# Circle _ n_t_('Circle' 'center_x' 'center_y' 'radius'
# circle_1 _ C_ 0 0 10
# circle_2 _ C_ c._x_10 c._y_20 r.._100
# print c_1
# # Circle(center_x=0, center_y=0, radius=10)
#
# print c.._2
# # Circle(center_x=10, center_y=20, radius=100)
#
# # Or we can do it this way:
#
# City _ n_t_ 'City' 'name country population'
# new_york _ C__ 'New York' 'USA' 8_500_000
# print(n._y.
# # City(name='New York', country='USA', population=8500000)
#
# # This would work equally well:
#
# Stock _ n_t_ 'Stock' 'symbol, year, month, day, open, high, low, close'
# djia _ S.. 'DJIA', 2018, 1, 25, 26_313, 26_458, 26_260, 26_393
# print d...
# # Stock(symbol='DJIA', year=2018, month=1, day=25, open=26313, high=26458, low=26260, close=26393)
#
# # In fact, since whitespace can be used we can even use a multi-line string!
#
# Stock _ n_t_ 'Stock', '''symbol
# year month day
# open high low close'''
# djia _ S__ 'DJIA', 2018, 1, 25, 26_313, 26_458, 26_260, 26_393
# print d..
# # Stock(symbol='DJIA', year=2018, month=1, day=25, open=26313, high=26458, low=26260, close=26393)
#
# # Accessing Items in a Named Tuple
# # The major advantage of named tuples is that, as the name suggests, we can access the properties (fields)
# # of the tuple by name:
#
# # pt1 # ERROR NameError: name 'pt1' is not defined
# # pt1.x
# # 10
#
# print c.._1
# # Circle(center_x=0, center_y=0, radius=10)
#
# print c.._1.r..
# # 10
#
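# One possible completion of the blanks above, for reference; it follows the
# field names and values shown in the commented examples.
from collections import namedtuple
Circle = namedtuple('Circle', 'center_x center_y radius')
circle_1 = Circle(0, 0, 10)
circle_2 = Circle(center_x=10, center_y=20, radius=100)
print(circle_1)    # Circle(center_x=0, center_y=0, radius=10)
print(circle_2)    # Circle(center_x=10, center_y=20, radius=100)
City = namedtuple('City', 'name country population')
new_york = City('New York', 'USA', 8_500_000)
print(new_york)    # City(name='New York', country='USA', population=8500000)
Stock = namedtuple('Stock', 'symbol, year, month, day, open, high, low, close')
djia = Stock('DJIA', 2018, 1, 25, 26_313, 26_458, 26_260, 26_393)
print(djia)                       # Stock(symbol='DJIA', year=2018, ..., close=26393)
print(djia.symbol, djia.close)    # fields are accessible by name: DJIA 26393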
| [
"[email protected]"
] | |
40ad9596e5995a4fe0ac18d2fdc15a8855e1aa5a | bd62843278ffc297ef8f6d75a931f1f4ca4caaa7 | /exercises/friends_family/ff_dictionary/friends_dk_mod.py | e7db5bf1184a32c5d7919883f7ca191f03d5850c | [] | no_license | raysmith619/Introduction-To-Programming | d3bae042b4fc17bd56e8631a4d660233d8cd165b | bedc16eb5f6db0ad3b313355df6d51b5161c3835 | refs/heads/master | 2023-07-19T08:43:41.229893 | 2023-07-15T19:22:28 | 2023-07-15T19:22:28 | 132,622,195 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,228 | py | #friends_dk_mod.py 13Oct2020 crs
# Adapted from friends_mod.py
"""
A friends "module" which can be used by other programs
via from friends_mod import *
"""
my_friends = {}         # Initialize the dictionary of friends (names) as an empty dict
def list_friends():
""" list friends
"""
nf = 0 # Count of number listed so far
print("friends: ", end="")
for fr_name in my_friends:
if nf > 0:
print(", ", end="") # Separate after first
print(fr_name, end="") # On one line
nf += 1
print() # Add newline end of list
def test_list_friends():
""" Testing list_friends
"""
global my_friends # REQUIRED to allow us to modify
# variable outside function
print("\n=============\ntest_list_friends")
my_friends = {"fa":1, "fb":1, "fc": 1}
list_friends()
def add_one_friend(friend):
""" Adds one friend to our list
:friend: friend's name
"""
global my_friends # REQUIRED to allow us to modify
# variable outside function
print("add_one_friend(",
friend, ")", sep="")
    my_friends[friend] = friend          # Add to the dictionary (replaces any existing entry)
list_friends()
def add_friends(*friends):
""" Add zero or more friends
:*friends: zero or more friend names
"""
print("\nadd_friends(", *friends, ")") # passing on list to print
for friend in friends: # comma separated args become list
add_one_friend(friend)
def is_friend(possible):
""" Check if possible is a friend, that is in my_friends
:possible: name of possible friend
:returns: True if possible is a friend
"""
if possible in my_friends:
        return True     # possible is in the friends dictionary
    return False        # Not in the dictionary
"""
Do testing
"""
def test_add_one_friend():
""" Test, or atleast exercise, add_one_friend function
"""
global my_friends # REQUIRED to allow us to modify
# variable outside function
print("\n=============\ntest_add_one_friend")
my_friends = {} # Start test with empty
add_one_friend("tom")
add_one_friend("joe")
def test_add_friends():
""" Test, or atleast exercise, add_one_friend function
"""
global my_friends # REQUIRED to allow us to modify
# variable outside function
print("\n=============\ntest_add_friends()")
my_friends = {} # Start test with empty
add_friends("tom")
add_friends("joe", "mary", "ray")
def test_is_friend_ck(possible, expect=True):
""" Helper function check if test passes
:possible: possible friend
:expect: expected value (True,False)
default: True if not present
"""
print("test_is_friend_ck:", possible, "expect=", expect, end="")
result = is_friend(possible)
if result == expect:
print(" Passed Test")
else:
print(" FAILED Test result=", result, "expected=", expect)
def test_is_friend():
""" Test is_friend function
"""
global my_friends # REQUIRED to allow us to modify
# variable outside function
print("\n=============\ntest_is_friend()")
print("Set up friends list")
my_friends = {} # Start test with empty
add_friends("joe", "mary", "ray")
print("Check function")
test_is_friend_ck("joe") # Check if True as expected
test_is_friend_ck("marty", expect=False) # Check if False
test_is_friend_ck("mary", expect=True) # Ck if True explicit
print("Test the testing - this should fail the test.")
test_is_friend_ck("alex") # Should fail this!
"""
This type of test can be placed
in a module to facilitate "self-testing"
because it gets executed if/when the file gets
run by itself
"""
if __name__ == "__main__":
print("Self test", __file__)
test_list_friends()
test_add_one_friend()
test_add_friends()
test_is_friend()
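# A minimal usage sketch (illustrative only), assuming this file is saved as
# friends_dk_mod.py somewhere on the import path, as the module docstring
# suggests for the original friends_mod:
#
#   from friends_dk_mod import *
#   add_friends("ann", "bob")
#   if is_friend("ann"):
#       print("ann is one of my friends")
#   list_friends()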
| [
"[email protected]"
] | |
a125fabf6f8c28a59be672821e363432a14a3230 | 7b34cecc88b257752af3682ce198adb529f22f52 | /httoop/codecs/text/html.py | 7afddd060b630b351f19a225b0bf9d040b9adc0b | [
"MIT"
] | permissive | spaceone/httoop | c6a3c61f6b01804fa004e3ea1d46c5417b110f08 | bd170d5de030ff855d76b688f3e9d9d4f717a30a | refs/heads/master | 2023-01-05T18:36:09.904939 | 2023-01-03T20:57:56 | 2023-01-03T20:57:56 | 9,329,173 | 17 | 10 | null | 2015-06-20T21:55:10 | 2013-04-09T19:02:18 | HTML | UTF-8 | Python | false | false | 107 | py | # -*- coding: utf-8 -*-
from httoop.codecs.codec import Codec
class HTML(Codec):
mimetype = 'text/html'
| [
"[email protected]"
] | |
de46d2d1fb94ab7a5c96224c56459fe16cb981cf | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02716/s082816838.py | 81eab4bb1d4a40d68a77f483f2899018f648da49 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 529 | py | N = int(input())
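# What follows appears to solve: pick exactly N//2 elements of A, no two adjacent,
# maximizing their sum (the submission carries no problem statement, so this reading
# is inferred from the recurrences below).
# Prefix DP keeping only the pick counts that can still reach a valid full selection:
#   DP_odd[j]  - best sum over an odd-length prefix of length i with (i+1)//2 - 2 + j picks (j = 0..2)
#   DP_even[j] - best sum over an even-length prefix of length i with i//2 - 1 + j picks (j = 0..1)
# Each update either takes A[i] on top of the state from two positions back (which
# keeps the picks non-adjacent) or skips A[i] and carries over the state from one
# position back. The final answer is the state with exactly N//2 picks.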
A = list(map(int, input().split()))
DP_odd = [0, 0, A[0]]
DP_even = [0, max(A[0], A[1])]
if N >= 3:
DP_odd = [DP_even[0], max(DP_odd[1] + A[2], DP_even[1]), DP_odd[2] + A[2]]
for i in range(3, N):
if (i + 1) % 2 == 1:
DP_odd = [max(DP_odd[0] + A[i], DP_even[0]), max(DP_odd[1] + A[i], DP_even[1]), DP_odd[2] + A[i]]
else:
DP_even = [max(DP_even[0] + A[i], DP_odd[1]), max(DP_even[1] + A[i], DP_odd[2])]
if N % 2 == 1:
ans = DP_odd[1]
else:
ans = DP_even[1]
print(ans) | [
"[email protected]"
] | |
7840fe7574a34dded1c942db0fb65b5a62ba8699 | ed10dc841d5b4f6a038e8f24f603750992d9fae9 | /lldb/test/API/commands/settings/TestSettings.py | 23f4de05ea0cb8f300266e31b0d8010f1e670d70 | [
"NCSA",
"Apache-2.0",
"LLVM-exception"
] | permissive | WYK15/swift-Ollvm10 | 90c2f0ade099a1cc545183eba5c5a69765320401 | ea68224ab23470963b68dfcc28b5ac769a070ea3 | refs/heads/main | 2023-03-30T20:02:58.305792 | 2021-04-07T02:41:01 | 2021-04-07T02:41:01 | 355,189,226 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,795 | py | """
Test lldb settings command.
"""
import os
import re
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class SettingsCommandTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
def test_apropos_should_also_search_settings_description(self):
"""Test that 'apropos' command should also search descriptions for the settings variables."""
self.expect("apropos 'environment variable'",
substrs=["target.env-vars",
"environment variables",
"executable's environment"])
def test_append_target_env_vars(self):
"""Test that 'append target.run-args' works."""
# Append the env-vars.
self.runCmd('settings append target.env-vars MY_ENV_VAR=YES')
# And add hooks to restore the settings during tearDown().
self.addTearDownHook(
lambda: self.runCmd("settings clear target.env-vars"))
# Check it immediately!
self.expect('settings show target.env-vars',
substrs=['MY_ENV_VAR=YES'])
def test_insert_before_and_after_target_run_args(self):
"""Test that 'insert-before/after target.run-args' works."""
# Set the run-args first.
self.runCmd('settings set target.run-args a b c')
# And add hooks to restore the settings during tearDown().
self.addTearDownHook(
lambda: self.runCmd("settings clear target.run-args"))
# Now insert-before the index-0 element with '__a__'.
self.runCmd('settings insert-before target.run-args 0 __a__')
# And insert-after the index-1 element with '__A__'.
self.runCmd('settings insert-after target.run-args 1 __A__')
# Check it immediately!
self.expect('settings show target.run-args',
substrs=['target.run-args',
'[0]: "__a__"',
'[1]: "a"',
'[2]: "__A__"',
'[3]: "b"',
'[4]: "c"'])
@expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr44430")
def test_replace_target_run_args(self):
"""Test that 'replace target.run-args' works."""
# Set the run-args and then replace the index-0 element.
self.runCmd('settings set target.run-args a b c')
# And add hooks to restore the settings during tearDown().
self.addTearDownHook(
lambda: self.runCmd("settings clear target.run-args"))
# Now replace the index-0 element with 'A', instead.
self.runCmd('settings replace target.run-args 0 A')
# Check it immediately!
self.expect('settings show target.run-args',
substrs=['target.run-args (arguments) =',
'[0]: "A"',
'[1]: "b"',
'[2]: "c"'])
def test_set_prompt(self):
"""Test that 'set prompt' actually changes the prompt."""
# Set prompt to 'lldb2'.
self.runCmd("settings set prompt 'lldb2 '")
# Immediately test the setting.
self.expect("settings show prompt", SETTING_MSG("prompt"),
startstr='prompt (string) = "lldb2 "')
# The overall display should also reflect the new setting.
self.expect("settings show", SETTING_MSG("prompt"),
substrs=['prompt (string) = "lldb2 "'])
# Use '-r' option to reset to the original default prompt.
self.runCmd("settings clear prompt")
def test_set_term_width(self):
"""Test that 'set term-width' actually changes the term-width."""
self.runCmd("settings set term-width 70")
# Immediately test the setting.
self.expect("settings show term-width", SETTING_MSG("term-width"),
startstr="term-width (int) = 70")
# The overall display should also reflect the new setting.
self.expect("settings show", SETTING_MSG("term-width"),
substrs=["term-width (int) = 70"])
# rdar://problem/10712130
@skipIf(oslist=["windows"], bugnumber="llvm.org/pr44431")
def test_set_frame_format(self):
"""Test that 'set frame-format' with a backtick char in the format string works as well as fullpath."""
self.build()
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
def cleanup():
self.runCmd(
"settings set frame-format %s" %
self.format_string, check=False)
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
self.runCmd("settings show frame-format")
m = re.match(
'^frame-format \(format-string\) = "(.*)\"$',
self.res.GetOutput())
self.assertTrue(m, "Bad settings string")
self.format_string = m.group(1)
# Change the default format to print function.name rather than
# function.name-with-args
format_string = "frame #${frame.index}: ${frame.pc}{ ${module.file.basename}\`${function.name}{${function.pc-offset}}}{ at ${line.file.fullpath}:${line.number}}{, lang=${language}}\n"
self.runCmd("settings set frame-format %s" % format_string)
# Immediately test the setting.
self.expect("settings show frame-format", SETTING_MSG("frame-format"),
substrs=[format_string])
self.runCmd("breakpoint set -n main")
self.runCmd("process launch --working-dir '{0}'".format(self.get_process_working_directory()),
RUN_SUCCEEDED)
self.expect("thread backtrace",
substrs=["`main", self.getSourceDir()])
def test_set_auto_confirm(self):
"""Test that after 'set auto-confirm true', manual confirmation should not kick in."""
self.build()
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
self.runCmd("settings set auto-confirm true")
# Immediately test the setting.
self.expect("settings show auto-confirm", SETTING_MSG("auto-confirm"),
startstr="auto-confirm (boolean) = true")
# Now 'breakpoint delete' should just work fine without confirmation
# prompt from the command interpreter.
self.runCmd("breakpoint set -n main")
self.expect("breakpoint delete",
startstr="All breakpoints removed")
# Restore the original setting of auto-confirm.
self.runCmd("settings clear auto-confirm")
self.expect("settings show auto-confirm", SETTING_MSG("auto-confirm"),
startstr="auto-confirm (boolean) = false")
@skipIf(archs=no_match(['x86_64', 'i386', 'i686']))
def test_disassembler_settings(self):
"""Test that user options for the disassembler take effect."""
self.build()
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# AT&T syntax
self.runCmd("settings set target.x86-disassembly-flavor att")
self.runCmd("settings set target.use-hex-immediates false")
self.expect("disassemble -n numberfn",
substrs=["$90"])
self.runCmd("settings set target.use-hex-immediates true")
self.runCmd("settings set target.hex-immediate-style c")
self.expect("disassemble -n numberfn",
substrs=["$0x5a"])
self.runCmd("settings set target.hex-immediate-style asm")
self.expect("disassemble -n numberfn",
substrs=["$5ah"])
# Intel syntax
self.runCmd("settings set target.x86-disassembly-flavor intel")
self.runCmd("settings set target.use-hex-immediates false")
self.expect("disassemble -n numberfn",
substrs=["90"])
self.runCmd("settings set target.use-hex-immediates true")
self.runCmd("settings set target.hex-immediate-style c")
self.expect("disassemble -n numberfn",
substrs=["0x5a"])
self.runCmd("settings set target.hex-immediate-style asm")
self.expect("disassemble -n numberfn",
substrs=["5ah"])
@skipIfDarwinEmbedded # <rdar://problem/34446098> debugserver on ios etc can't write files
def test_run_args_and_env_vars(self):
self.do_test_run_args_and_env_vars(use_launchsimple=False)
@skipIfDarwinEmbedded # <rdar://problem/34446098> debugserver on ios etc can't write files
def test_launchsimple_args_and_env_vars(self):
self.do_test_run_args_and_env_vars(use_launchsimple=True)
def do_test_run_args_and_env_vars(self, use_launchsimple):
"""Test that run-args and env-vars are passed to the launched process."""
self.build()
# Set the run-args and the env-vars.
# And add hooks to restore the settings during tearDown().
self.runCmd('settings set target.run-args A B C')
self.addTearDownHook(
lambda: self.runCmd("settings clear target.run-args"))
self.runCmd('settings set target.env-vars ["MY_ENV_VAR"]=YES')
self.addTearDownHook(
lambda: self.runCmd("settings clear target.env-vars"))
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
target = self.dbg.GetTargetAtIndex(0)
launch_info = target.GetLaunchInfo()
found_env_var = False
for i in range(0, launch_info.GetNumEnvironmentEntries()):
if launch_info.GetEnvironmentEntryAtIndex(i) == "MY_ENV_VAR=YES":
found_env_var = True
break
self.assertTrue(found_env_var,
"MY_ENV_VAR was not set in LunchInfo object")
self.expect(
'target show-launch-environment',
substrs=["MY_ENV_VAR=YES"])
wd = self.get_process_working_directory()
if use_launchsimple:
process = target.LaunchSimple(None, None, wd)
self.assertTrue(process)
else:
self.runCmd("process launch --working-dir '{0}'".format(wd),
RUN_SUCCEEDED)
# Read the output file produced by running the program.
output = lldbutil.read_file_from_process_wd(self, "output2.txt")
self.expect(
output,
exe=False,
substrs=[
"argv[1] matches",
"argv[2] matches",
"argv[3] matches",
"Environment variable 'MY_ENV_VAR' successfully passed."])
# Check that env-vars overrides unset-env-vars.
self.runCmd('settings set target.unset-env-vars MY_ENV_VAR')
self.expect(
'target show-launch-environment',
'env-vars overrides unset-env-vars',
substrs=["MY_ENV_VAR=YES"])
wd = self.get_process_working_directory()
if use_launchsimple:
process = target.LaunchSimple(None, None, wd)
self.assertTrue(process)
else:
self.runCmd("process launch --working-dir '{0}'".format(wd),
RUN_SUCCEEDED)
# Read the output file produced by running the program.
output = lldbutil.read_file_from_process_wd(self, "output2.txt")
self.expect(
output,
exe=False,
substrs=[
"Environment variable 'MY_ENV_VAR' successfully passed."])
@skipIfRemote # it doesn't make sense to send host env to remote target
def test_pass_host_env_vars(self):
"""Test that the host env vars are passed to the launched process."""
self.build()
# Set some host environment variables now.
os.environ["MY_HOST_ENV_VAR1"] = "VAR1"
os.environ["MY_HOST_ENV_VAR2"] = "VAR2"
# This is the function to unset the two env variables set above.
def unset_env_variables():
os.environ.pop("MY_HOST_ENV_VAR1")
os.environ.pop("MY_HOST_ENV_VAR2")
self.addTearDownHook(unset_env_variables)
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# By default, inherit-env is 'true'.
self.expect(
'settings show target.inherit-env',
"Default inherit-env is 'true'",
startstr="target.inherit-env (boolean) = true")
self.expect(
'target show-launch-environment',
'Host environment is passed correctly',
substrs=['MY_HOST_ENV_VAR1=VAR1', 'MY_HOST_ENV_VAR2=VAR2'])
self.runCmd("process launch --working-dir '{0}'".format(self.get_process_working_directory()),
RUN_SUCCEEDED)
# Read the output file produced by running the program.
output = lldbutil.read_file_from_process_wd(self, "output1.txt")
self.expect(
output,
exe=False,
substrs=[
"The host environment variable 'MY_HOST_ENV_VAR1' successfully passed.",
"The host environment variable 'MY_HOST_ENV_VAR2' successfully passed."])
# Now test that we can prevent the inferior from inheriting the
# environment.
self.runCmd('settings set target.inherit-env false')
self.expect(
'target show-launch-environment',
'target.inherit-env affects `target show-launch-environment`',
matching=False,
substrs = ['MY_HOST_ENV_VAR1=VAR1', 'MY_HOST_ENV_VAR2=VAR2'])
self.runCmd("process launch --working-dir '{0}'".format(self.get_process_working_directory()),
RUN_SUCCEEDED)
# Read the output file produced by running the program.
output = lldbutil.read_file_from_process_wd(self, "output1.txt")
self.expect(
output,
exe=False,
matching=False,
substrs=[
"The host environment variable 'MY_HOST_ENV_VAR1' successfully passed.",
"The host environment variable 'MY_HOST_ENV_VAR2' successfully passed."])
# Now test that we can unset variables from the inherited environment.
self.runCmd('settings set target.inherit-env true')
self.runCmd('settings set target.unset-env-vars MY_HOST_ENV_VAR1')
self.runCmd("process launch --working-dir '{0}'".format(self.get_process_working_directory()),
RUN_SUCCEEDED)
# Read the output file produced by running the program.
output = lldbutil.read_file_from_process_wd(self, "output1.txt")
self.expect(
'target show-launch-environment',
'MY_HOST_ENV_VAR1 is unset, it shouldn\'t be in `target show-launch-environment`',
matching=False,
substrs = ['MY_HOST_ENV_VAR1=VAR1'])
self.expect(
'target show-launch-environment',
            'MY_HOST_ENV_VAR2 should be in `target show-launch-environment`',
substrs = ['MY_HOST_ENV_VAR2=VAR2'])
self.expect(
output,
exe=False,
matching=False,
substrs=[
"The host environment variable 'MY_HOST_ENV_VAR1' successfully passed."])
self.expect(
output,
exe=False,
substrs=[
"The host environment variable 'MY_HOST_ENV_VAR2' successfully passed."])
@skipIfDarwinEmbedded # <rdar://problem/34446098> debugserver on ios etc can't write files
def test_set_error_output_path(self):
"""Test that setting target.error/output-path for the launched process works."""
self.build()
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Set the error-path and output-path and verify both are set.
self.runCmd("settings set target.error-path '{0}'".format(
lldbutil.append_to_process_working_directory(self, "stderr.txt")))
self.runCmd("settings set target.output-path '{0}".format(
lldbutil.append_to_process_working_directory(self, "stdout.txt")))
# And add hooks to restore the original settings during tearDown().
self.addTearDownHook(
lambda: self.runCmd("settings clear target.output-path"))
self.addTearDownHook(
lambda: self.runCmd("settings clear target.error-path"))
self.expect("settings show target.error-path",
SETTING_MSG("target.error-path"),
substrs=['target.error-path (file)', 'stderr.txt"'])
self.expect("settings show target.output-path",
SETTING_MSG("target.output-path"),
substrs=['target.output-path (file)', 'stdout.txt"'])
self.runCmd("process launch --working-dir '{0}'".format(self.get_process_working_directory()),
RUN_SUCCEEDED)
output = lldbutil.read_file_from_process_wd(self, "stderr.txt")
message = "This message should go to standard error."
if lldbplatformutil.hasChattyStderr(self):
self.expect(output, exe=False, substrs=[message])
else:
self.expect(output, exe=False, startstr=message)
output = lldbutil.read_file_from_process_wd(self, "stdout.txt")
self.expect(output, exe=False,
startstr="This message should go to standard out.")
def test_print_dictionary_setting(self):
self.runCmd("settings clear target.env-vars")
self.runCmd("settings set target.env-vars [\"MY_VAR\"]=some-value")
self.expect("settings show target.env-vars",
substrs=["MY_VAR=some-value"])
self.runCmd("settings clear target.env-vars")
def test_print_array_setting(self):
self.runCmd("settings clear target.run-args")
self.runCmd("settings set target.run-args gobbledy-gook")
self.expect("settings show target.run-args",
substrs=['[0]: "gobbledy-gook"'])
self.runCmd("settings clear target.run-args")
def test_settings_with_quotes(self):
self.runCmd("settings clear target.run-args")
self.runCmd("settings set target.run-args a b c")
self.expect("settings show target.run-args",
substrs=['[0]: "a"',
'[1]: "b"',
'[2]: "c"'])
self.runCmd("settings set target.run-args 'a b c'")
self.expect("settings show target.run-args",
substrs=['[0]: "a b c"'])
self.runCmd("settings clear target.run-args")
self.runCmd("settings clear target.env-vars")
self.runCmd(
'settings set target.env-vars ["MY_FILE"]="this is a file name with spaces.txt"')
self.expect("settings show target.env-vars",
substrs=['MY_FILE=this is a file name with spaces.txt'])
self.runCmd("settings clear target.env-vars")
# Test and make sure that setting "format-string" settings obeys quotes
# if they are provided
self.runCmd("settings set thread-format 'abc def' ")
self.expect("settings show thread-format",
'thread-format (format-string) = "abc def"')
self.runCmd('settings set thread-format "abc def" ')
self.expect("settings show thread-format",
'thread-format (format-string) = "abc def"')
# Make sure when no quotes are provided that we maintain any trailing
# spaces
self.runCmd('settings set thread-format abc def ')
self.expect("settings show thread-format",
'thread-format (format-string) = "abc def "')
self.runCmd('settings clear thread-format')
@expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr44430")
def test_settings_with_trailing_whitespace(self):
# boolean
# Set to known value
self.runCmd("settings set target.skip-prologue true")
# Set to new value with trailing whitespace
self.runCmd("settings set target.skip-prologue false ")
# Make sure the setting was correctly set to "false"
self.expect(
"settings show target.skip-prologue",
SETTING_MSG("target.skip-prologue"),
startstr="target.skip-prologue (boolean) = false")
self.runCmd("settings clear target.skip-prologue", check=False)
# integer
self.runCmd("settings set term-width 70") # Set to known value
# Set to new value with trailing whitespaces
self.runCmd("settings set term-width 60 \t")
self.expect("settings show term-width", SETTING_MSG("term-width"),
startstr="term-width (int) = 60")
self.runCmd("settings clear term-width", check=False)
# string
self.runCmd("settings set target.arg0 abc") # Set to known value
# Set to new value with trailing whitespaces
self.runCmd("settings set target.arg0 cde\t ")
self.expect("settings show target.arg0", SETTING_MSG("target.arg0"),
startstr='target.arg0 (string) = "cde"')
self.runCmd("settings clear target.arg0", check=False)
# file
path1 = self.getBuildArtifact("path1.txt")
path2 = self.getBuildArtifact("path2.txt")
self.runCmd(
"settings set target.output-path %s" %
path1) # Set to known value
self.expect(
"settings show target.output-path",
SETTING_MSG("target.output-path"),
startstr='target.output-path (file) = ',
substrs=[path1])
self.runCmd("settings set target.output-path %s " %
path2) # Set to new value with trailing whitespaces
self.expect(
"settings show target.output-path",
SETTING_MSG("target.output-path"),
startstr='target.output-path (file) = ',
substrs=[path2])
self.runCmd("settings clear target.output-path", check=False)
# enum
# Set to known value
self.runCmd("settings set stop-disassembly-display never")
# Set to new value with trailing whitespaces
self.runCmd("settings set stop-disassembly-display always ")
self.expect(
"settings show stop-disassembly-display",
SETTING_MSG("stop-disassembly-display"),
startstr='stop-disassembly-display (enum) = always')
self.runCmd("settings clear stop-disassembly-display", check=False)
# language
# Set to known value
self.runCmd("settings set target.language c89")
# Set to new value with trailing whitespace
self.runCmd("settings set target.language c11 ")
self.expect(
"settings show target.language",
SETTING_MSG("target.language"),
startstr="target.language (language) = c11")
self.runCmd("settings clear target.language", check=False)
# arguments
self.runCmd("settings set target.run-args 1 2 3") # Set to known value
# Set to new value with trailing whitespaces
self.runCmd("settings set target.run-args 3 4 5 ")
self.expect(
"settings show target.run-args",
SETTING_MSG("target.run-args"),
substrs=[
'target.run-args (arguments) =',
'[0]: "3"',
'[1]: "4"',
'[2]: "5"'])
self.runCmd("settings set target.run-args 1 2 3") # Set to known value
# Set to new value with trailing whitespaces
self.runCmd("settings set target.run-args 3 \ \ ")
self.expect(
"settings show target.run-args",
SETTING_MSG("target.run-args"),
substrs=[
'target.run-args (arguments) =',
'[0]: "3"',
'[1]: " "',
'[2]: " "'])
self.runCmd("settings clear target.run-args", check=False)
# dictionaries
self.runCmd("settings clear target.env-vars") # Set to known value
# Set to new value with trailing whitespaces
self.runCmd("settings set target.env-vars A=B C=D\t ")
self.expect(
"settings show target.env-vars",
SETTING_MSG("target.env-vars"),
substrs=[
'target.env-vars (dictionary of strings) =',
'A=B',
'C=D'])
self.runCmd("settings clear target.env-vars", check=False)
# regex
# Set to known value
self.runCmd("settings clear target.process.thread.step-avoid-regexp")
# Set to new value with trailing whitespaces
self.runCmd(
"settings set target.process.thread.step-avoid-regexp foo\\ ")
self.expect(
"settings show target.process.thread.step-avoid-regexp",
SETTING_MSG("target.process.thread.step-avoid-regexp"),
substrs=['target.process.thread.step-avoid-regexp (regex) = foo\\ '])
self.runCmd(
"settings clear target.process.thread.step-avoid-regexp",
check=False)
# format-string
self.runCmd("settings clear disassembly-format") # Set to known value
# Set to new value with trailing whitespaces
self.runCmd("settings set disassembly-format foo ")
self.expect("settings show disassembly-format",
SETTING_MSG("disassembly-format"),
substrs=['disassembly-format (format-string) = "foo "'])
self.runCmd("settings clear disassembly-format", check=False)
def test_settings_list(self):
# List settings (and optionally test the filter to only show 'target' settings).
self.expect("settings list target", substrs=["language", "arg0", "detach-on-error"])
self.expect("settings list target", matching=False, substrs=["packet-timeout"])
self.expect("settings list", substrs=["language", "arg0", "detach-on-error", "packet-timeout"])
def test_settings_remove_single(self):
# Set some environment variables and use 'remove' to delete them.
self.runCmd("settings set target.env-vars a=b c=d")
self.expect("settings show target.env-vars", substrs=["a=b", "c=d"])
self.runCmd("settings remove target.env-vars a")
self.expect("settings show target.env-vars", matching=False, substrs=["a=b"])
self.expect("settings show target.env-vars", substrs=["c=d"])
self.runCmd("settings remove target.env-vars c")
self.expect("settings show target.env-vars", matching=False, substrs=["a=b", "c=d"])
def test_settings_remove_multiple(self):
self.runCmd("settings set target.env-vars a=b c=d e=f")
self.expect("settings show target.env-vars", substrs=["a=b", "c=d", "e=f"])
self.runCmd("settings remove target.env-vars a e")
self.expect("settings show target.env-vars", matching=False, substrs=["a=b", "e=f"])
self.expect("settings show target.env-vars", substrs=["c=d"])
def test_settings_remove_nonexistent_value(self):
self.expect("settings remove target.env-vars doesntexist", error=True,
substrs=["no value found named 'doesntexist'"])
def test_settings_remove_nonexistent_settings(self):
self.expect("settings remove doesntexist alsodoesntexist", error=True,
substrs=["error: invalid value path 'doesntexist'"])
def test_settings_remove_missing_arg(self):
self.expect("settings remove", error=True,
substrs=["'settings remove' takes an array or dictionary item, or"])
def test_settings_remove_empty_arg(self):
self.expect("settings remove ''", error=True,
substrs=["'settings remove' command requires a valid variable name"])
def test_all_settings_exist(self):
self.expect("settings show",
substrs=["auto-confirm",
"frame-format",
"notify-void",
"prompt",
"script-lang",
"stop-disassembly-count",
"stop-disassembly-display",
"stop-line-count-after",
"stop-line-count-before",
"stop-show-column",
"term-width",
"thread-format",
"use-external-editor",
"target.default-arch",
"target.move-to-nearest-code",
"target.expr-prefix",
"target.language",
"target.prefer-dynamic-value",
"target.enable-synthetic-value",
"target.skip-prologue",
"target.source-map",
"target.exec-search-paths",
"target.max-children-count",
"target.max-string-summary-length",
"target.breakpoints-use-platform-avoid-list",
"target.run-args",
"target.env-vars",
"target.inherit-env",
"target.input-path",
"target.output-path",
"target.error-path",
"target.disable-aslr",
"target.disable-stdio",
"target.x86-disassembly-flavor",
"target.use-hex-immediates",
"target.hex-immediate-style",
"target.process.disable-memory-cache",
"target.process.extra-startup-command",
"target.process.thread.step-avoid-regexp",
"target.process.thread.trace-thread"])
# settings under an ".experimental" domain should have two properties:
# 1. If the name does not exist with "experimental" in the name path,
# the name lookup should try to find it without "experimental". So
# a previously-experimental setting that has been promoted to a
# "real" setting will still be set by the original name.
# 2. Changing a setting with .experimental., name, where the setting
# does not exist either with ".experimental." or without, should
# not generate an error. So if an experimental setting is removed,
# people who may have that in their ~/.lldbinit files should not see
# any errors.
def test_experimental_settings(self):
cmdinterp = self.dbg.GetCommandInterpreter()
result = lldb.SBCommandReturnObject()
# Set target.arg0 to a known value, check that we can retrieve it via
# the actual name and via .experimental.
self.expect('settings set target.arg0 first-value')
self.expect('settings show target.arg0', substrs=['first-value'])
self.expect('settings show target.experimental.arg0', substrs=['first-value'], error=False)
# Set target.arg0 to a new value via a target.experimental.arg0 name,
# verify that we can read it back via both .experimental., and not.
self.expect('settings set target.experimental.arg0 second-value', error=False)
self.expect('settings show target.arg0', substrs=['second-value'])
self.expect('settings show target.experimental.arg0', substrs=['second-value'], error=False)
# showing & setting an undefined .experimental. setting should generate no errors.
self.expect('settings show target.experimental.setting-which-does-not-exist', patterns=['^\s$'], error=False)
self.expect('settings set target.experimental.setting-which-does-not-exist true', error=False)
# A domain component before .experimental. which does not exist should give an error
# But the code does not yet do that.
# self.expect('settings set target.setting-which-does-not-exist.experimental.arg0 true', error=True)
# finally, confirm that trying to set a setting that does not exist still fails.
# (SHOWING a setting that does not exist does not currently yield an error.)
self.expect('settings set target.setting-which-does-not-exist true', error=True)
| [
"[email protected]"
] | |
111534e2e8de66694688967bcfb3a213ec10094b | 12346be5075d772878a6015053d6eeb4e7227acc | /21. Design Patterns/behavioral/template.py | b5f260abd7cbf6bd3ca2bef6a2a0ad0bdfa3fa25 | [
"MIT"
] | permissive | elenaborisova/Python-OOP | 2a46bfafce868f03481fb699580fb3e60ca4e3bd | 584882c08f84045b12322917f0716c7c7bd9befc | refs/heads/main | 2023-04-02T17:41:23.440617 | 2021-04-10T13:56:38 | 2021-04-10T13:56:38 | 321,376,083 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 539 | py | from abc import ABC, abstractmethod
class Storage(ABC):
@abstractmethod
def get_storage_list(self):
pass
def save(self, data):
self.get_storage_list().append(data)
class SelfListStorage(Storage):
def __init__(self):
self.list = []
def get_storage_list(self):
return self.list
class ProviderListStorage(Storage):
def __init__(self, list_provider):
self.list_provider = list_provider
def get_storage_list(self):
return self.list_provider.provide_list() | [
"[email protected]"
] | |
10878b61c5e0ebf2a18f06f4fa888b9efee34475 | ee904d3335b8fdc5dbb6c260f87dd0e01b7bb605 | /personal/models.py | 2a3381d9a1a6aa4c51c481316fcf4cac75a423cd | [] | no_license | sudhanshu8917/Techy-Blogger | 32930136b479635ec5616e44cc48b7d02bce2795 | f7fd26cb223276bd9c35023c8166243ab430b6b4 | refs/heads/master | 2022-04-25T01:03:57.281784 | 2020-04-23T19:46:48 | 2020-04-23T19:46:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | from django.db import models
# Create your models here.
# PRIORITY = [
# ("H","Low"),
# ("L","Medium"),
# ("H","High"),
# ]
# class Question(models.Model):
# tilte =models.CharField(max_length=60)
# question =models.TextField(max_length=400)
# priority =models.CharField(max_length=1,choices=PRIORITY)
# def __str__(self):
# return self.tilte
# class Meta:
# verbose_name = "The Question"
# verbose_name_plural = "Peoples Question" | [
"[email protected]"
] | |
3e216e3ee3078736267939ddfdd51b2ed51045cd | 09e5cfe06e437989a2ccf2aeecb9c73eb998a36c | /modules/cctbx_project/wxtbx/phil_controls/boolctrl.py | eb5df59dfbe39578d721ac70b72c30df2989b55a | [
"BSD-3-Clause",
"BSD-3-Clause-LBNL"
] | permissive | jorgediazjr/dials-dev20191018 | b81b19653624cee39207b7cefb8dfcb2e99b79eb | 77d66c719b5746f37af51ad593e2941ed6fbba17 | refs/heads/master | 2020-08-21T02:48:54.719532 | 2020-01-25T01:41:37 | 2020-01-25T01:41:37 | 216,089,955 | 0 | 1 | BSD-3-Clause | 2020-01-25T01:41:39 | 2019-10-18T19:03:17 | Python | UTF-8 | Python | false | false | 2,534 | py | from __future__ import absolute_import, division, print_function
from wxtbx import phil_controls
import wx
from libtbx import Auto
WXTBX_PHIL_BOOL_TRIBOOL = 1
WXTBX_PHIL_BOOL_AUTO = 2
class BoolCtrl(wx.CheckBox, phil_controls.PhilCtrl):
def __init__(self, *args, **kwds):
kwds = dict(kwds)
self._bool_style = kwds.get("style", 0)
kwds['style'] = 0
if ((self._bool_style & WXTBX_PHIL_BOOL_TRIBOOL) or
(self._bool_style & WXTBX_PHIL_BOOL_AUTO)):
kwds['style'] |= wx.CHK_ALLOW_3RD_STATE_FOR_USER|wx.CHK_3STATE
else :
kwds['style'] |= wx.CHK_3STATE # wx.CHK_ALLOW_3RD_STATE_FOR_USER?
wx.CheckBox.__init__(self, *args, **kwds)
self.Bind(wx.EVT_CHECKBOX, lambda evt: self.DoSendEvent())
def SetValue(self, value):
if (value is None) or (value is Auto):
assert (self.Is3State())
self.Set3StateValue(wx.CHK_UNDETERMINED)
else :
if (self.Is3State()):
if (value == True):
self.Set3StateValue(wx.CHK_CHECKED)
else :
self.Set3StateValue(wx.CHK_UNCHECKED)
else :
wx.CheckBox.SetValue(self, value)
def GetValue(self):
if (self.Is3State()):
value = self.Get3StateValue()
if (value == wx.CHK_UNDETERMINED):
if (self._bool_style & WXTBX_PHIL_BOOL_AUTO):
return Auto
else :
return None
else :
return (value == wx.CHK_CHECKED)
else :
return wx.CheckBox.GetValue(self)
def GetPhilValue(self):
return self.GetValue()
def GetStringValue(self):
return str(self.GetValue())
if (__name__ == "__main__"):
app = wx.App(0)
frame = wx.Frame(None, -1, "PHIL bool test")
panel = wx.Panel(frame, -1, size=(600,400))
box1 = BoolCtrl(panel, label="Use NCS restraints", pos=(100,100))
box2 = BoolCtrl(panel, label="Find NCS groups automatically", pos=(100,150))
box3 = BoolCtrl(panel, label="Fast search mode", pos=(100,200),
style=WXTBX_PHIL_BOOL_AUTO)
box1.SetValue(False)
box2.SetValue(None)
box3.SetValue(Auto)
assert (box1.GetValue() == box1.GetPhilValue() == False)
assert (box2.GetValue() is None)
assert (box3.GetValue() is Auto)
assert (box2.GetStringValue() == "None")
assert (box3.GetStringValue() == "Auto")
box3.SetValue(False)
assert (box3.GetStringValue() == "False")
box1.SetValue(True)
assert (box1.GetStringValue() == "True")
def OnChange(event):
print(event.GetEventObject().GetPhilValue())
frame.Bind(phil_controls.EVT_PHIL_CONTROL, OnChange)
frame.Show()
app.MainLoop()
| [
"[email protected]"
] | |
44b05c375e36adc5e091b2377a51205d852b42fc | 45de3aa97525713e3a452c18dcabe61ac9cf0877 | /src/secondaires/jeux/jeux/poquiir/combinaisons.py | 4bf4664b25400af967a27eaa73edd21d60a641ff | [
"BSD-3-Clause"
] | permissive | stormi/tsunami | 95a6da188eadea3620c70f7028f32806ee2ec0d1 | bdc853229834b52b2ee8ed54a3161a1a3133d926 | refs/heads/master | 2020-12-26T04:27:13.578652 | 2015-11-17T21:32:38 | 2015-11-17T21:32:38 | 25,606,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,931 | py | # -*-coding:Utf-8 -*
# Copyright (c) 2012 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier définissant les combinaisons possibles."""
from abstraits.obase import BaseObj
from corps.fonctions import lisser
class Combinaison(BaseObj):
"""Classe représentant une combinaison abstraite."""
points = None
nom = None
def __init__(self, combinaison, exterieures):
"""Constructeur de la combinaison.
Les cartes contenues dans la liste combinaison sont celles formant
la combinaison. Les autres sont contenues dans exterieures.
"""
BaseObj.__init__(self)
self.combinaison = combinaison
self.exterieures = exterieures
self._construire()
def __getnewargs__(self):
return (None, None)
@property
def nom(self):
"""Retourne le nom de la combinaison."""
return "rien"
@property
def points_complet(self):
"""Retourne les points de la combinaison spécifique."""
return (self.points, self.combinaison[-1].points)
@property
def points_exterieurs(self):
"""Retourne la somme des points des cartes non utilisées."""
return sum(piece.points for piece in self.exterieures)
@classmethod
def forme(cls, pieces):
"""Retourne une combinaison si les pièces forment une combinaison.
Les pièces doivent être transmises sous la forme d'une liste de listes.
Les pièces de même valeur doivent être regroupées dans une liste et
les pièces de plus grande valeur doivent apparaître en premier.
Exemple s'inspirant, au lieu de pièces, des cartes standards :
On a : 7 de coeur, as de coeur, 3 de carreau, 3 de trèffle...
On doit recevoir : [[as coeur], [7 coeur], [3 carreau, 3 trèffle]]
"""
return None
class Paire(Combinaison):
"""Combinaison représentant la paire."""
points = 1
@property
def nom(self):
nom_piece = self.combinaison[0].nom.rstrip("s")
return lisser("une paire de {}s".format(nom_piece))
@classmethod
def forme(cls, pieces):
for groupe in pieces:
if len(groupe) == 2:
autres = list(pieces)
autres.remove(groupe)
exterieures = []
for o_groupe in autres:
exterieures.extend(o_groupe)
paire = cls(groupe, exterieures)
return paire
return None
class DoublePaire(Combinaison):
"""Combinaison représentant la double paire."""
points = 2
@property
def nom(self):
nom_1 = self.combinaison[0].nom.rstrip("s")
nom_2 = self.combinaison[2].nom.rstrip("s")
return lisser("une double-paire de {}s et de {}s".format(nom_1, nom_2))
@classmethod
def forme(cls, pieces):
groupes = []
for groupe in pieces:
if len(groupe) == 2:
groupes.append(groupe)
if len(groupes) != 2:
continue
autres = list(pieces)
for o_groupe in groupes:
autres.remove(o_groupe)
exterieures = []
for o_groupe in autres:
exterieures.extend(o_groupe)
dpaire = cls(groupes[0] + groupes[1], exterieures)
return dpaire
return None
class Brelan(Combinaison):
"""Combinaison représentant le brelan."""
points = 3
@property
def nom(self):
nom_piece = self.combinaison[0].nom.rstrip("s")
return lisser("un brelan de {}s".format(nom_piece))
@classmethod
def forme(cls, pieces):
for groupe in pieces:
if len(groupe) == 3:
autres = list(pieces)
autres.remove(groupe)
exterieures = []
for o_groupe in autres:
exterieures.extend(o_groupe)
brelan = cls(groupe, exterieures)
return brelan
return None
class Suite(Combinaison):
"""Combinaison représentant la suite."""
points = 4
@property
def nom(self):
nom_piece = self.combinaison[0].nom_complet_defini
nom_piece = " ".join(nom_piece.split(" ")[:-2])
return lisser("une suite à {}".format(nom_piece))
@classmethod
def forme(cls, pieces):
a_pieces = []
for groupe in pieces:
a_pieces.extend(groupe)
a_pieces = sorted(a_pieces, key=lambda piece: piece.points,
reverse=True)
for i, piece in enumerate(a_pieces):
t_pieces = [piece]
nb = 1
for a_piece in a_pieces[i + 1:]:
if piece.points - a_piece.points == nb:
t_pieces.append(a_piece)
nb += 1
if len(t_pieces) == 5:
exterieures = list(a_pieces)
for t_piece in t_pieces:
if t_piece in exterieures:
exterieures.remove(t_piece)
suite = cls(t_pieces, exterieures)
return suite
return None
class Couleur(Combinaison):
"""Combinaison représentant la couleur."""
points = 5
@property
def nom(self):
nom_piece = self.combinaison[0].nom_complet_defini
nom_piece = " ".join(nom_piece.split(" ")[:-2])
return lisser("une couleur à {}".format(nom_piece))
@classmethod
def forme(cls, pieces):
a_pieces = []
for groupe in pieces:
a_pieces.extend(groupe)
a_pieces = sorted(a_pieces, key=lambda piece: piece.points,
reverse=True)
couleurs = {}
for piece in a_pieces:
liste = couleurs.get(piece._couleur, [])
liste.append(piece)
couleurs[piece._couleur] = liste
for groupe in couleurs.values():
if len(groupe) >= 5:
exterieures = list(a_pieces)
for piece in groupe:
if piece in exterieures:
exterieures.remove(piece)
couleur = cls(groupe, exterieures)
return couleur
return None
class Carre(Combinaison):
"""Combinaison représentant le carré."""
points = 6
@property
def nom(self):
nom_piece = self.combinaison[0].nom.strip("s")
return lisser("un carré de {}s".format(nom_piece))
@classmethod
def forme(cls, pieces):
print("carré", pieces)
for groupe in pieces:
if len(groupe) == 4:
autres = list(pieces)
autres.remove(groupe)
exterieures = []
for o_groupe in autres:
exterieures.extend(o_groupe)
carre = cls(groupe, exterieures)
return carre
return None
combinaisons = [Carre, Couleur, Suite, Brelan, DoublePaire, Paire] | [
"[email protected]"
] | |
a50506c6f1e9b437891467aeec49f7ce0e5d0e3c | 5e9b2d0d2a4399fd028c738a082921a1df1f8321 | /hacker rank/30 Days Of Code/Day 04 - Class vs. Instance.py | 2f6aaced92bd0e97a00c76adb993e0adff08a560 | [] | no_license | sunilsm7/python_exercises | 42e5a1aee0a0d5402b585e1b1631517145aa1e00 | b2754c51464dcd81319c8514c24249a13e18d825 | refs/heads/master | 2020-12-02T16:19:20.266436 | 2017-10-27T10:22:37 | 2017-10-27T10:22:37 | 96,534,650 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 876 | py | """
In this challenge, we're going to learn about the difference between a class and an instance;
because this is an Object Oriented concept, it's only enabled in certain languages.
Task
Write a Person class with an instance variable, age, and a constructor that takes an integer, initial_age, as a parameter.
The constructor must assign initial_age to _age after confirming the argument passed as _initial_age is not negative.
If a negative argument is passed as initial_age, the constructor should set age to 0 and print "Age is not valid, setting age to 0."
In addition, you must write the following instance methods:
age_1_year() should increase the instance variable _age by 1.
is_old() should perform the following conditional actions:
If age < 13, print "You are young.".
If age >= 13 and age < 18, print "You are a teenager.".
Otherwise, print "You are old.".
"""
| [
"[email protected]"
] | |
a92d6441a0fd2f223bc21e0d866ebddf7a054b36 | 17821ba5f1345bcb5181092cec7808e08355abd0 | /Django_projects/P2-video fail/my_proj/src/my_proj/migrations/0005_video_name.py | e9e93aa7fe462157356740f1f0b77bf76382fe25 | [
"MIT"
] | permissive | Coni63/scripts_Python | be1a416dc702c919120645f2946596c68a6a3fbb | b1ac0bee706504abcc86fd7a72b8ec625ffa12b3 | refs/heads/master | 2021-07-11T16:50:56.719758 | 2018-02-25T12:19:29 | 2018-02-25T12:19:29 | 95,472,736 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-05-17 09:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('my_proj', '0004_remove_video_name'),
]
operations = [
migrations.AddField(
model_name='video',
name='name',
field=models.CharField(default='test', max_length=100),
),
]
| [
"[email protected]"
] | |
1596db543519340af331ebc5b52159918fd4ee73 | 8848bd7a4ca88e0061ce1c7dfbf45c488968ea52 | /ravens/tasks/insertion_goal.py | fc0615b9afcb813910b6af74eada86ff53f36564 | [
"Apache-2.0"
] | permissive | gautams3/deformable-ravens | 5f390d6bf5af26fa9c746232a8d90403a89fd7ce | 1324243b804532d229d91f2af13ee84c6fd4771c | refs/heads/master | 2023-08-15T01:46:47.025808 | 2021-10-13T16:10:32 | 2021-10-13T16:10:32 | 416,812,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,501 | py | #!/usr/bin/env python
import numpy as np
import pybullet as p
from ravens.tasks import Task
from ravens import utils as U
class InsertionGoal(Task):
"""Using insertion, but in a goal-based Transporters context."""
def __init__(self):
super().__init__()
self.ee = 'suction'
self.max_steps = 3
self.metric = 'pose'
self.primitive = 'pick_place'
def reset(self, env, last_info=None):
self.num_steps = 1
self.goal = {'places': {}, 'steps': []}
# Add L-shaped block.
block_size = (0.1, 0.1, 0.04)
block_urdf = 'assets/insertion/ell.urdf'
block_pose = self.random_pose(env, block_size)
block_id = env.add_object(block_urdf, block_pose)
self.goal['steps'].append({block_id: (2 * np.pi, [0])})
# Add L-shaped target pose, but without actually adding it.
if self.goal_cond_testing:
assert last_info is not None
self.goal['places'][0] = self._get_goal_info(last_info)
#print('\nin insertion reset, goal: {}'.format(self.goal['places'][0]))
else:
hole_pose = self.random_pose(env, block_size)
self.goal['places'][0] = hole_pose
#print('\nin insertion reset, goal: {}'.format(hole_pose))
def _get_goal_info(self, last_info):
"""Used to determine the goal given the last `info` dict."""
position, rotation, _ = last_info[4] # block ID=4
return (position, rotation)
| [
"[email protected]"
] | |
26ff43671dca13288c13c63813a52087fc0064b9 | 136a174f8de72746004aaf28a7ec959fddbd689b | /test_scripts/xx4.py | aca9862f32eab929bf99b84ef4e4d77742fecf20 | [] | no_license | xod442/imcServiceNow2 | ff3c74ffd633f67ef984c5ab9a65da0010e3bc9d | 0dd86659816bae19e5d43bcb8c894005564597cb | refs/heads/master | 2021-01-18T18:33:50.203685 | 2018-01-09T21:25:22 | 2018-01-09T21:25:22 | 86,862,461 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,927 | py | import time
from flask import Flask, request, render_template, redirect, url_for, flash, session, send_file
from flask.ext.bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from models import db, Imc_alarm_ids
from settings import APP_STATIC
import os
from flask import Flask, request, redirect, url_for
from werkzeug.utils import secure_filename
from snow_py import *
import requests
from pyhpeimc.auth import *
from pyhpeimc.plat.alarms import *
from snowbridge import *
db.create_all()
# Locked down upload folder never hurts...
UPLOAD_FOLDER = APP_STATIC
ALLOWED_EXTENSIONS = set(['csv'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
bootstrap = Bootstrap(app)
imc_user = "admin"
imc_passwd = "admin"
imc_host = "10.132.0.15"
snow_user = "admin"
snow_passwd = "Grape123!"
instance = "dev30543"
snow_url = 'https://dev30543.service-now.com/api/now/table/incident'
varz = []
data = {}
dump = []
alarm = {}
imc_test_url = 'http://'+imc_host+':8080'
# Configuring a connection to the VSD API
#
# Write logfile to local database
#
# Routes
alarm['severity'] = "1"
alarm['userAckUserName'] ='admin'
alarm['deviceDisplay'] = '10.10.10.10'
alarm['faultDesc'] = "Its down"
alarm['userAckType'] = "0"
alarm['id'] = "210"
alarm['faultTime'] = "1490648244"
snow_return = "401"
print alarm['id']
print snow_return
print alarm['faultDesc']
print alarm['deviceDisplay']
print alarm['severity']
print alarm['faultTime']
print alarm['userAckUserName']
print alarm['userAckType']
write_local_db(alarm, snow_return)
'''
logfile = Imc_alarm_ids(alarm['id'],snow_return,alarm['faultDesc'],alarm['deviceDisplay'],
alarm['severity'],alarm['faultTime'],alarm['userAckUserName'], alarm['userAckType'])
print logfile
db.session.add(logfile)
db.session.commit()
'''
print "Peace!"
| [
"[email protected]"
] | |
8af17fb2e3ab102cd0d02489f823a5800a3dac93 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Lazymux/websploit/core/help.py | 8b341ed650071223766e292675f6d56d447f26f8 | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:ad1e9de5e3288e6454391464b0dbe8b0b42084b82cfbc0f4789743568bbccdf1
size 1001
| [
"[email protected]"
] | |
b4f7cdeec17ecd205cbc93e2f5b6bc0444aacb08 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2864/60591/310678.py | b3ddf43088c2b9c36f5b42543811c99ee8acaf76 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | n = eval(input())
nums = list(map(int, input().split(" ")))
if (len(nums) == 1):
print(nums[0])
elif(len(nums) == 2):
if(abs(nums[1] - nums[0]) == 1):
print(max(nums[1],nums[0]))
else:
print(nums[1] + nums[0])
else:
result = 0
temp = []
for x in range(max(nums) + 1):
temp.append(0)
for num in nums:
temp[num] += num
cost = [temp[0], max(temp[0], temp[1])]
for x in range(2,max(nums) + 1):
cost.append(max(cost[x - 1],cost[x - 2] + temp[x]))
print(cost[-1])
| [
"[email protected]"
] | |
d8a5c1939b6c95386264908ad58cff196f78ef17 | 5963c12367490ffc01c9905c028d1d5480078dec | /homeassistant/components/numato/switch.py | 505d28d0c4036acb28906ca213dd68232a6cd195 | [
"Apache-2.0"
] | permissive | BenWoodford/home-assistant | eb03f73165d11935e8d6a9756272014267d7d66a | 2fee32fce03bc49e86cf2e7b741a15621a97cce5 | refs/heads/dev | 2023-03-05T06:13:30.354545 | 2021-07-18T09:51:53 | 2021-07-18T09:51:53 | 117,122,037 | 11 | 6 | Apache-2.0 | 2023-02-22T06:16:51 | 2018-01-11T16:10:19 | Python | UTF-8 | Python | false | false | 3,419 | py | """Switch platform integration for Numato USB GPIO expanders."""
import logging
from numato_gpio import NumatoGpioError
from homeassistant.const import (
CONF_DEVICES,
CONF_ID,
CONF_SWITCHES,
DEVICE_DEFAULT_NAME,
)
from homeassistant.helpers.entity import ToggleEntity
from . import CONF_INVERT_LOGIC, CONF_PORTS, DATA_API, DOMAIN
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the configured Numato USB GPIO switch ports."""
if discovery_info is None:
return
api = hass.data[DOMAIN][DATA_API]
switches = []
devices = hass.data[DOMAIN][CONF_DEVICES]
for device in [d for d in devices if CONF_SWITCHES in d]:
device_id = device[CONF_ID]
platform = device[CONF_SWITCHES]
invert_logic = platform[CONF_INVERT_LOGIC]
ports = platform[CONF_PORTS]
for port, port_name in ports.items():
try:
api.setup_output(device_id, port)
api.write_output(device_id, port, 1 if invert_logic else 0)
except NumatoGpioError as err:
_LOGGER.error(
"Failed to initialize switch '%s' on Numato device %s port %s: %s",
port_name,
device_id,
port,
err,
)
continue
switches.append(
NumatoGpioSwitch(
port_name,
device_id,
port,
invert_logic,
api,
)
)
add_entities(switches, True)
class NumatoGpioSwitch(ToggleEntity):
"""Representation of a Numato USB GPIO switch port."""
def __init__(self, name, device_id, port, invert_logic, api):
"""Initialize the port."""
self._name = name or DEVICE_DEFAULT_NAME
self._device_id = device_id
self._port = port
self._invert_logic = invert_logic
self._state = False
self._api = api
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def is_on(self):
"""Return true if port is turned on."""
return self._state
def turn_on(self, **kwargs):
"""Turn the port on."""
try:
self._api.write_output(
self._device_id, self._port, 0 if self._invert_logic else 1
)
self._state = True
self.schedule_update_ha_state()
except NumatoGpioError as err:
_LOGGER.error(
"Failed to turn on Numato device %s port %s: %s",
self._device_id,
self._port,
err,
)
def turn_off(self, **kwargs):
"""Turn the port off."""
try:
self._api.write_output(
self._device_id, self._port, 1 if self._invert_logic else 0
)
self._state = False
self.schedule_update_ha_state()
except NumatoGpioError as err:
_LOGGER.error(
"Failed to turn off Numato device %s port %s: %s",
self._device_id,
self._port,
err,
)
| [
"[email protected]"
] | |
380d34e9731daa55c6f70d3e860fe21844cf1912 | 32c56293475f49c6dd1b0f1334756b5ad8763da9 | /google-cloud-sdk/lib/googlecloudsdk/core/util/semver.py | 8fa1edb39fbcb2ec9cf241dd02e65fa0e28e8c38 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] | permissive | bopopescu/socialliteapp | b9041f17f8724ee86f2ecc6e2e45b8ff6a44b494 | 85bb264e273568b5a0408f733b403c56373e2508 | refs/heads/master | 2022-11-20T03:01:47.654498 | 2020-02-01T20:29:43 | 2020-02-01T20:29:43 | 282,403,750 | 0 | 0 | MIT | 2020-07-25T08:31:59 | 2020-07-25T08:31:59 | null | UTF-8 | Python | false | false | 6,611 | py | # -*- coding: utf-8 -*- #
# Copyright 2015 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for comparing semantic versions.
Basic rules of semver:
Format: major.minor.patch-prerelease+build
major, minor, patch, must all be present and integers with no leading zeros.
They are compared numerically by segment.
prerelease is an optional '.' separated series of identifiers where each is
either an integer with no leading zeros, or an alphanumeric string
(including '-'). Prereleases are compared by comparing each identifier in
order. Integers are compared numerically, alphanumeric strings are compared
lexicographically. A prerelease version is lower precedence than its associated
normal version.
The build number is optional and not included in the comparison. It is a '.'
separated series of alphanumeric identifiers.
Two SemVer objects are considered equal if they represent the exact same string
(including the build number and including case differences). For comparison
operators, we follow the SemVer spec of precedence and ignore the build number
and case of alphanumeric strings.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import re
from six.moves import zip_longest
# Only digits, with no leading zeros.
_DIGITS = r'(?:0|[1-9][0-9]*)'
# Digits, letters and dashes
_ALPHA_NUM = r'[-0-9A-Za-z]+'
# This is an alphanumeric string that must have at least one letter (or else it
# would be considered digits).
_STRICT_ALPHA_NUM = r'[-0-9A-Za-z]*[-A-Za-z]+[-0-9A-Za-z]*'
_PRE_RELEASE_IDENTIFIER = r'(?:{0}|{1})'.format(_DIGITS, _STRICT_ALPHA_NUM)
_PRE_RELEASE = r'(?:{0}(?:\.{0})*)'.format(_PRE_RELEASE_IDENTIFIER)
_BUILD = r'(?:{0}(?:\.{0})*)'.format(_ALPHA_NUM)
_SEMVER = (
r'^(?P<major>{digits})\.(?P<minor>{digits})\.(?P<patch>{digits})'
r'(?:\-(?P<prerelease>{release}))?(?:\+(?P<build>{build}))?$'
).format(digits=_DIGITS, release=_PRE_RELEASE, build=_BUILD)
class ParseError(Exception):
"""An exception for when a string failed to parse as a valid semver."""
pass
class SemVer(object):
"""Object to hold a parsed semantic version string."""
def __init__(self, version):
"""Creates a SemVer object from the given version string.
Args:
version: str, The version string to parse.
Raises:
ParseError: If the version could not be correctly parsed.
Returns:
SemVer, The parsed version.
"""
(self.major, self.minor, self.patch, self.prerelease, self.build) = (
SemVer._FromString(version))
@classmethod
def _FromString(cls, version):
"""Parse the given version string into its parts."""
if version is None:
raise ParseError('The value is not a valid SemVer string: [None]')
try:
match = re.match(_SEMVER, version)
except (TypeError, re.error) as e:
raise ParseError('Error parsing version string: [{0}]. {1}'
.format(version, e))
if not match:
raise ParseError(
'The value is not a valid SemVer string: [{0}]'.format(version))
parts = match.groupdict()
return (
int(parts['major']), int(parts['minor']), int(parts['patch']),
parts['prerelease'], parts['build'])
@classmethod
def _CmpHelper(cls, x, y):
"""Just a helper equivalent to the cmp() function in Python 2."""
return (x > y) - (x < y)
@classmethod
def _ComparePrereleaseStrings(cls, s1, s2):
"""Compares the two given prerelease strings.
Args:
s1: str, The first prerelease string.
s2: str, The second prerelease string.
Returns:
1 if s1 is greater than s2, -1 if s2 is greater than s1, and 0 if equal.
"""
s1 = s1.split('.') if s1 else []
s2 = s2.split('.') if s2 else []
for (this, other) in zip_longest(s1, s2):
# They can't both be None because empty parts of the string split will
# come through as the empty string. None indicates it ran out of parts.
if this is None:
return 1
elif other is None:
return -1
# Both parts have a value
if this == other:
# This part is the same, move on to the next.
continue
if this.isdigit() and other.isdigit():
# Numerical comparison if they are both numbers.
return SemVer._CmpHelper(int(this), int(other))
# Lexical comparison if either is a string. Numbers will always sort
# before strings.
return SemVer._CmpHelper(this.lower(), other.lower())
return 0
def _Compare(self, other):
"""Compare this SemVer to other.
Args:
other: SemVer, the other version to compare this one to.
Returns:
1 if self > other, -1 if other > self, 0 if equal.
"""
# Compare the required parts.
result = SemVer._CmpHelper(
(self.major, self.minor, self.patch),
(other.major, other.minor, other.patch))
# Only if required parts are equal, compare the prerelease strings.
# Never include build number in comparison.
result = result or SemVer._ComparePrereleaseStrings(
self.prerelease, other.prerelease)
return result
def Distance(self, other):
"""Compare this SemVer to other and returns the distances.
Args:
other: SemVer, the other version to compare this one to.
Returns:
Distances between the major, minor and patch versions.
"""
major_diff = self.major - other.major
minor_diff = self.minor - other.minor
patch_diff = self.patch - other.patch
return major_diff, minor_diff, patch_diff
def __eq__(self, other):
return other and (
(self.major, self.minor, self.patch, self.prerelease, self.build) ==
(other.major, other.minor, other.patch, other.prerelease, other.build))
def __ne__(self, other):
return not self == other
def __gt__(self, other):
return self._Compare(other) > 0
def __lt__(self, other):
return self._Compare(other) < 0
def __ge__(self, other):
return not self < other
def __le__(self, other):
return not self > other
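# Minimal usage sketch of the comparison rules described in the module
# docstring above (an illustrative addition, not part of the original file).
if __name__ == '__main__':
  # A prerelease sorts below its associated normal version.
  assert SemVer('1.2.3-alpha') < SemVer('1.2.3')
  # Numeric prerelease identifiers compare numerically, not lexically.
  assert SemVer('1.2.3-alpha.2') < SemVer('1.2.3-alpha.10')
  # Build metadata is ignored for precedence but not for equality.
  assert not SemVer('1.2.3+build.1') < SemVer('1.2.3+build.2')
  assert SemVer('1.2.3+build.1') != SemVer('1.2.3+build.2')
  # Distance returns per-segment differences: here (0, 2, -1).
  print(SemVer('1.2.3').Distance(SemVer('1.0.4')))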
| [
"[email protected]"
] | |
0f03a2ac1686b80d47dda000ad3fd21ef99f7f7a | 1ef68ba8f4754bf4d4d86d945bb1392be3ff5beb | /mlagents/envs/communicator_objects/custom_action_pb2.py | 1c16809b0b0d05a34b2fe0fb5193e54c3337f10e | [
"MIT"
] | permissive | Abluceli/HRG-SAC | fc1b5fb720f391390b0ac86c23c46187178a3691 | 334df1e8afbfff3544413ade46fb12f03556014b | refs/heads/master | 2022-12-29T22:51:35.584254 | 2020-02-19T13:39:23 | 2020-02-19T13:39:23 | 241,630,517 | 7 | 1 | MIT | 2022-12-08T06:18:57 | 2020-02-19T13:36:58 | Python | UTF-8 | Python | false | true | 1,939 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mlagents/envs/communicator_objects/custom_action.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="mlagents/envs/communicator_objects/custom_action.proto",
package="communicator_objects",
syntax="proto3",
serialized_options=_b("\252\002\034MLAgents.CommunicatorObjects"),
serialized_pb=_b(
'\n6mlagents/envs/communicator_objects/custom_action.proto\x12\x14\x63ommunicator_objects"\x0e\n\x0c\x43ustomActionB\x1f\xaa\x02\x1cMLAgents.CommunicatorObjectsb\x06proto3'
),
)
_CUSTOMACTION = _descriptor.Descriptor(
name="CustomAction",
full_name="communicator_objects.CustomAction",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=80,
serialized_end=94,
)
DESCRIPTOR.message_types_by_name["CustomAction"] = _CUSTOMACTION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CustomAction = _reflection.GeneratedProtocolMessageType(
"CustomAction",
(_message.Message,),
dict(
DESCRIPTOR=_CUSTOMACTION,
__module__="mlagents.envs.communicator_objects.custom_action_pb2"
# @@protoc_insertion_point(class_scope:communicator_objects.CustomAction)
),
)
_sym_db.RegisterMessage(CustomAction)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
8de8ea6b1c58ca9b9e86f7823a753e50cc5c3b33 | 553e6acd1019bb2c7d6a1b08009ca50ef2fa0ad1 | /mammoth/optim.py | b7f17e1cecf96cd1842270f07004067b512eda4a | [] | no_license | bkj/mammoth | ac0cfd6f8c5165ce72a5a7e591a938cf823270d3 | 0bd0122b5bac5ce897436a2318cb47b2fbc84164 | refs/heads/master | 2021-05-15T00:23:48.290164 | 2018-07-26T16:15:23 | 2018-07-26T16:15:23 | 103,467,821 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,805 | py | #!/usr/bin/env python
"""
optim.py
"""
import math
import torch
import numpy as np
class LambdaAdam(torch.optim.Optimizer):
"""
ADAM optimizer that mimics hypergrads
- Difference is addition of `lam` parameter. I noticed that my hypergrad test was converging
to eps < 1e-10. Setting lam to some small number (1e-1, 1e-2, etc) lets the torch version
        converge to eps < 1e-8.
!! This is not efficient, due to cloning, etc. Will need to reimplement more efficiently
for larger models. Then again, for larger models, this may not matter.
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=10**-4, lam=1):
defaults = dict(lr=lr, betas=betas, eps=eps, lam=lam)
super().__init__(params, defaults)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p.data)
state['exp_avg_sq'] = torch.zeros_like(p.data)
m, v = state['exp_avg'].clone(), state['exp_avg_sq'].clone()
beta1, beta2 = group['betas']
state['step'] += 1
# --
b1t = beta1 * (group['lam'] ** (state['step'] - 1))
m = (m * b1t) + ((1 - b1t) * grad)
v = (1 - beta2) * (grad ** 2) + beta2 * v
mhat = m / (1 - beta1 ** state['step'])
vhat = v / (1 - beta2 ** state['step'])
p.data -= group['lr'] * mhat / (torch.sqrt(vhat) + group['eps'])
# --
# default torch implementation
# m = (m * beta1) + ((1 - beta1) * grad)
# v = (1 - beta2) * (grad ** 2) + beta2 * v
# denom = torch.sqrt(v) + group['eps']
# bias_correction1 = 1 - beta1 ** state['step']
# bias_correction2 = 1 - beta2 ** state['step']
# step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
# p.data.addcdiv_(-step_size, m, denom)
# --
state['exp_avg'] = m.clone()
state['exp_avg_sq'] = v.clone()
return loss | [
"[email protected]"
] | |
8cf1a9534a126b14369a0c65201592f19a07b52f | 7a1a65b0cda41ea204fad4848934db143ebf199a | /automatedprocesses_firststage/adsym_InventorySources_v2_DD_testapi.py | 3f6dcf9326e7f74dfb04f362aaeebd1489663c43 | [] | no_license | bpopovich44/ReaperSec | 4b015e448ed5ce23316bd9b9e33966373daea9c0 | 22acba4d84313e62dbbf95cf2a5465283a6491b0 | refs/heads/master | 2021-05-02T18:26:11.875122 | 2019-06-22T15:02:09 | 2019-06-22T15:02:09 | 120,664,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,719 | py | #!/usr/bin/python2.7
import json
from mysql.connector import MySQLConnection, Error
from python_dbconfig import read_db_config
import aol_api
def connect():
# """Gets AOL Data and writes them to a MySQL table"""
db = "mysql_dl"
# Connect To DB:
db_config = read_db_config(db)
try:
print('Connecting to database...')
conn = MySQLConnection(**db_config)
if conn.is_connected():
print('Connection established.')
cursor = conn.cursor()
# calls get_access_token function and starts script
logintoken = aol_api.get_access_token("25e5de37-aa8d-4d93-b407-29bc42b86044", "stEVHyPObmxCTeI6mTMKuA")
print(logintoken)
result = aol_api.run_existing_report(logintoken, "190595")
#print(result)
info = json.loads(result)
#print(info)
for x in json.loads(result)['data']:
rownum = ''
date = x['row'][0]
inventory_source = x['row'][1].replace("'", " -").replace('"', "")
geo_country = x['row'][2].replace("'", "")
media = x['row'][3].replace('"', "").replace("'", "")
ad_opportunities = x['row'][4]
ad_attempts = x['row'][5]
ad_impressions = x['row'][6]
ad_revenue = x['row'][7]
ecpm = x['row'][8]
ad_errors = x['row'][9]
iab_viewability_measurable_ad_impressions = x['row'][10]
iab_viewable_ad_impressions = x['row'][11]
market_ops = x['row'][12]
clicks = x['row'][13].replace(" ", "0")
list = (rownum, date, inventory_source, geo_country, media, ad_opportunities, ad_attempts, ad_impressions, \
ad_revenue, ecpm, ad_errors, iab_viewability_measurable_ad_impressions, iab_viewable_ad_impressions, market_ops, clicks)
#print(list)
sql = """INSERT INTO adsym_InventorySources_v2 VALUES ("%s", "%s", "%s", "%s", "%s", "%s", "%s", "%s", "%s", "%s", "%s", \
"%s", "%s", "%s", "%s")""" % (rownum, date, inventory_source, geo_country, media, ad_opportunities, ad_attempts, ad_impressions, \
ad_revenue, ecpm, ad_errors, iab_viewability_measurable_ad_impressions, iab_viewable_ad_impressions, market_ops, clicks)
cursor.execute(sql)
cursor.execute('commit')
else:
print('Connection failed.')
except Error as error:
print(error)
finally:
conn.close()
print('Connection closed.')
if __name__ == '__main__':
connect()
| [
"[email protected]"
] | |
97e4669eaaef04e481d3c1a28889378009c43f5e | c97ae1cc922a037484c5d4794d0a657561cf47f3 | /config.py | 53c8a1a210fb59cef99d47b41017842907143b96 | [] | no_license | AlenAlic/clubpromoters | 3059078b02b77745e7a1e49d998f9d24554082e8 | f44b3b20c20d5669c1658036cea35fb9a4f223fc | refs/heads/master | 2022-12-11T14:38:37.824769 | 2019-09-08T19:02:49 | 2019-09-08T19:02:49 | 190,430,315 | 0 | 0 | null | 2022-12-09T22:02:49 | 2019-06-05T16:29:25 | JavaScript | UTF-8 | Python | false | false | 664 | py | import os
basedir = os.path.abspath(os.path.dirname(__file__))
ENV = 'development'
SECRET_KEY = 'test-key'
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db?check_same_thread=False')
SQLALCHEMY_ECHO = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_RECORD_QUERIES = False
# MAIL SERVERS
# python -m smtpd -n -c DebuggingServer localhost:8025
# python -u -m smtpd -n -c DebuggingServer localhost:8025 > mail.log
MAIL_SERVER = 'localhost'
MAIL_PORT = 8025
MAIL_USE_TLS = ''
MAIL_USERNAME = ''
MAIL_PASSWORD = ''
ADMINS = ['[email protected]']
DEBUG_TB_INTERCEPT_REDIRECTS = False
# requirements
# pip freeze > requirements.txt
| [
"[email protected]"
] | |
2cbf983911e50399c3a76fb804444089fce74a61 | c071eb46184635818e8349ce9c2a78d6c6e460fc | /system/python_stubs/-745935208/_ast/__init__/Global.py | e9f88c95a0171f4bb4c222f70243e1403b25fc9c | [] | no_license | sidbmw/PyCharm-Settings | a71bc594c83829a1522e215155686381b8ac5c6e | 083f9fe945ee5358346e5d86b17130d521d1b954 | refs/heads/master | 2020-04-05T14:24:03.216082 | 2018-12-28T02:29:29 | 2018-12-28T02:29:29 | 156,927,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | # encoding: utf-8
# module _ast
# from C:\Users\siddh\AppData\Local\Programs\Python\Python37\lib\site-packages\pandas\util\_move.cp37-win_amd64.pyd
# by generator 1.146
# no doc
# no imports
from .stmt import stmt
class Global(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'names',
)
| [
"[email protected]"
] | |
af7582913055c33dfb0d2fb42261bb2d00085cbd | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/55/usersdata/120/22966/submittedfiles/av2_p3_civil.py | ec623fcaf0928a3823b671b83b40cb785119d3ff | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 686 | py | # -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
# define somalinha (row sum)
def somalinha(a,linha):
soma=0
for j in range(0,a.shape[1],1):
soma=soma+a[linha,j]
    return soma
# define somacoluna (column sum)
def somacoluna(a,coluna):
soma=0
for i in range(0,a.shape[0],1):
soma=soma+a[i,coluna]
return soma
# define peso (weight)
def peso(a,linha,coluna):
peso=somalinha(a,linha)+somacoluna(a,coluna)-(2*a[linha,coluna])
return peso
n=input('digite n:')
x=input('digite x:')
y=input('digite y:')
a=np.zeros((n,n))
for i in range(0,a.shape[0],1):
for j in range(0,a.shape[1],1):
print ('%d'%peso(a,x,y))
| [
"[email protected]"
] | |
1f8b9d0f62221cd8a4ea43b57dfb8951433fe248 | 5c81a33883e052070c557c76b5968aa501d5526e | /products/migrations/0005_attribute_attributeitem.py | 24c44104e06e383020c74f454c2c93a8a182519b | [] | no_license | worlddeleteRin/rabbit_vkusno | 2ebacdf72d87700d191965481c56e78bfec33e9b | 017cdff4b40fa7e9a0f7729e4f7b754f48e93c3a | refs/heads/master | 2023-04-03T23:32:42.770973 | 2021-04-08T06:43:04 | 2021-04-08T06:43:04 | 355,661,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | # Generated by Django 3.0.8 on 2020-10-11 12:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('products', '0004_auto_20201010_1810'),
]
operations = [
migrations.CreateModel(
name='Attribute',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=300)),
('category', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='products.Category')),
],
),
migrations.CreateModel(
name='Attributeitem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=300)),
('attr', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='products.Attribute')),
],
),
]
| [
"[email protected]"
] | |
0f2011cb5b2aadf3215ef9f7b51b9c97d83a2488 | 43ab33b2f50e47f5dbe322daa03c86a99e5ee77c | /rcc/models/study_site_view_rpc.py | a9556784eb2812cdd56b265c2e06679096439101 | [] | no_license | Sage-Bionetworks/rcc-client | c770432de2d2950e00f7c7bd2bac22f3a81c2061 | 57c4a621aecd3a2f3f9faaa94f53b2727992a01a | refs/heads/main | 2023-02-23T05:55:39.279352 | 2021-01-21T02:06:08 | 2021-01-21T02:06:08 | 331,486,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,149 | py | # coding: utf-8
"""
nPhase REST Resource
REDCap REST API v.2 # noqa: E501
The version of the OpenAPI document: 2.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from rcc.configuration import Configuration
class StudySiteViewRpc(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'int',
'study_id': 'int',
'name': 'str',
'site_id': 'int',
'site_type': 'str',
'principal_investigator': 'str',
'facility_name': 'str',
'enabled': 'bool'
}
attribute_map = {
'id': 'id',
'study_id': 'studyId',
'name': 'name',
'site_id': 'siteId',
'site_type': 'siteType',
'principal_investigator': 'principalInvestigator',
'facility_name': 'facilityName',
'enabled': 'enabled'
}
def __init__(self, id=None, study_id=None, name=None, site_id=None, site_type=None, principal_investigator=None, facility_name=None, enabled=None, local_vars_configuration=None): # noqa: E501
"""StudySiteViewRpc - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._id = None
self._study_id = None
self._name = None
self._site_id = None
self._site_type = None
self._principal_investigator = None
self._facility_name = None
self._enabled = None
self.discriminator = None
if id is not None:
self.id = id
if study_id is not None:
self.study_id = study_id
if name is not None:
self.name = name
if site_id is not None:
self.site_id = site_id
if site_type is not None:
self.site_type = site_type
if principal_investigator is not None:
self.principal_investigator = principal_investigator
if facility_name is not None:
self.facility_name = facility_name
if enabled is not None:
self.enabled = enabled
@property
def id(self):
"""Gets the id of this StudySiteViewRpc. # noqa: E501
:return: The id of this StudySiteViewRpc. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this StudySiteViewRpc.
:param id: The id of this StudySiteViewRpc. # noqa: E501
:type: int
"""
self._id = id
@property
def study_id(self):
"""Gets the study_id of this StudySiteViewRpc. # noqa: E501
:return: The study_id of this StudySiteViewRpc. # noqa: E501
:rtype: int
"""
return self._study_id
@study_id.setter
def study_id(self, study_id):
"""Sets the study_id of this StudySiteViewRpc.
:param study_id: The study_id of this StudySiteViewRpc. # noqa: E501
:type: int
"""
self._study_id = study_id
@property
def name(self):
"""Gets the name of this StudySiteViewRpc. # noqa: E501
:return: The name of this StudySiteViewRpc. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this StudySiteViewRpc.
:param name: The name of this StudySiteViewRpc. # noqa: E501
:type: str
"""
self._name = name
@property
def site_id(self):
"""Gets the site_id of this StudySiteViewRpc. # noqa: E501
:return: The site_id of this StudySiteViewRpc. # noqa: E501
:rtype: int
"""
return self._site_id
@site_id.setter
def site_id(self, site_id):
"""Sets the site_id of this StudySiteViewRpc.
:param site_id: The site_id of this StudySiteViewRpc. # noqa: E501
:type: int
"""
self._site_id = site_id
@property
def site_type(self):
"""Gets the site_type of this StudySiteViewRpc. # noqa: E501
:return: The site_type of this StudySiteViewRpc. # noqa: E501
:rtype: str
"""
return self._site_type
@site_type.setter
def site_type(self, site_type):
"""Sets the site_type of this StudySiteViewRpc.
:param site_type: The site_type of this StudySiteViewRpc. # noqa: E501
:type: str
"""
self._site_type = site_type
@property
def principal_investigator(self):
"""Gets the principal_investigator of this StudySiteViewRpc. # noqa: E501
:return: The principal_investigator of this StudySiteViewRpc. # noqa: E501
:rtype: str
"""
return self._principal_investigator
@principal_investigator.setter
def principal_investigator(self, principal_investigator):
"""Sets the principal_investigator of this StudySiteViewRpc.
:param principal_investigator: The principal_investigator of this StudySiteViewRpc. # noqa: E501
:type: str
"""
self._principal_investigator = principal_investigator
@property
def facility_name(self):
"""Gets the facility_name of this StudySiteViewRpc. # noqa: E501
:return: The facility_name of this StudySiteViewRpc. # noqa: E501
:rtype: str
"""
return self._facility_name
@facility_name.setter
def facility_name(self, facility_name):
"""Sets the facility_name of this StudySiteViewRpc.
:param facility_name: The facility_name of this StudySiteViewRpc. # noqa: E501
:type: str
"""
self._facility_name = facility_name
@property
def enabled(self):
"""Gets the enabled of this StudySiteViewRpc. # noqa: E501
:return: The enabled of this StudySiteViewRpc. # noqa: E501
:rtype: bool
"""
return self._enabled
@enabled.setter
def enabled(self, enabled):
"""Sets the enabled of this StudySiteViewRpc.
:param enabled: The enabled of this StudySiteViewRpc. # noqa: E501
:type: bool
"""
self._enabled = enabled
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, StudySiteViewRpc):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, StudySiteViewRpc):
return True
return self.to_dict() != other.to_dict()
| [
"[email protected]"
] | |
55d11a1f12e6188a1ad698f562dad455168768b3 | 4129ae27e90b3aa76187203e42aa0ecbae69216f | /img/test.py | b8e635ba0fa6d08d2030bb90c4ad0473dfa34e66 | [] | no_license | PinoJoe/WebCrawler | d1a6b84629832222cbebb1037f1cbc0771deadcf | 94929bc73bde98569b2992f8bc648c2f39afcccc | refs/heads/master | 2022-01-23T13:09:35.853177 | 2019-08-05T13:40:44 | 2019-08-05T13:40:44 | 122,821,572 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,406 | py | #-*-coding:utf-8 -*-
import urllib.request
from lxml import etree
import requests
import time
from contextlib import closing
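# Progress callback for urllib.request.urlretrieve: prints percent done, a 5-slot bar and the current download speed.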
def ProcessBar(blocknum, blocksize, totalsize):
speed = (blocknum * blocksize) / (time.time() - start_time)
    speed_str = 'Download speed: %s' % format_size(speed)
recv_size = blocknum * blocksize
pervent = recv_size / totalsize
percent_str = '%.2f%%' % (pervent * 100)
n = round(pervent * 5)
s = ('=' * n).ljust(5, '-')
print(percent_str.ljust(8, ' ') + '[' + s + ']' + speed_str, end='\r')
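# Convert a raw byte count into a human-readable K/M/G string.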
def format_size(bytes):
try:
bytes = float(bytes)
kb = bytes / 1024
except:
        print('Invalid byte value passed in')
return 'Error'
if kb >= 1024:
M = kb / 1024
if M >= 1024:
G = M / 1024
return '%.3fG' % (G)
else:
return '%.3fM' % (M)
else:
return '%.3fK' % (kb)
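# Fetch the gallery page and download every image referenced by an <img> tag, showing progress for each file.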
user_agent = 'Mozilla/5.0 (Windows NT 6.1; rv:58.0) Gecko/20100101 Firefox/58.0'
headers = {'User-Agent':user_agent}
r = requests.get('http://www.ivsky.com/tupian/ziranfengguang/',headers=headers)
h = etree.HTML(r.text)
img_urls = h.xpath('.//img/@src')
i = 0
for img_url in img_urls:
    filename = 'img' + str(i) + '.jpg'
start_time = time.time()
urllib.request.urlretrieve(img_url, filename, ProcessBar)
i += 1
print('\n')
| [
"[email protected]"
] | |
b376047b8fffc918dea88a06d8f95217ed1a01eb | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_coolants.py | 52648d1c9cbfeb1d8e6f52d2d454be9cc0ff0119 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
#class header
class _COOLANTS():
def __init__(self,):
self.name = "COOLANTS"
		self.definitions = ['coolant']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['coolant']
| [
"[email protected]"
] | |
ae8384d7325ab05aae05cc3dff842e3ae00aef65 | 632b9b323dc29c67fd6b8cdbec6ec80161ad484a | /extractInstitution.py | 126544f887ba375a7c57a7e8e67987ecdd57ee55 | [] | no_license | SixingYan/Academic-Relationship-Network | 3a08f7cf5d9d1a73f8639c883257fc76dbe86376 | 94dbfcc76a734005ffceb08e31763112b0d4462b | refs/heads/master | 2021-01-21T17:57:14.388478 | 2018-02-23T09:48:31 | 2018-02-23T09:48:31 | 92,003,208 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,655 | py | # -*- coding: utf-8 -*-
import re
from tool import getResult,getCursor,readTXT
from bs4 import BeautifulSoup
import os
files_path = 'E:/Code/Data/dlibrary'
conn,cur = getCursor()
#import os;os.chdir('e:/Code/Python');import extractInstitution;extractInstitution.mainFunction()
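# Workflow: read each downloaded ACM DL author page, resolve its record id in the dlurl1 table,
# then extract the author's affiliation history and store it in the experience1 table.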
def cleanInstit(instit):
    # Keep only alphabetic characters in each word of the institution name.
institNew = ''
for inst in instit.split(' '):
institNew += re.sub('[^a-zA-Z]','',inst)+' '
return institNew.strip()
def readFiles(files_path):
    # Collect the full paths of all files in the download directory.
filePathList = []
for fileName in os.listdir(files_path):
if len(fileName)>1:
newFilePath = files_path+'/'+fileName
filePathList.append(newFilePath)
return filePathList
def insertInstitution(eid,institution,fileP):
    # 'institution' is a flat (one-dimensional) list of names; insert one experience1 row per entry.
insertSQL = ''
for inst in institution:
try:
insertSQL = 'insert into experience1 (eid,institution) values('+str(eid)+', "'+inst+'")'
cur.execute(insertSQL)
conn.commit()
except Exception:
print('error:'+insertSQL)
    #cur.execute('update dlurl1 set status= where id='+str(eid))  # mark this record as already extracted
#conn.commit()
print('Competed '+fileP)
def extractInstitut(html):
    # Parse the page and collect the institution names listed under 'Affiliation history'.
institution = []
    # Find the <strong> tag labelled 'Affiliation history',
    # then walk to its following <div> sibling,
    # and read the text of every <a> inside it.
soup = BeautifulSoup(''.join(html),"lxml")
history = soup.find('history')
strongTag = history.find(text='Affiliation history')
if strongTag != None:
strongTag = strongTag.parent
else:
return institution
while (type(strongTag.nextSibling) != 'NoneType') or (strongTag.nextSibling.name != 'div'):
#print(' ---loop--- ')
strongTag = strongTag.nextSibling
#print(str(strongTag))
if strongTag.name == 'div':
break
if strongTag == None:
print('no find?')
break
try:
if strongTag.findAll('a') != None:
for a in strongTag.findAll('a'):
instName = cleanInstit(a.string)
institution.append(instName)
return institution
except Exception:
print('error:'+str(strongTag))
def extractUserID(url):
    # Split the profile URL and derive the numeric user id from it.
url = url.split('&')[0]
urlid = url[:]
id = urlid.replace('http://dl.acm.org/author_page.cfm?id=','')
userid = id[4:]#only numbers begin at 4 are considered
return urlid,userid
def getID(html):
    # Map a downloaded page back to its dlurl1 record id; returns -1 if no unique match is found.
eid = -1 #初始化
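    # The page stores its source URL inside a CDATA <fullpath> element; pull it out and match it against dlurl1.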
indx = '<![CDATA['
start = html.find(indx)
end = html.find(']]></fullpath>')
if start>0:
subjectURL = html[(start+len(indx)):end]
url,userid = extractUserID(subjectURL)#从网址中分离出url地址
        # Look the URL and user id up in the dlurl1 table.
selectSQL = 'select t.id from (select id,url from dlurl1 where userid='+str(userid)+') t where t.url="'+url+'"'
result = getResult(selectSQL,cur)
if len(result)==1:
eid = int(result[0]['id'])
else:
print('error or exist')
return eid
def mainFunction():
    # Drive the whole pipeline over every downloaded file.
    # Read the list of downloaded files.
filePathList = readFiles(files_path)
print('read is ready')
for fileP in filePathList:
html = readTXT(fileP)
#print('do here')
eid = getID(html)
#print('do here0')
if eid >0:
instit = extractInstitut(html)
if len(instit)>0:
#print('do here1')
insertInstitution(eid,instit,fileP)
#print(instit)
        #break  # uncomment to process only one file when testing
cur.close();conn.close();
if __name__ == '__main__':
mainFunction() | [
"[email protected]"
] | |
fa3e1e5a03b34cf9667f4072e97ba84be7134e14 | f8ef8828377131f38a75e25f1571d3e0ea7b4837 | /api/migrations/0006_match_expansion.py | 6d76c5f3773954536724f113207af5736a647ccb | [] | no_license | szpone/bg-journal | 8b46742e2b71db820e5fb1e5f690c0362586661e | 80a3570414773daf34458ca068d051cbfe29a8b7 | refs/heads/master | 2022-05-05T06:10:33.957525 | 2019-05-02T17:11:48 | 2019-05-02T17:11:48 | 165,717,971 | 0 | 1 | null | 2022-04-22T21:07:23 | 2019-01-14T19:04:02 | Python | UTF-8 | Python | false | false | 499 | py | # Generated by Django 2.1.7 on 2019-03-18 20:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0005_remove_user_confirm_password'),
]
operations = [
migrations.AddField(
model_name='match',
name='expansion',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='api.Expansion'),
),
]
| [
"[email protected]"
] | |
cb562f1501ebd70b4953051bffe97d1b3be9ab1f | 147d0863f4590649a90ea5f78c66974723a87247 | /api/api_request.py | 7e3ae65e10c255203d5ebc25a3c87d4874377cbe | [] | no_license | jinchuika/ligabot | af5bd5443dc0df7d929e7b866869ba075c91db55 | 69544912e1ac46f281ba2fc78ff913d60d9a2a38 | refs/heads/master | 2021-01-20T12:50:32.894359 | 2017-05-08T14:07:47 | 2017-05-08T14:07:47 | 90,419,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,788 | py | import requests
import click
from django.conf import settings
from datetime import datetime
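# Thin client for the football-data.org REST API, plus a separate endpoint for live scores.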
class RequestHandler(object):
BASE_URL = settings.BASE_URL
API_TOKEN = settings.API_TOKEN
LIVE_URL = 'http://soccer-cli.appspot.com/'
verbose = False
def __init__(self, verbose=False):
self.verbose = verbose
def _get(self, url):
"""Handles api.football-data.org requests"""
if self.verbose:
print('calling: ' + url)
req = requests.get(RequestHandler.BASE_URL + url, headers={'X-Auth-Token': RequestHandler.API_TOKEN, 'X-Response-Control': 'minified'})
if req.status_code == requests.codes.ok:
if self.verbose:
print(req.text)
return req
def get_live_scores(self, use_12_hour_format):
"""Gets the live scores"""
req = requests.get(RequestHandler.LIVE_URL)
if req.status_code == requests.codes.ok:
scores = req.json()
if len(scores["games"]) == 0:
click.secho("No live action currently", fg="red", bold=True)
return
self.writer.live_scores(scores, use_12_hour_format)
else:
click.secho("There was problem getting live scores", fg="red", bold=True)
def get_team_scores(self, team_id, time=7, show_upcoming=False, use_12_hour_format=False):
"""Queries the API and gets the particular team scores"""
time_frame = 'n' if show_upcoming else 'p'
if team_id:
req = self._get('teams/{team_id}/fixtures?timeFrame={time_frame}{time}'.format(
team_id=team_id,
time_frame=time_frame,
time=time))
team_scores = req.json()
if len(team_scores["fixtures"]) != 0:
return [{
'id': fixture['id'],
'fecha': fixture['date'],
'jornada': fixture['matchday'],
'local': fixture['homeTeamName'],
'visitante': fixture['awayTeamName'],
'gol_local': fixture['result']['goalsHomeTeam'],
'gol_visitante': fixture['result']['goalsAwayTeam'],
'estado': fixture["status"]
} for fixture in team_scores['fixtures']]
else:
return []
def get_standings(self, league_id):
"""Queries the API and gets the standings for a particular league"""
req = self._get('competitions/{id}/leagueTable'.format(id=league_id))
return [{
'rank': team["rank"],
'teamId': team["teamId"],
'teamName': team["team"],
'playedGames': team["playedGames"],
'goals': team["goals"],
'goalsAgainst': team["goalsAgainst"],
'goalDifference': team["goalDifference"],
'points': team["points"]
} for team in req.json()['standing']]
def get_league_scores(self, league_id, time=7, show_upcoming=False, use_12_hour_format=False):
"""
Queries the API and fetches the scores for fixtures
based upon the league and time parameter
"""
time_frame = 'n' if show_upcoming else 'p'
if league_id:
req = self._get('competitions/{league_id}/fixtures?timeFrame={time_frame}{time}'.format(
league_id=league_id,
time_frame=time_frame,
time=time))
fixtures_results = req.json()
# no fixtures in the past week. display a help message and return
if len(fixtures_results["fixtures"]) != 0:
return [{
'id': fixture['id'],
'fecha': fixture['date'],
'jornada': fixture['matchday'],
'local': fixture['homeTeamName'],
'local_id': fixture['homeTeamId'],
'visitante_id': fixture['awayTeamId'],
'visitante': fixture['awayTeamName'],
'gol_local': fixture['result']['goalsHomeTeam'],
'gol_visitante': fixture['result']['goalsAwayTeam'],
'estado': fixture["status"]
} for fixture in fixtures_results['fixtures']]
else:
return []
else:
# When no league specified. Print all available in time frame.
return []
def get_team_players(self, team):
"""
Queries the API and fetches the players
for a particular team
"""
team_id = self.team_names.get(team, None)
req = self._get('teams/{team_id}/players'.format(team_id=team_id))
team_players = req.json()
if int(team_players["count"]) == 0:
click.secho("No players found for this team", fg="red", bold=True)
else:
self.writer.team_players(team_players)
def get_leagues(self, season=None):
if not season:
season = datetime.now().year
req = self._get('competitions/?season={season}'.format(season=season))
competition_list = req.json()
return [{
'id': competition['id'],
'caption': competition['caption'],
'league': competition['league'],
'year': competition['year'],
'numberOfTeams': competition['numberOfTeams'],
'numberOfGames': competition['numberOfGames'],
'numberOfMatchdays': competition['numberOfMatchdays'],
'currentMatchday': competition['currentMatchday'],
'lastUpdated': competition['lastUpdated'],
} for competition in competition_list]
def get_league_info(self, league_id):
req = self._get('competitions/{league_id}/'.format(league_id=league_id))
competition = req.json()
return {
'id': competition['id'],
'caption': competition['caption'],
'league': competition['league'],
'year': competition['year'],
'numberOfTeams': competition['numberOfTeams'],
'numberOfGames': competition['numberOfGames'],
'numberOfMatchdays': competition['numberOfMatchdays'],
'currentMatchday': competition['currentMatchday'],
'lastUpdated': competition['lastUpdated'],
}
def get_league_teams(self, league_id):
req = self._get('competitions/{league_id}/teams'.format(league_id=league_id))
team_list = req.json()
return [{
'id': team['id'],
'name': team['name'],
'short_name': team['shortName'],
'squad_market_value': team['squadMarketValue'],
'crest_url': team['crestUrl'],
} for team in team_list['teams'] if 'id' in team]
| [
"[email protected]"
] | |
f75f33d950309ba2333f6b2ace38e72f6bf95b7c | 65675a487fee2ff9651675ae6a09c8d62682c2a4 | /23b.py | e44397b4dee9ab0ef0746d70edcb656548770bd5 | [] | no_license | Abarn279/advent-of-code-2020 | e132f4b04ee1b777ddc00bb97322f707a72c86e0 | ea840ee1e7a8cafedfe6f0b9f3e64a2e8b6f0c80 | refs/heads/master | 2023-02-04T21:11:06.771592 | 2020-12-26T02:37:25 | 2020-12-26T02:37:25 | 317,412,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,037 | py | node_dict = {}
class SLLNode:
''' Singly linked list node '''
def __init__(self, nxt=None, data=None):
if nxt is None:
self.next = self
else:
self.next = nxt
self.data = data
node_dict[self.data] = self
def insert_after(self, other_val):
current_next = self.next
self.next = SLLNode(current_next, other_val)
return self.next
def insert_bulk_after(self, ary):
curr_next = self.next
self.next = ary[0]
ary[-1].next = curr_next
def remove_range_after(self, amt):
''' Remove amt of nodes, after this one, returning the resulting array. '''
nodes = []
to_remove = self.next
for n in range(amt):
nodes.append(to_remove)
to_remove = to_remove.next
self.next = nodes[-1].next
nodes[-1].next = None
return nodes
def find_destination(self, t = None):
if t is None:
t = ((self.data - 2) % 1000000) + 1
else:
t = ((t - 1) % 1000000) + 1
return node_dict[t]
def get_order(self):
o = self.next
while o.data != 1:
o = o.next
c = o.next
s = ""
while c is not o:
s += str(c.data)
c = c.next
return s
def __eq__(self, other):
return self.data == other.data
def __repr__(self):
return str(self.data)
# My input
inp = '318946572'
# Build DLL
current = SLLNode(None, int(inp[0]))
nxt = current
for n in inp[1:]:
nxt = nxt.insert_after(int(n))
for n in range(10, 1000001):
nxt = nxt.insert_after(n)
for move in range(10000000):
removed = current.remove_range_after(3)
destination = current.find_destination()
while destination in removed:
destination = current.find_destination(destination.data - 1)
destination.insert_bulk_after(removed)
current = current.next
print(node_dict[1].next.data * node_dict[1].next.next.data) | [
"[email protected]"
] | |
dba760a081168b07da49364f0d7449d2b7849238 | b6b380e6f5353dba2256211033cebec638dffe4a | /packages/fuego/fuego/serialization/chemkin/unpickle/parsers/Species.py | cb4d4a0b1bd9d21462be0519db57925920608495 | [] | no_license | danse-inelastic/pyre-all | 0ddf640b68f6089e40345e9a8e20562a8b035b3c | 59cc235b6481586c58415535bbec660470218e31 | refs/heads/master | 2021-01-18T12:31:27.905459 | 2015-04-26T04:57:46 | 2015-04-26T04:57:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,335 | py | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2007 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from BaseParser import BaseParser
class Species(BaseParser):
# the interesting tokens
def aSpeciesName(self, token):
try:
species = self._mechanism.newSpecies(token.name, self.locator())
except self._mechanism.DuplicateSpecies, msg:
self.onWarning(str(msg), self.locator())
return 0
# transitions
def aSpeciesSection(self, token):
self._info.log("species parser: section start")
self._parse(self._scanner, self._tokenizer)
return 0
# other methods
def __init__(self, mechanism, tokenizer):
import pyre
BaseParser.__init__(self, mechanism)
self._tokenizer = tokenizer
import fuego
self._scanner = fuego.serialization.chemkin.unpickle.scanners.species()
return
# version
__id__ = "$Id: Species.py,v 1.1.1.1 2007-09-13 18:17:31 aivazis Exp $"
#
# End of file
| [
"[email protected]"
] | |
d1aa49ff888c33fc8ce933754b64cb779c56098e | 9a335e0c99250de6c0429b367107153ff0633fe5 | /sine_of_num.py | d8f72e1928f379707c9fa205b00c72a59b9f297c | [] | no_license | felcygrace/guvi_player | 16f4e92be00625490b013f0f833350c9b4185fe3 | cf74f455707ab66d08a0801440033a63871102dd | refs/heads/master | 2020-05-31T10:31:42.720423 | 2019-06-22T09:39:23 | 2019-06-22T09:39:23 | 190,242,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | import math
n=int(input())
a=math.randians(n)
if a>0 and a<1:
print(round(math.sin(a),2))
else:
print(round(math.sin(a)))
| [
"[email protected]"
] | |
f7debe817af0afd601474afec973beb67886808b | f9183ce2308090dbb6a8c2f5d96c17c56a8ca768 | /main/forms.py | c9618dd619531607d80ecd51fc59f05971ac3664 | [] | no_license | asad2200/UrlShortener | a9b0e0f5cce203dd2bcc1244de7feb99588c6c71 | 055d83d5a1bbf9628a8f045d152dc85c58e9460f | refs/heads/master | 2023-05-13T23:44:09.748618 | 2021-05-29T10:56:17 | 2021-05-29T10:56:17 | 371,616,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | from django import forms
from .models import URL
class URLForm(forms.ModelForm):
class Meta:
model = URL
fields = ["name", "url"]
widgets = {
"url": forms.Textarea(attrs={"rows": 2, "cols": 5}),
}
| [
"[email protected]"
] | |
0835966ff2ce83229633c6df3ebc0d562c265106 | 82770c7bc5e2f27a48b8c370b0bab2ee41f24d86 | /microblog/flask/venv/lib/python2.7/site-packages/scipy/stats/tests/test_mstats_basic.py | d4606a58c0ee10a92b657014fb576d2ae6809fe9 | [
"Apache-2.0"
] | permissive | johankaito/fufuka | 77ddb841f27f6ce8036d7b38cb51dc62e85b2679 | 32a96ecf98ce305c2206c38443e58fdec88c788d | refs/heads/master | 2022-07-20T00:51:55.922063 | 2015-08-21T20:56:48 | 2015-08-21T20:56:48 | 39,845,849 | 2 | 0 | Apache-2.0 | 2022-06-29T23:30:11 | 2015-07-28T16:39:54 | Python | UTF-8 | Python | false | false | 48,883 | py | """
Tests for the stats.mstats module (support for masked arrays)
"""
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy import nan
import numpy.ma as ma
from numpy.ma import masked, nomask
import scipy.stats.mstats as mstats
from scipy import stats
from common_tests import check_named_results
from numpy.testing import TestCase, run_module_suite
from numpy.testing.decorators import skipif
from numpy.ma.testutils import (assert_equal, assert_almost_equal,
assert_array_almost_equal, assert_array_almost_equal_nulp, assert_,
assert_allclose, assert_raises)
class TestMquantiles(TestCase):
def test_mquantiles_limit_keyword(self):
# Regression test for Trac ticket #867
data = np.array([[6., 7., 1.],
[47., 15., 2.],
[49., 36., 3.],
[15., 39., 4.],
[42., 40., -999.],
[41., 41., -999.],
[7., -999., -999.],
[39., -999., -999.],
[43., -999., -999.],
[40., -999., -999.],
[36., -999., -999.]])
desired = [[19.2, 14.6, 1.45],
[40.0, 37.5, 2.5],
[42.8, 40.05, 3.55]]
quants = mstats.mquantiles(data, axis=0, limit=(0, 50))
assert_almost_equal(quants, desired)
class TestGMean(TestCase):
def test_1D(self):
a = (1,2,3,4)
actual = mstats.gmean(a)
desired = np.power(1*2*3*4,1./4.)
assert_almost_equal(actual, desired, decimal=14)
desired1 = mstats.gmean(a,axis=-1)
assert_almost_equal(actual, desired1, decimal=14)
assert_(not isinstance(desired1, ma.MaskedArray))
a = ma.array((1,2,3,4),mask=(0,0,0,1))
actual = mstats.gmean(a)
desired = np.power(1*2*3,1./3.)
assert_almost_equal(actual, desired,decimal=14)
desired1 = mstats.gmean(a,axis=-1)
assert_almost_equal(actual, desired1, decimal=14)
@skipif(not hasattr(np, 'float96'), 'cannot find float96 so skipping')
def test_1D_float96(self):
a = ma.array((1,2,3,4), mask=(0,0,0,1))
actual_dt = mstats.gmean(a, dtype=np.float96)
desired_dt = np.power(1 * 2 * 3, 1. / 3.).astype(np.float96)
assert_almost_equal(actual_dt, desired_dt, decimal=14)
assert_(actual_dt.dtype == desired_dt.dtype)
def test_2D(self):
a = ma.array(((1, 2, 3, 4), (1, 2, 3, 4), (1, 2, 3, 4)),
mask=((0, 0, 0, 0), (1, 0, 0, 1), (0, 1, 1, 0)))
actual = mstats.gmean(a)
desired = np.array((1,2,3,4))
assert_array_almost_equal(actual, desired, decimal=14)
desired1 = mstats.gmean(a,axis=0)
assert_array_almost_equal(actual, desired1, decimal=14)
actual = mstats.gmean(a, -1)
desired = ma.array((np.power(1*2*3*4,1./4.),
np.power(2*3,1./2.),
np.power(1*4,1./2.)))
assert_array_almost_equal(actual, desired, decimal=14)
class TestHMean(TestCase):
def test_1D(self):
a = (1,2,3,4)
actual = mstats.hmean(a)
desired = 4. / (1./1 + 1./2 + 1./3 + 1./4)
assert_almost_equal(actual, desired, decimal=14)
desired1 = mstats.hmean(ma.array(a),axis=-1)
assert_almost_equal(actual, desired1, decimal=14)
a = ma.array((1,2,3,4),mask=(0,0,0,1))
actual = mstats.hmean(a)
desired = 3. / (1./1 + 1./2 + 1./3)
assert_almost_equal(actual, desired,decimal=14)
desired1 = mstats.hmean(a,axis=-1)
assert_almost_equal(actual, desired1, decimal=14)
@skipif(not hasattr(np, 'float96'), 'cannot find float96 so skipping')
def test_1D_float96(self):
a = ma.array((1,2,3,4), mask=(0,0,0,1))
actual_dt = mstats.hmean(a, dtype=np.float96)
desired_dt = np.asarray(3. / (1./1 + 1./2 + 1./3),
dtype=np.float96)
assert_almost_equal(actual_dt, desired_dt, decimal=14)
assert_(actual_dt.dtype == desired_dt.dtype)
def test_2D(self):
a = ma.array(((1,2,3,4),(1,2,3,4),(1,2,3,4)),
mask=((0,0,0,0),(1,0,0,1),(0,1,1,0)))
actual = mstats.hmean(a)
desired = ma.array((1,2,3,4))
assert_array_almost_equal(actual, desired, decimal=14)
actual1 = mstats.hmean(a,axis=-1)
desired = (4./(1/1.+1/2.+1/3.+1/4.),
2./(1/2.+1/3.),
2./(1/1.+1/4.)
)
assert_array_almost_equal(actual1, desired, decimal=14)
class TestRanking(TestCase):
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
def test_ranking(self):
x = ma.array([0,1,1,1,2,3,4,5,5,6,])
assert_almost_equal(mstats.rankdata(x),
[1,3,3,3,5,6,7,8.5,8.5,10])
x[[3,4]] = masked
assert_almost_equal(mstats.rankdata(x),
[1,2.5,2.5,0,0,4,5,6.5,6.5,8])
assert_almost_equal(mstats.rankdata(x, use_missing=True),
[1,2.5,2.5,4.5,4.5,4,5,6.5,6.5,8])
x = ma.array([0,1,5,1,2,4,3,5,1,6,])
assert_almost_equal(mstats.rankdata(x),
[1,3,8.5,3,5,7,6,8.5,3,10])
x = ma.array([[0,1,1,1,2], [3,4,5,5,6,]])
assert_almost_equal(mstats.rankdata(x),
[[1,3,3,3,5], [6,7,8.5,8.5,10]])
assert_almost_equal(mstats.rankdata(x, axis=1),
[[1,3,3,3,5], [1,2,3.5,3.5,5]])
assert_almost_equal(mstats.rankdata(x,axis=0),
[[1,1,1,1,1], [2,2,2,2,2,]])
class TestCorr(TestCase):
def test_pearsonr(self):
# Tests some computations of Pearson's r
x = ma.arange(10)
with warnings.catch_warnings():
# The tests in this context are edge cases, with perfect
# correlation or anticorrelation, or totally masked data.
# None of these should trigger a RuntimeWarning.
warnings.simplefilter("error", RuntimeWarning)
assert_almost_equal(mstats.pearsonr(x, x)[0], 1.0)
assert_almost_equal(mstats.pearsonr(x, x[::-1])[0], -1.0)
x = ma.array(x, mask=True)
pr = mstats.pearsonr(x, x)
assert_(pr[0] is masked)
assert_(pr[1] is masked)
x1 = ma.array([-1.0, 0.0, 1.0])
y1 = ma.array([0, 0, 3])
r, p = mstats.pearsonr(x1, y1)
assert_almost_equal(r, np.sqrt(3)/2)
assert_almost_equal(p, 1.0/3)
# (x2, y2) have the same unmasked data as (x1, y1).
mask = [False, False, False, True]
x2 = ma.array([-1.0, 0.0, 1.0, 99.0], mask=mask)
y2 = ma.array([0, 0, 3, -1], mask=mask)
r, p = mstats.pearsonr(x2, y2)
assert_almost_equal(r, np.sqrt(3)/2)
assert_almost_equal(p, 1.0/3)
def test_spearmanr(self):
# Tests some computations of Spearman's rho
(x, y) = ([5.05,6.75,3.21,2.66],[1.65,2.64,2.64,6.95])
assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555)
(x, y) = ([5.05,6.75,3.21,2.66,np.nan],[1.65,2.64,2.64,6.95,np.nan])
(x, y) = (ma.fix_invalid(x), ma.fix_invalid(y))
assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555)
x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7]
y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4]
assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299)
x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7, np.nan]
y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4, np.nan]
(x, y) = (ma.fix_invalid(x), ma.fix_invalid(y))
assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299)
# test for namedtuple attributes
res = mstats.spearmanr(x, y)
attributes = ('correlation', 'pvalue')
check_named_results(res, attributes, ma=True)
def test_kendalltau(self):
# Tests some computations of Kendall's tau
x = ma.fix_invalid([5.05, 6.75, 3.21, 2.66,np.nan])
y = ma.fix_invalid([1.65, 26.5, -5.93, 7.96, np.nan])
z = ma.fix_invalid([1.65, 2.64, 2.64, 6.95, np.nan])
assert_almost_equal(np.asarray(mstats.kendalltau(x,y)),
[+0.3333333,0.4969059])
assert_almost_equal(np.asarray(mstats.kendalltau(x,z)),
[-0.5477226,0.2785987])
#
x = ma.fix_invalid([0, 0, 0, 0,20,20, 0,60, 0,20,
10,10, 0,40, 0,20, 0, 0, 0, 0, 0, np.nan])
y = ma.fix_invalid([0,80,80,80,10,33,60, 0,67,27,
25,80,80,80,80,80,80, 0,10,45, np.nan, 0])
result = mstats.kendalltau(x,y)
assert_almost_equal(np.asarray(result), [-0.1585188, 0.4128009])
# test for namedtuple attributes
res = mstats.kendalltau(x, y)
attributes = ('correlation', 'pvalue')
check_named_results(res, attributes, ma=True)
def test_kendalltau_seasonal(self):
# Tests the seasonal Kendall tau.
x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
[4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
[3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan],
[nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]]
x = ma.fix_invalid(x).T
output = mstats.kendalltau_seasonal(x)
assert_almost_equal(output['global p-value (indep)'], 0.008, 3)
assert_almost_equal(output['seasonal p-value'].round(2),
[0.18,0.53,0.20,0.04])
def test_pointbiserial(self):
x = [1,0,1,1,1,1,0,1,0,0,0,1,1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,1,0,
0,0,0,0,1,-1]
y = [14.8,13.8,12.4,10.1,7.1,6.1,5.8,4.6,4.3,3.5,3.3,3.2,3.0,
2.8,2.8,2.5,2.4,2.3,2.1,1.7,1.7,1.5,1.3,1.3,1.2,1.2,1.1,
0.8,0.7,0.6,0.5,0.2,0.2,0.1,np.nan]
assert_almost_equal(mstats.pointbiserialr(x, y)[0], 0.36149, 5)
# test for namedtuple attributes
res = mstats.pointbiserialr(x, y)
attributes = ('correlation', 'pvalue')
check_named_results(res, attributes, ma=True)
class TestTrimming(TestCase):
def test_trim(self):
a = ma.arange(10)
assert_equal(mstats.trim(a), [0,1,2,3,4,5,6,7,8,9])
a = ma.arange(10)
assert_equal(mstats.trim(a,(2,8)), [None,None,2,3,4,5,6,7,8,None])
a = ma.arange(10)
assert_equal(mstats.trim(a,limits=(2,8),inclusive=(False,False)),
[None,None,None,3,4,5,6,7,None,None])
a = ma.arange(10)
assert_equal(mstats.trim(a,limits=(0.1,0.2),relative=True),
[None,1,2,3,4,5,6,7,None,None])
a = ma.arange(12)
a[[0,-1]] = a[5] = masked
assert_equal(mstats.trim(a, (2,8)),
[None, None, 2, 3, 4, None, 6, 7, 8, None, None, None])
x = ma.arange(100).reshape(10, 10)
expected = [1]*10 + [0]*70 + [1]*20
trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=None)
assert_equal(trimx._mask.ravel(), expected)
trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0)
assert_equal(trimx._mask.ravel(), expected)
trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=-1)
assert_equal(trimx._mask.T.ravel(), expected)
# same as above, but with an extra masked row inserted
x = ma.arange(110).reshape(11, 10)
x[1] = masked
expected = [1]*20 + [0]*70 + [1]*20
trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=None)
assert_equal(trimx._mask.ravel(), expected)
trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0)
assert_equal(trimx._mask.ravel(), expected)
trimx = mstats.trim(x.T, (0.1,0.2), relative=True, axis=-1)
assert_equal(trimx.T._mask.ravel(), expected)
def test_trim_old(self):
x = ma.arange(100)
assert_equal(mstats.trimboth(x).count(), 60)
assert_equal(mstats.trimtail(x,tail='r').count(), 80)
x[50:70] = masked
trimx = mstats.trimboth(x)
assert_equal(trimx.count(), 48)
assert_equal(trimx._mask, [1]*16 + [0]*34 + [1]*20 + [0]*14 + [1]*16)
x._mask = nomask
x.shape = (10,10)
assert_equal(mstats.trimboth(x).count(), 60)
assert_equal(mstats.trimtail(x).count(), 80)
def test_trimmedmean(self):
data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
296,299,306,376,428,515,666,1310,2611])
assert_almost_equal(mstats.trimmed_mean(data,0.1), 343, 0)
assert_almost_equal(mstats.trimmed_mean(data,(0.1,0.1)), 343, 0)
assert_almost_equal(mstats.trimmed_mean(data,(0.2,0.2)), 283, 0)
def test_trimmed_stde(self):
data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
296,299,306,376,428,515,666,1310,2611])
assert_almost_equal(mstats.trimmed_stde(data,(0.2,0.2)), 56.13193, 5)
assert_almost_equal(mstats.trimmed_stde(data,0.2), 56.13193, 5)
def test_winsorization(self):
data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
296,299,306,376,428,515,666,1310,2611])
assert_almost_equal(mstats.winsorize(data,(0.2,0.2)).var(ddof=1),
21551.4, 1)
data[5] = masked
winsorized = mstats.winsorize(data)
assert_equal(winsorized.mask, data.mask)
class TestMoments(TestCase):
# Comparison numbers are found using R v.1.5.1
# note that length(testcase) = 4
# testmathworks comes from documentation for the
# Statistics Toolbox for Matlab and can be found at both
# http://www.mathworks.com/access/helpdesk/help/toolbox/stats/kurtosis.shtml
# http://www.mathworks.com/access/helpdesk/help/toolbox/stats/skewness.shtml
# Note that both test cases came from here.
testcase = [1,2,3,4]
testmathworks = ma.fix_invalid([1.165, 0.6268, 0.0751, 0.3516, -0.6965,
np.nan])
testcase_2d = ma.array(
np.array([[0.05245846, 0.50344235, 0.86589117, 0.36936353, 0.46961149],
[0.11574073, 0.31299969, 0.45925772, 0.72618805, 0.75194407],
[0.67696689, 0.91878127, 0.09769044, 0.04645137, 0.37615733],
[0.05903624, 0.29908861, 0.34088298, 0.66216337, 0.83160998],
[0.64619526, 0.94894632, 0.27855892, 0.0706151, 0.39962917]]),
mask=np.array([[True, False, False, True, False],
[True, True, True, False, True],
[False, False, False, False, False],
[True, True, True, True, True],
[False, False, True, False, False]], dtype=np.bool))
def test_moment(self):
y = mstats.moment(self.testcase,1)
assert_almost_equal(y,0.0,10)
y = mstats.moment(self.testcase,2)
assert_almost_equal(y,1.25)
y = mstats.moment(self.testcase,3)
assert_almost_equal(y,0.0)
y = mstats.moment(self.testcase,4)
assert_almost_equal(y,2.5625)
def test_variation(self):
y = mstats.variation(self.testcase)
assert_almost_equal(y,0.44721359549996, 10)
def test_skewness(self):
y = mstats.skew(self.testmathworks)
assert_almost_equal(y,-0.29322304336607,10)
y = mstats.skew(self.testmathworks,bias=0)
assert_almost_equal(y,-0.437111105023940,10)
y = mstats.skew(self.testcase)
assert_almost_equal(y,0.0,10)
def test_kurtosis(self):
# Set flags for axis = 0 and fisher=0 (Pearson's definition of kurtosis
# for compatibility with Matlab)
y = mstats.kurtosis(self.testmathworks,0,fisher=0,bias=1)
assert_almost_equal(y, 2.1658856802973,10)
# Note that MATLAB has confusing docs for the following case
# kurtosis(x,0) gives an unbiased estimate of Pearson's skewness
# kurtosis(x) gives a biased estimate of Fisher's skewness (Pearson-3)
# The MATLAB docs imply that both should give Fisher's
y = mstats.kurtosis(self.testmathworks,fisher=0, bias=0)
assert_almost_equal(y, 3.663542721189047,10)
y = mstats.kurtosis(self.testcase,0,0)
assert_almost_equal(y,1.64)
# test that kurtosis works on multidimensional masked arrays
correct_2d = ma.array(np.array([-1.5, -3., -1.47247052385, 0.,
-1.26979517952]),
mask=np.array([False, False, False, True,
False], dtype=np.bool))
assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1),
correct_2d)
for i, row in enumerate(self.testcase_2d):
assert_almost_equal(mstats.kurtosis(row), correct_2d[i])
correct_2d_bias_corrected = ma.array(
np.array([-1.5, -3., -1.88988209538, 0., -0.5234638463918877]),
mask=np.array([False, False, False, True, False], dtype=np.bool))
assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1,
bias=False),
correct_2d_bias_corrected)
for i, row in enumerate(self.testcase_2d):
assert_almost_equal(mstats.kurtosis(row, bias=False),
correct_2d_bias_corrected[i])
# Check consistency between stats and mstats implementations
assert_array_almost_equal_nulp(mstats.kurtosis(self.testcase_2d[2, :]),
stats.kurtosis(self.testcase_2d[2, :]))
def test_mode(self):
a1 = [0,0,0,1,1,1,2,3,3,3,3,4,5,6,7]
a2 = np.reshape(a1, (3,5))
a3 = np.array([1,2,3,4,5,6])
a4 = np.reshape(a3, (3,2))
ma1 = ma.masked_where(ma.array(a1) > 2, a1)
ma2 = ma.masked_where(a2 > 2, a2)
ma3 = ma.masked_where(a3 < 2, a3)
ma4 = ma.masked_where(ma.array(a4) < 2, a4)
assert_equal(mstats.mode(a1, axis=None), (3,4))
assert_equal(mstats.mode(a1, axis=0), (3,4))
assert_equal(mstats.mode(ma1, axis=None), (0,3))
assert_equal(mstats.mode(a2, axis=None), (3,4))
assert_equal(mstats.mode(ma2, axis=None), (0,3))
assert_equal(mstats.mode(a3, axis=None), (1,1))
assert_equal(mstats.mode(ma3, axis=None), (2,1))
assert_equal(mstats.mode(a2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]]))
assert_equal(mstats.mode(ma2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]]))
assert_equal(mstats.mode(a2, axis=-1), ([[0],[3],[3]], [[3],[3],[1]]))
assert_equal(mstats.mode(ma2, axis=-1), ([[0],[1],[0]], [[3],[1],[0]]))
assert_equal(mstats.mode(ma4, axis=0), ([[3,2]], [[1,1]]))
assert_equal(mstats.mode(ma4, axis=-1), ([[2],[3],[5]], [[1],[1],[1]]))
a1_res = mstats.mode(a1, axis=None)
# test for namedtuple attributes
attributes = ('mode', 'count')
check_named_results(a1_res, attributes, ma=True)
class TestPercentile(TestCase):
def setUp(self):
self.a1 = [3,4,5,10,-3,-5,6]
self.a2 = [3,-6,-2,8,7,4,2,1]
self.a3 = [3.,4,5,10,-3,-5,-6,7.0]
def test_percentile(self):
x = np.arange(8) * 0.5
assert_equal(mstats.scoreatpercentile(x, 0), 0.)
assert_equal(mstats.scoreatpercentile(x, 100), 3.5)
assert_equal(mstats.scoreatpercentile(x, 50), 1.75)
def test_2D(self):
x = ma.array([[1, 1, 1],
[1, 1, 1],
[4, 4, 3],
[1, 1, 1],
[1, 1, 1]])
assert_equal(mstats.scoreatpercentile(x,50), [1,1,1])
class TestVariability(TestCase):
""" Comparison numbers are found using R v.1.5.1
note that length(testcase) = 4
"""
testcase = ma.fix_invalid([1,2,3,4,np.nan])
def test_signaltonoise(self):
# This is not in R, so used:
# mean(testcase, axis=0) / (sqrt(var(testcase)*3/4))
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
y = mstats.signaltonoise(self.testcase)
assert_almost_equal(y, 2.236067977)
def test_sem(self):
# This is not in R, so used: sqrt(var(testcase)*3/4) / sqrt(3)
y = mstats.sem(self.testcase)
assert_almost_equal(y, 0.6454972244)
n = self.testcase.count()
assert_allclose(mstats.sem(self.testcase, ddof=0) * np.sqrt(n/(n-2)),
mstats.sem(self.testcase, ddof=2))
def test_zmap(self):
# This is not in R, so tested by using:
# (testcase[i]-mean(testcase,axis=0)) / sqrt(var(testcase)*3/4)
y = mstats.zmap(self.testcase, self.testcase)
desired_unmaskedvals = ([-1.3416407864999, -0.44721359549996,
0.44721359549996, 1.3416407864999])
assert_array_almost_equal(desired_unmaskedvals,
y.data[y.mask == False], decimal=12)
def test_zscore(self):
# This is not in R, so tested by using:
# (testcase[i]-mean(testcase,axis=0)) / sqrt(var(testcase)*3/4)
y = mstats.zscore(self.testcase)
desired = ma.fix_invalid([-1.3416407864999, -0.44721359549996,
0.44721359549996, 1.3416407864999, np.nan])
assert_almost_equal(desired, y, decimal=12)
class TestMisc(TestCase):
def test_obrientransform(self):
args = [[5]*5+[6]*11+[7]*9+[8]*3+[9]*2+[10]*2,
[6]+[7]*2+[8]*4+[9]*9+[10]*16]
result = [5*[3.1828]+11*[0.5591]+9*[0.0344]+3*[1.6086]+2*[5.2817]+2*[11.0538],
[10.4352]+2*[4.8599]+4*[1.3836]+9*[0.0061]+16*[0.7277]]
assert_almost_equal(np.round(mstats.obrientransform(*args).T,4),
result,4)
def test_kstwosamp(self):
x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
[4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
[3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan],
[nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]]
x = ma.fix_invalid(x).T
(winter,spring,summer,fall) = x.T
assert_almost_equal(np.round(mstats.ks_twosamp(winter,spring),4),
(0.1818,0.9892))
assert_almost_equal(np.round(mstats.ks_twosamp(winter,spring,'g'),4),
(0.1469,0.7734))
assert_almost_equal(np.round(mstats.ks_twosamp(winter,spring,'l'),4),
(0.1818,0.6744))
def test_friedmanchisq(self):
# No missing values
args = ([9.0,9.5,5.0,7.5,9.5,7.5,8.0,7.0,8.5,6.0],
[7.0,6.5,7.0,7.5,5.0,8.0,6.0,6.5,7.0,7.0],
[6.0,8.0,4.0,6.0,7.0,6.5,6.0,4.0,6.5,3.0])
result = mstats.friedmanchisquare(*args)
assert_almost_equal(result[0], 10.4737, 4)
assert_almost_equal(result[1], 0.005317, 6)
# Missing values
x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
[4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
[3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan],
[nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]]
x = ma.fix_invalid(x)
result = mstats.friedmanchisquare(*x)
assert_almost_equal(result[0], 2.0156, 4)
assert_almost_equal(result[1], 0.5692, 4)
# test for namedtuple attributes
attributes = ('statistic', 'pvalue')
check_named_results(result, attributes, ma=True)
def test_regress_simple():
# Regress a line with sinusoidal noise. Test for #1273.
x = np.linspace(0, 100, 100)
y = 0.2 * np.linspace(0, 100, 100) + 10
y += np.sin(np.linspace(0, 20, 100))
slope, intercept, r_value, p_value, sterr = mstats.linregress(x, y)
assert_almost_equal(slope, 0.19644990055858422)
assert_almost_equal(intercept, 10.211269918932341)
# test for namedtuple attributes
res = mstats.linregress(x, y)
attributes = ('slope', 'intercept', 'rvalue', 'pvalue', 'stderr')
check_named_results(res, attributes, ma=True)
def test_theilslopes():
# Test for basic slope and intercept.
slope, intercept, lower, upper = mstats.theilslopes([0,1,1])
assert_almost_equal(slope, 0.5)
assert_almost_equal(intercept, 0.5)
# Test for correct masking.
y = np.ma.array([0,1,100,1], mask=[False, False, True, False])
slope, intercept, lower, upper = mstats.theilslopes(y)
assert_almost_equal(slope, 1./3)
assert_almost_equal(intercept, 2./3)
# Test of confidence intervals from example in Sen (1968).
x = [1, 2, 3, 4, 10, 12, 18]
y = [9, 15, 19, 20, 45, 55, 78]
slope, intercept, lower, upper = mstats.theilslopes(y, x, 0.07)
assert_almost_equal(slope, 4)
assert_almost_equal(upper, 4.38, decimal=2)
assert_almost_equal(lower, 3.71, decimal=2)
def test_plotting_positions():
# Regression test for #1256
pos = mstats.plotting_positions(np.arange(3), 0, 0)
assert_array_almost_equal(pos.data, np.array([0.25, 0.5, 0.75]))
class TestNormalitytests():
def test_vs_nonmasked(self):
x = np.array((-2,-1,0,1,2,3)*4)**2
assert_array_almost_equal(mstats.normaltest(x),
stats.normaltest(x))
assert_array_almost_equal(mstats.skewtest(x),
stats.skewtest(x))
assert_array_almost_equal(mstats.kurtosistest(x),
stats.kurtosistest(x))
funcs = [stats.normaltest, stats.skewtest, stats.kurtosistest]
mfuncs = [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]
x = [1, 2, 3, 4]
for func, mfunc in zip(funcs, mfuncs):
assert_raises(ValueError, func, x)
assert_raises(ValueError, mfunc, x)
def test_axis_None(self):
# Test axis=None (equal to axis=0 for 1-D input)
x = np.array((-2,-1,0,1,2,3)*4)**2
assert_allclose(mstats.normaltest(x, axis=None), mstats.normaltest(x))
assert_allclose(mstats.skewtest(x, axis=None), mstats.skewtest(x))
assert_allclose(mstats.kurtosistest(x, axis=None),
mstats.kurtosistest(x))
def test_maskedarray_input(self):
# Add some masked values, test result doesn't change
x = np.array((-2,-1,0,1,2,3)*4)**2
xm = np.ma.array(np.r_[np.inf, x, 10],
mask=np.r_[True, [False] * x.size, True])
assert_allclose(mstats.normaltest(xm), stats.normaltest(x))
assert_allclose(mstats.skewtest(xm), stats.skewtest(x))
assert_allclose(mstats.kurtosistest(xm), stats.kurtosistest(x))
def test_nd_input(self):
x = np.array((-2,-1,0,1,2,3)*4)**2
x_2d = np.vstack([x] * 2).T
for func in [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]:
res_1d = func(x)
res_2d = func(x_2d)
assert_allclose(res_2d[0], [res_1d[0]] * 2)
assert_allclose(res_2d[1], [res_1d[1]] * 2)
def test_normaltest_result_attributes(self):
x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
res = mstats.normaltest(x)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
def test_kurtosistest_result_attributes(self):
x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
res = mstats.kurtosistest(x)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
class TestFOneway():
def test_result_attributes(self):
a = np.array([655, 788], dtype=np.uint16)
b = np.array([789, 772], dtype=np.uint16)
res = mstats.f_oneway(a, b)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
class TestMannwhitneyu():
def test_result_attributes(self):
x = np.array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1., 2.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 3., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1.])
y = np.array([1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1.,
2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 1., 1., 3.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1.,
1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2.,
2., 1., 1., 2., 1., 1., 2., 1., 2., 1., 1., 1., 1., 2.,
2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 2., 1., 1., 1., 1., 1., 2., 2., 2., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
2., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1.,
1., 1., 1., 1.])
res = mstats.mannwhitneyu(x, y)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
class TestKruskal():
def test_result_attributes(self):
x = [1, 3, 5, 7, 9]
y = [2, 4, 6, 8, 10]
res = mstats.kruskal(x, y)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
#TODO: for all ttest functions, add tests with masked array inputs
class TestTtest_rel():
def test_vs_nonmasked(self):
np.random.seed(1234567)
outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
# 1-D inputs
res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1])
res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1])
assert_allclose(res1, res2)
# 2-D inputs
res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None)
res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None)
assert_allclose(res1, res2)
res1 = stats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0)
res2 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0)
assert_allclose(res1, res2)
# Check default is axis=0
res3 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:])
assert_allclose(res2, res3)
def test_result_attributes(self):
np.random.seed(1234567)
outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
res = mstats.ttest_rel(outcome[:, 0], outcome[:, 1])
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
def test_invalid_input_size(self):
assert_raises(ValueError, mstats.ttest_rel,
np.arange(10), np.arange(11))
x = np.arange(24)
assert_raises(ValueError, mstats.ttest_rel,
x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=1)
assert_raises(ValueError, mstats.ttest_rel,
x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=2)
def test_empty(self):
res1 = mstats.ttest_rel([], [])
assert_(np.all(np.isnan(res1)))
class TestTtest_ind():
def test_vs_nonmasked(self):
np.random.seed(1234567)
outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
# 1-D inputs
res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1])
res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1])
assert_allclose(res1, res2)
# 2-D inputs
res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None)
res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None)
assert_allclose(res1, res2)
res1 = stats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0)
res2 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0)
assert_allclose(res1, res2)
# Check default is axis=0
res3 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:])
assert_allclose(res2, res3)
def test_result_attributes(self):
np.random.seed(1234567)
outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
res = mstats.ttest_ind(outcome[:, 0], outcome[:, 1])
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
def test_empty(self):
res1 = mstats.ttest_ind([], [])
assert_(np.all(np.isnan(res1)))
class TestTtest_1samp():
def test_vs_nonmasked(self):
np.random.seed(1234567)
outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
# 1-D inputs
res1 = stats.ttest_1samp(outcome[:, 0], 1)
res2 = mstats.ttest_1samp(outcome[:, 0], 1)
assert_allclose(res1, res2)
# 2-D inputs
res1 = stats.ttest_1samp(outcome[:, 0], outcome[:, 1], axis=None)
res2 = mstats.ttest_1samp(outcome[:, 0], outcome[:, 1], axis=None)
assert_allclose(res1, res2)
res1 = stats.ttest_1samp(outcome[:, :2], outcome[:, 2:], axis=0)
res2 = mstats.ttest_1samp(outcome[:, :2], outcome[:, 2:], axis=0)
assert_allclose(res1, res2)
# Check default is axis=0
res3 = mstats.ttest_1samp(outcome[:, :2], outcome[:, 2:])
assert_allclose(res2, res3)
def test_result_attributes(self):
np.random.seed(1234567)
outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
res = mstats.ttest_1samp(outcome[:, 0], 1)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
def test_empty(self):
res1 = mstats.ttest_1samp([], 1)
assert_(np.all(np.isnan(res1)))
class TestCompareWithStats(TestCase):
"""
Class to compare mstats results with stats results.
It is in general assumed that scipy.stats is at a more mature stage than
stats.mstats. If a routine in mstats results in similar results like in
scipy.stats, this is considered also as a proper validation of scipy.mstats
routine.
Different sample sizes are used for testing, as some problems between stats
and mstats are dependent on sample size.
Author: Alexander Loew
NOTE that some tests fail. This might be caused by
a) actual differences or bugs between stats and mstats
b) numerical inaccuracies
c) different definitions of routine interfaces
These failures need to be checked. Current workaround is to have disabled these tests,
but issuing reports on scipy-dev
"""
def get_n(self):
""" Returns list of sample sizes to be used for comparison. """
return [1000, 100, 10, 5]
def generate_xy_sample(self, n):
# This routine generates numpy arrays and corresponding masked arrays
# with the same data, but additional masked values
np.random.seed(1234567)
x = np.random.randn(n)
y = x + np.random.randn(n)
xm = np.ones(len(x) + 5) * 1e16
ym = np.ones(len(y) + 5) * 1e16
xm[0:len(x)] = x
ym[0:len(y)] = y
mask = xm > 9e15
xm = np.ma.array(xm, mask=mask)
ym = np.ma.array(ym, mask=mask)
return x, y, xm, ym
def generate_xy_sample2D(self, n, nx):
x = np.ones((n, nx)) * np.nan
y = np.ones((n, nx)) * np.nan
xm = np.ones((n+5, nx)) * np.nan
ym = np.ones((n+5, nx)) * np.nan
for i in range(nx):
x[:,i], y[:,i], dx, dy = self.generate_xy_sample(n)
xm[0:n, :] = x[0:n]
ym[0:n, :] = y[0:n]
xm = np.ma.array(xm, mask=np.isnan(xm))
ym = np.ma.array(ym, mask=np.isnan(ym))
return x, y, xm, ym
def test_linregress(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
res1 = stats.linregress(x, y)
res2 = stats.mstats.linregress(xm, ym)
assert_allclose(np.asarray(res1), np.asarray(res2))
def test_pearsonr(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r, p = stats.pearsonr(x, y)
rm, pm = stats.mstats.pearsonr(xm, ym)
assert_almost_equal(r, rm, decimal=14)
assert_almost_equal(p, pm, decimal=14)
def test_spearmanr(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r, p = stats.spearmanr(x, y)
rm, pm = stats.mstats.spearmanr(xm, ym)
assert_almost_equal(r, rm, 14)
assert_almost_equal(p, pm, 14)
def test_gmean(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.gmean(abs(x))
rm = stats.mstats.gmean(abs(xm))
assert_allclose(r, rm, rtol=1e-13)
r = stats.gmean(abs(y))
rm = stats.mstats.gmean(abs(ym))
assert_allclose(r, rm, rtol=1e-13)
def test_hmean(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.hmean(abs(x))
rm = stats.mstats.hmean(abs(xm))
assert_almost_equal(r, rm, 10)
r = stats.hmean(abs(y))
rm = stats.mstats.hmean(abs(ym))
assert_almost_equal(r, rm, 10)
def test_skew(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.skew(x)
rm = stats.mstats.skew(xm)
assert_almost_equal(r, rm, 10)
r = stats.skew(y)
rm = stats.mstats.skew(ym)
assert_almost_equal(r, rm, 10)
def test_moment(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.moment(x)
rm = stats.mstats.moment(xm)
assert_almost_equal(r, rm, 10)
r = stats.moment(y)
rm = stats.mstats.moment(ym)
assert_almost_equal(r, rm, 10)
def test_signaltonoise(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.signaltonoise(x)
rm = stats.mstats.signaltonoise(xm)
assert_almost_equal(r, rm, 10)
r = stats.signaltonoise(y)
rm = stats.mstats.signaltonoise(ym)
assert_almost_equal(r, rm, 10)
def test_betai(self):
np.random.seed(12345)
for i in range(10):
a = np.random.rand() * 5.
b = np.random.rand() * 200.
assert_equal(stats.betai(a, b, 0.), 0.)
assert_equal(stats.betai(a, b, 1.), 1.)
assert_equal(stats.mstats.betai(a, b, 0.), 0.)
assert_equal(stats.mstats.betai(a, b, 1.), 1.)
x = np.random.rand()
assert_almost_equal(stats.betai(a, b, x),
stats.mstats.betai(a, b, x), decimal=13)
def test_zscore(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
#reference solution
zx = (x - x.mean()) / x.std()
zy = (y - y.mean()) / y.std()
#validate stats
assert_allclose(stats.zscore(x), zx, rtol=1e-10)
assert_allclose(stats.zscore(y), zy, rtol=1e-10)
#compare stats and mstats
assert_allclose(stats.zscore(x), stats.mstats.zscore(xm[0:len(x)]),
rtol=1e-10)
assert_allclose(stats.zscore(y), stats.mstats.zscore(ym[0:len(y)]),
rtol=1e-10)
def test_kurtosis(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.kurtosis(x)
rm = stats.mstats.kurtosis(xm)
assert_almost_equal(r, rm, 10)
r = stats.kurtosis(y)
rm = stats.mstats.kurtosis(ym)
assert_almost_equal(r, rm, 10)
def test_sem(self):
# example from stats.sem doc
a = np.arange(20).reshape(5,4)
am = np.ma.array(a)
r = stats.sem(a,ddof=1)
rm = stats.mstats.sem(am, ddof=1)
assert_allclose(r, 2.82842712, atol=1e-5)
assert_allclose(rm, 2.82842712, atol=1e-5)
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=0),
stats.sem(x, axis=None, ddof=0), decimal=13)
assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=0),
stats.sem(y, axis=None, ddof=0), decimal=13)
assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=1),
stats.sem(x, axis=None, ddof=1), decimal=13)
assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=1),
stats.sem(y, axis=None, ddof=1), decimal=13)
def test_describe(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.describe(x, ddof=1)
rm = stats.mstats.describe(xm, ddof=1)
for ii in range(6):
assert_almost_equal(np.asarray(r[ii]),
np.asarray(rm[ii]),
decimal=12)
def test_describe_result_attributes(self):
actual = mstats.describe(np.arange(5))
attributes = ('nobs', 'minmax', 'mean', 'variance', 'skewness',
'kurtosis')
check_named_results(actual, attributes, ma=True)
def test_rankdata(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.rankdata(x)
rm = stats.mstats.rankdata(x)
assert_allclose(r, rm)
def test_tmean(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.tmean(x),stats.mstats.tmean(xm), 14)
assert_almost_equal(stats.tmean(y),stats.mstats.tmean(ym), 14)
def test_tmax(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.tmax(x,2.),
stats.mstats.tmax(xm,2.), 10)
assert_almost_equal(stats.tmax(y,2.),
stats.mstats.tmax(ym,2.), 10)
def test_tmin(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_equal(stats.tmin(x),stats.mstats.tmin(xm))
assert_equal(stats.tmin(y),stats.mstats.tmin(ym))
assert_almost_equal(stats.tmin(x,lowerlimit=-1.),
stats.mstats.tmin(xm,lowerlimit=-1.), 10)
assert_almost_equal(stats.tmin(y,lowerlimit=-1.),
stats.mstats.tmin(ym,lowerlimit=-1.), 10)
def test_zmap(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
z = stats.zmap(x,y)
zm = stats.mstats.zmap(xm,ym)
assert_allclose(z, zm[0:len(z)], atol=1e-10)
def test_variation(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.variation(x), stats.mstats.variation(xm),
decimal=12)
assert_almost_equal(stats.variation(y), stats.mstats.variation(ym),
decimal=12)
def test_tvar(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.tvar(x), stats.mstats.tvar(xm),
decimal=12)
assert_almost_equal(stats.tvar(y), stats.mstats.tvar(ym),
decimal=12)
def test_trimboth(self):
a = np.arange(20)
b = stats.trimboth(a, 0.1)
bm = stats.mstats.trimboth(a, 0.1)
assert_allclose(b, bm.data[~bm.mask])
def test_tsem(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.tsem(x),stats.mstats.tsem(xm), decimal=14)
assert_almost_equal(stats.tsem(y),stats.mstats.tsem(ym), decimal=14)
assert_almost_equal(stats.tsem(x,limits=(-2.,2.)),
stats.mstats.tsem(xm,limits=(-2.,2.)),
decimal=14)
def test_skewtest(self):
# this test is for 1D data
for n in self.get_n():
if n > 8:
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.skewtest(x)
rm = stats.mstats.skewtest(xm)
assert_allclose(r[0], rm[0], rtol=1e-15)
# TODO this test is not performed as it is a known issue that
# mstats returns a slightly different p-value what is a bit
# strange is that other tests like test_maskedarray_input don't
# fail!
#~ assert_almost_equal(r[1], rm[1])
def test_skewtest_result_attributes(self):
x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
res = mstats.skewtest(x)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
def test_skewtest_2D_notmasked(self):
# a normal ndarray is passed to the masked function
x = np.random.random((20, 2)) * 20.
r = stats.skewtest(x)
rm = stats.mstats.skewtest(x)
assert_allclose(np.asarray(r), np.asarray(rm))
def test_skewtest_2D_WithMask(self):
nx = 2
for n in self.get_n():
if n > 8:
x, y, xm, ym = self.generate_xy_sample2D(n, nx)
r = stats.skewtest(x)
rm = stats.mstats.skewtest(xm)
assert_equal(r[0][0],rm[0][0])
assert_equal(r[0][1],rm[0][1])
def test_normaltest(self):
np.seterr(over='raise')
for n in self.get_n():
if n > 8:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=UserWarning)
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.normaltest(x)
rm = stats.mstats.normaltest(xm)
assert_allclose(np.asarray(r), np.asarray(rm))
def test_find_repeats(self):
x = np.asarray([1,1,2,2,3,3,3,4,4,4,4]).astype('float')
tmp = np.asarray([1,1,2,2,3,3,3,4,4,4,4,5,5,5,5]).astype('float')
mask = (tmp == 5.)
xm = np.ma.array(tmp, mask=mask)
r = stats.find_repeats(x)
rm = stats.mstats.find_repeats(xm)
assert_equal(r,rm)
def test_kendalltau(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.kendalltau(x, y)
rm = stats.mstats.kendalltau(xm, ym)
assert_almost_equal(r[0], rm[0], decimal=10)
assert_almost_equal(r[1], rm[1], decimal=7)
def test_obrientransform(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.obrientransform(x)
rm = stats.mstats.obrientransform(xm)
assert_almost_equal(r.T, rm[0:len(x)])
if __name__ == "__main__":
run_module_suite()
| [
"[email protected]"
] | |
d7a25d94dee5bb5c016aa6033dc187cfe73cf882 | 40f4908483b98fc4f370ff4f2d520e1284d045b3 | /phase02/immortals_repo/harness/pymmortals/generated/com/securboration/immortals/ontology/analysis/profiling/simpleresourcedependencyassertion.py | fd72bc793c259cc64cb2bc4289b0667e7140091c | [] | no_license | TF-185/bbn-immortals | 7f70610bdbbcbf649f3d9021f087baaa76f0d8ca | e298540f7b5f201779213850291337a8bded66c7 | refs/heads/master | 2023-05-31T00:16:42.522840 | 2019-10-24T21:45:07 | 2019-10-24T21:45:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 652 | py | from pymmortals.datatypes.serializable import Serializable
from pymmortals.generated.com.securboration.immortals.ontology.core.resource import Resource
from pymmortals.generated.com.securboration.immortals.ontology.measurement.codeunitpointer import CodeUnitPointer
from typing import Type
# noinspection PyPep8Naming
class SimpleResourceDependencyAssertion(Serializable):
_validator_values = dict()
_types = dict()
def __init__(self,
codeUnit: CodeUnitPointer = None,
dependency: Type[Resource] = None):
super().__init__()
self.codeUnit = codeUnit
self.dependency = dependency
| [
"[email protected]"
] | |
f57d10cb23fa6300616fe2080588f7d3c6404adb | 190072bc404751d83e5aceb99a34ccba1067caae | /photobot/examples/Layer_function_select.py | 6437d18ae66b047371f687e0ca0497d59b8a25ed | [
"MIT"
] | permissive | karstenw/Library | ab751bde79bb0bd2bd7f705901dab415ba154476 | 9c3f665be4988c14d939d28e7729c72819bba446 | refs/heads/master | 2023-08-14T04:53:15.559747 | 2023-07-16T12:27:19 | 2023-07-16T12:27:19 | 46,520,062 | 0 | 0 | null | 2023-05-18T14:06:29 | 2015-11-19T21:00:38 | Python | UTF-8 | Python | false | false | 1,223 | py | import sys, os
# need a different name
import random as rnd
import pprint
pp = pprint.pprint
import pdb
kwdbg = 0
W, H = 542, 1050
fullwidth = int(W-20)
tilewidth = int((fullwidth-10) / 2.0)
# check for Nodebox
NB = True
try:
_ctx
except(NameError):
NB = False
if NB:
size(W, H)
pb = ximport("photobot")
else:
WIDTH, HEIGHT = W, H
import photobot as pb
import imagewells
if kwdbg:
# make random choices repeatable for debugging
rnd.seed(8)
imagewell = imagewells.loadImageWell(resultfile="imagewell-files")
tiles = imagewell['landscape']
rnd.shuffle(tiles)
# pick 2 images
img1path = tiles.pop()
img2path = tiles.pop()
# create a gray canvas
c = pb.canvas( WIDTH, HEIGHT)
c.fill( (192, 192, 192) )
#
# Image 1
#
_, filename = os.path.split( img1path )
# create, scale and place the image
x, y = 10, 10
img1, w1, h1 = pb.placeImage(c, img1path, x, y, WIDTH-20, "Image 1 Base")
c.top.autocontrast(cutoff=0)
pb.label(c, filename, x, y)
#
# Image 2
#
c.layers[img1].duplicate()
path=( (w1/2,0), (w1,int(h1*0.667)), (w1/2.0, h1), (0,h1*0.75),(0,h1/2) )
c.top.select( path )
x, y = 10, h1+20+10
c.top.translate( x, y)
# draw the result
c.draw(name="Layer_function_select")
| [
"[email protected]"
] | |
19a267b88eeda5563af6a304dcbd755284124dfc | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_84/177.py | db6951d5ed962140a11a025f300265217eb10a9c | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | # coding: shift-jis
import sys
f = file(sys.argv[1])
test_cnt = int(f.readline())
for case in range(1, test_cnt+1):
V, H = map(int, f.readline().split())
row = [list(f.readline()[:-1]) for _ in range(V) ]
ret = True
for v in range(V):
for h in range(H):
if row[v][h] == '#':
if v == V-1 or h == H-1:
ret = False
break
if row[v][h+1] != '#' or row[v+1][h] != '#' or row[v+1][h+1]!='#':
ret = False
break
row[v][h] = '/'
row[v][h+1] = '\\'
row[v+1][h] = '\\'
row[v+1][h+1] = '/'
print 'Case #%d:'%case
if ret:
for r in row:
print reduce(lambda a,b:a+b, r)
else:
print 'Impossible'
| [
"[email protected]"
] |