Dataset schema (column name, value type, and the reported length range, class count, or value range; columns that report null values are marked nullable):

Column | Type | Lengths / classes / range
---|---|---
blob_id | string | length 40
directory_id | string | length 40
path | string | length 3 to 616
content_id | string | length 40
detected_licenses | list | length 0 to 112
license_type | string | 2 classes
repo_name | string | length 5 to 115
snapshot_id | string | length 40
revision_id | string | length 40
branch_name | string | 777 classes
visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38
revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00
committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06
github_id | int64, nullable | 4.92k to 681M
star_events_count | int64 | 0 to 209k
fork_events_count | int64 | 0 to 110k
gha_license_id | string | 22 classes
gha_event_created_at | timestamp[us], nullable | 2012-06-04 01:52:49 to 2023-09-14 21:59:50
gha_created_at | timestamp[us], nullable | 2008-05-22 07:58:19 to 2023-08-21 12:35:19
gha_language | string | 149 classes
src_encoding | string | 26 classes
language | string | 1 class
is_vendor | bool | 2 classes
is_generated | bool | 2 classes
length_bytes | int64 | 3 to 10.2M
extension | string | 188 classes
content | string | length 3 to 10.2M
authors | list | length 1
author_id | string | length 1 to 132

Each record below lists these fields in this order, separated by `|`, with the file's `content` reproduced verbatim between the `extension` and `authors` fields.
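A short sketch of how rows with this schema could be read programmatically follows. It assumes the corpus is published as a Hugging Face dataset and streamed with the `datasets` library; the dataset id is not given in this preview, so `DATASET_ID` below is a placeholder, and the filtering thresholds are only illustrative.

```python
# Sketch only: stream and filter records that follow the schema above.
# `DATASET_ID` is a placeholder -- the real dataset id is not stated in
# this preview, and the filter thresholds are arbitrary examples.
from itertools import islice

from datasets import load_dataset  # Hugging Face `datasets` library

DATASET_ID = "some-org/some-code-dataset"  # hypothetical identifier

# Streaming avoids downloading the full dataset before iterating.
ds = load_dataset(DATASET_ID, split="train", streaming=True)

for record in islice(ds, 100):
    # Skip vendored or auto-generated files, as flagged in the schema.
    if record["is_vendor"] or record["is_generated"]:
        continue
    # Keep only small Python sources.
    if record["extension"] != "py" or record["length_bytes"] > 50_000:
        continue
    print(record["repo_name"], record["path"], record["license_type"])
```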
059e8d97f0b62ea4ab980bb45f12a01bacc68228 | 6dd08ec6b4f6351de8450a3d7e592fd6b4994119 | /cbase/server/cbase-1.8.1/testrunner/pytests/spatialcompaction.py | 190ff0ff033f2e8af6e3946146558e06a12e1206 | []
| no_license | zhgwenming/appstack | d015e96b911fe318f9fba1bdeeea9d888d57dfba | 8fe6c1dfc2f5ed4a36c335e86ae28c17b3769276 | refs/heads/master | 2021-01-23T13:30:19.507537 | 2015-11-09T06:48:35 | 2015-11-09T06:48:35 | 7,576,644 | 1 | 2 | null | 2016-01-05T09:16:22 | 2013-01-12T15:13:21 | C | UTF-8 | Python | false | false | 1,569 | py | import unittest
import uuid
import logger
from membase.helper.spatial_helper import SpatialHelper
class SpatialCompactionTests(unittest.TestCase):
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        self.helper.setup_cluster()

    def tearDown(self):
        self.helper.cleanup_cluster()

    def test_spatial_compaction(self):
        self.log.info(
            "description : test manual compaction for spatial indexes")
        prefix = str(uuid.uuid4())[:7]
        design_name = "dev_test_spatial_compaction"
        self.helper.create_index_fun(design_name, prefix)
        # Insert (resp. update, as they have the same prefix) and query
        # the spatial index several times so that the compaction makes sense
        for i in range(0, 8):
            self.helper.insert_docs(2000, prefix)
            self.helper.get_results(design_name)
        # Get the index size prior to compaction
        status, info = self.helper.info(design_name)
        disk_size = info["spatial_index"]["disk_size"]
        # Do the compaction
        self.helper.compact(design_name)
        # Check if the index size got smaller
        status, info = self.helper.info(design_name)
        self.assertTrue(info["spatial_index"]["disk_size"] < disk_size,
                        "The file size ({0}) isn't smaller than the "
                        "pre compaction size ({1})."
                        .format(info["spatial_index"]["disk_size"],
                                disk_size))
| [
"[email protected]"
]
| |
dc9289d234825789dfd30143764b5bf441e87b50 | a7cca49626a3d7100e9ac5c2f343c351ecb76ac7 | /playbooks/tests/led_toggle.py | f8079be0655d96fcf02c841fe646899d740a03c0 | [
"MIT"
]
| permissive | Carglglz/upydev | 104455d77d64300074bda54d86bd791f19184975 | 529aa29f3e1acf8160383fe410b5659110dc96de | refs/heads/master | 2023-05-24T18:38:56.242500 | 2022-10-21T14:03:17 | 2022-10-21T14:03:17 | 199,335,165 | 49 | 9 | MIT | 2022-10-21T14:03:18 | 2019-07-28T20:42:00 | Python | UTF-8 | Python | false | false | 142 | py | import time
for i in range(5):
    print(f"This is a loaded script: {i}")
    led.on()
    time.sleep(0.5)
    led.off()
    time.sleep(0.5)
| [
"[email protected]"
]
| |
dcff227305bc074d0d32949ae48b052c1608a805 | dd3bbd4e7aaee7a8a5f26b927ce28ac472c855a5 | /eggs/Products.CMFPlone-4.1-py2.7.egg/Products/CMFPlone/skins/plone_scripts/getNotAddableTypes.py | d9e66131d0c1fb846122cf94e88d8368a72a9d1e | []
| no_license | nacho22martin/tesis | ea0a822f8bdbdef6f13f41276ecd4d6e85427ca5 | e137eb6225cc5e724bee74a892567796166134ac | refs/heads/master | 2020-12-24T13:20:58.334839 | 2013-11-09T12:42:41 | 2013-11-09T12:42:41 | 14,261,570 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | ## Script (Python) "getNotAddableTypes"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=
##title=
##
# customize this script to filter addable portal types based on
# context, the current user or other criteria
return ()
| [
"ignacio@plone.(none)"
]
| ignacio@plone.(none) |
859189dfd335cbf552d601b7f074a5040f3b71b9 | d1f8aef0e3da67555b6b7d57ac9bec0b94e12cc5 | /dragex/interfaces/__init__.py | d85a2f6ea8e655ceea1d1c1ab049f645c0717c72 | []
| no_license | victorhook/dragex | d3593f0c12fc2cbdbccc14a085f70e493f3b8f05 | 6c06740230f7513318abe79c78cb6d4369ba3e68 | refs/heads/master | 2023-06-02T03:58:54.061938 | 2021-06-17T19:06:24 | 2021-06-17T19:06:24 | 370,010,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | from .drawable import Drawable # noqa
from .game_object import GameObject # noqa
from .screen import Screen # noqa
from .sprite_image import SpriteImage # noqa
| [
"[email protected]"
]
| |
7bffb66e5f552e2e744965e1073430a1c8eaf3b7 | 1b60858c303bd7d88dae82b8db56273c326ddb44 | /tests/swagger_client_tests/test_processor_status_snapshot_entity.py | 5f4fb8dda20bf1e9f698019dba23303937af0daf | [
"Apache-2.0"
]
| permissive | tspannhw/nipyapi | 1ba076ef669493bad20681579891eea1d43f4fc8 | 30cdd028cf68cc4316b54a23bfa1f0397de3ae23 | refs/heads/master | 2021-07-19T14:37:22.993682 | 2017-10-29T18:52:31 | 2017-10-29T18:52:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,475 | py | # coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.2.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import nipyapi
from nipyapi.swagger_client.rest import ApiException
from nipyapi.swagger_client.models.processor_status_snapshot_entity import ProcessorStatusSnapshotEntity
class TestProcessorStatusSnapshotEntity(unittest.TestCase):
    """ ProcessorStatusSnapshotEntity unit test stubs """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testProcessorStatusSnapshotEntity(self):
        """
        Test ProcessorStatusSnapshotEntity
        """
        # FIXME: construct object with mandatory attributes with example values
        # model = nipyapi.swagger_client.models.processor_status_snapshot_entity.ProcessorStatusSnapshotEntity()
        pass


if __name__ == '__main__':
    unittest.main()
| [
"[email protected]"
]
| |
da5f875d6ad92fb09e0281b4cd2eaf5ec54ecfc4 | 1974b3e9c5f2f677833e1608a41281f377fd331c | /dltesthttp_xuyalin2/www/testcase/webservice/ts_ws_orders/getOrderLog.py | f1dc05383ce69ec78eba00680612de440006ef31 | []
| no_license | xyl00755/pythonLearning | ed0f540b61247c3560f347853da5886b2e2ba25d | c6aecff86ff34dcd7358d98201627ff84e9bf2cf | refs/heads/master | 2021-01-13T08:19:25.171016 | 2016-12-16T05:43:10 | 2016-12-16T05:43:10 | 71,764,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,736 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
0255.获取订单跟踪信息
http://127.0.0.1:8280/mallws/orders/getOrderLog.json
{
"token": "57469529686440a88fedb0bed51ba5d0", // 必须 token
"orderNo":"123123123" // 必须 订单号
}
{
"code": 200,
"description": "执行成功!",
"model": {
"success": "0", // 成功 0-成功 1-失败
"orderLogList": [
{
"beforeStatus": "xx", // 订单之前的状态
"dealDescrip": "xx", // 订单操作说明
"nowStatus": "xx", // 订单当前状态
"dealDate": "xx" // 操作时间
}
]
},
"metadata": {
"type": 0,
"clazz": "cn.com.hd.mall.web.webservices.entity.response.order.OrderLogResponse"
}
}
参数校验:
只做必须验证
code说明:
100-token失效 200-成功 300-错误的角色(无权限) 400-非法的参数 500-服务器异常 600-重新登陆
"""
import unittest
from www.api.webservice import *
from www.common.excel import wsData
from www.operation.order import createOrder
class getOrderLog(unittest.TestCase):
UserShop = wsData('TmlShop')
UserShopMin = wsData('TmlShopMin')
DealMgr = wsData('DealMager')
DealMgr2 = wsData('DealMager2')
DealSaler = wsData('DealSaler')
DealBuyer = wsData('DealBuyer')
Merch1 = wsData('Merch1')
wsUserShop = webservice()
wsUserShop.login(UserShop.username, UserShop.password)
wsDealMgr = webservice()
wsDealMgr.login(DealMgr.username, DealMgr.password)
wsDealMgr2 = webservice()
wsDealMgr2.login(DealMgr2.username, DealMgr2.password)
wsDealSaler = webservice()
wsDealSaler.login(DealSaler.username, DealSaler.password)
wsDealBuyer = webservice()
wsDealBuyer.login(DealBuyer.username, DealBuyer.password)
# S1.货到付款提交订单获取订单跟踪消息
def test_getOrderLog_createOrder(self):
orderLog = self.wsUserShop.getOrderLog(self.UserShop.orderCodWaitDeliver.orderNo)
self.assertEqual(orderLog['model']['success'], '0')
self.assertEqual(orderLog['model']['orderLogList'][0]['beforeStatus'], '')
self.assertIsNotNone(orderLog['model']['orderLogList'][0]['dealDate'])
self.assertEqual(orderLog['model']['orderLogList'][0]['dealDescrip'], u'提交订单')
self.assertEqual(orderLog['model']['orderLogList'][0]['nowStatus'], 'C020')
# S2.货到付款取消订单获取订单跟踪消息
def test_getOrderLog_cancelOrder(self):
orderLog = self.wsUserShop.getOrderLog(self.UserShop.orderCodCancel.orderNo)
self.assertEqual(orderLog['model']['success'], '0')
flag = 0
for i in range(0,len(orderLog['model']['orderLogList'])):
if orderLog['model']['orderLogList'][i]['beforeStatus'] == 'C020':
self.assertIsNotNone(orderLog['model']['orderLogList'][i]['dealDate'])
self.assertEqual(orderLog['model']['orderLogList'][i]['dealDescrip'], u'交易已取消')
self.assertEqual(orderLog['model']['orderLogList'][i]['nowStatus'], 'C012')
flag += 1
self.assertEqual(flag, 1, 'cancel order log is not found or is found twice')
# S3.货到付款订单发货获取订单跟踪消息
def test_getOrderLog_deliverOrder(self):
orderLog = self.wsUserShop.getOrderLog(self.UserShop.orderCodWaitReceive.orderNo)
self.assertEqual(orderLog['model']['success'], '0')
flag = 0
for i in range(0,len(orderLog['model']['orderLogList'])):
if orderLog['model']['orderLogList'][i]['beforeStatus'] == 'C020':
self.assertIsNotNone(orderLog['model']['orderLogList'][i]['dealDate'])
self.assertEqual(orderLog['model']['orderLogList'][i]['dealDescrip'], u'卖家发货')
self.assertEqual(orderLog['model']['orderLogList'][i]['nowStatus'], 'C017')
flag += 1
self.assertEqual(flag, 1, 'cancel order log is not found or is found twice')
# S4.货到付款订单交易完成订单跟踪消息
def test_getOrderLog_codComplete(self):
orderLog = self.wsUserShop.getOrderLog(self.UserShop.orderCodComplete.orderNo)
self.assertEqual(orderLog['model']['success'], '0')
flag = 0
for i in range(0,len(orderLog['model']['orderLogList'])):
if orderLog['model']['orderLogList'][i]['beforeStatus'] == 'C017':
self.assertIsNotNone(orderLog['model']['orderLogList'][i]['dealDate'])
self.assertEqual(orderLog['model']['orderLogList'][i]['dealDescrip'], u'交易完成')
self.assertEqual(orderLog['model']['orderLogList'][i]['nowStatus'], 'C019')
flag += 1
self.assertEqual(flag, 1, 'cancel order log is not found or is found twice')
# S5.订单改价获取订单跟踪消息——暂时不会记录订单跟踪
def test_getOrderLog_changPrice(self):
order = createOrder(self.UserShop, self.Merch1)
ws = webservice()
ws.login(self.DealMgr.username, self.DealMgr.password)
ws.changeOrderPrice(orderNo=order.orderNo, orderDiscountAmount='100', orderChangeAmount='11900', orderStatus='C020')
ws.deliver(orderNo=order.orderNo)
orderLog = order.ws.getOrderLog(order.orderNo)
self.assertEqual(orderLog['model']['success'], '0')
flag = 0
for i in range(0,len(orderLog['model']['orderLogList'])):
if orderLog['model']['orderLogList'][i]['beforeStatus'] == 'C020':
self.assertIsNotNone(orderLog['model']['orderLogList'][i]['dealDate'])
self.assertEqual(orderLog['model']['orderLogList'][i]['dealDescrip'], u'卖家发货')
self.assertEqual(orderLog['model']['orderLogList'][i]['nowStatus'], 'C017')
flag += 1
self.assertEqual(flag, 1, 'cancel order log is not found or is found twice')
# S6.待收货订单取消后拒绝取消、同意取消订单跟踪
def test_getOrderLog_cancelAudit(self):
order = createOrder(self.UserShop, self.Merch1)
ws = webservice()
ws.login(self.DealMgr.username, self.DealMgr.password)
ws.deliver(orderNo=order.orderNo)
order.ws.cancel(paymentNo=order.paymentNo, cancelType='3')
ws.auditCancel(paymentNo=order.paymentNo, orderNo=order.orderNo, auditStatus='1')
order.ws.cancel(paymentNo=order.paymentNo, cancelType='3')
ws.auditCancel(paymentNo=order.paymentNo, orderNo=order.orderNo, auditStatus='0')
orderLog = order.ws.getOrderLog(order.orderNo)
self.assertEqual(orderLog['model']['success'], '0')
flagCancel = 0
flagReject = 0
flagAgree = 0
for i in range(0,len(orderLog['model']['orderLogList'])):
if orderLog['model']['orderLogList'][i]['dealDescrip'] == u'交易取消中':
self.assertEqual(orderLog['model']['orderLogList'][i]['beforeStatus'], 'C017')
self.assertIsNotNone(orderLog['model']['orderLogList'][i]['dealDate'])
self.assertEqual(orderLog['model']['orderLogList'][i]['nowStatus'], 'C017')
flagCancel += 1
continue
if orderLog['model']['orderLogList'][i]['dealDescrip'] == u'卖家拒绝取消':
self.assertEqual(orderLog['model']['orderLogList'][i]['beforeStatus'], 'C017')
self.assertIsNotNone(orderLog['model']['orderLogList'][i]['dealDate'])
self.assertEqual(orderLog['model']['orderLogList'][i]['nowStatus'], 'C017')
flagReject += 1
continue
if orderLog['model']['orderLogList'][i]['dealDescrip'] == u'交易已取消':
self.assertEqual(orderLog['model']['orderLogList'][i]['beforeStatus'], 'C017')
self.assertIsNotNone(orderLog['model']['orderLogList'][i]['dealDate'])
self.assertEqual(orderLog['model']['orderLogList'][i]['nowStatus'], 'C012')
flagAgree += 1
continue
self.assertEqual(flagCancel, 2, order.orderNo + 'cancel time is wrong!')
self.assertEqual(flagReject, 1, order.orderNo + 'cancel reject time is wrong!')
self.assertEqual(flagAgree, 1, order.orderNo + 'cancel agree time is wrong!')
# S7.在线支付提交订单获取订单跟踪
def test_getOrderLog_createOrderOnline(self):
orderLog = self.wsUserShop.getOrderLog(self.UserShop.orderOnlineWaitPay.orderNo)
self.assertEqual(orderLog['model']['success'], '0')
self.assertEqual(orderLog['model']['orderLogList'][0]['beforeStatus'], '')
self.assertIsNotNone(orderLog['model']['orderLogList'][0]['dealDate'])
self.assertEqual(orderLog['model']['orderLogList'][0]['dealDescrip'], u'提交订单')
self.assertEqual(orderLog['model']['orderLogList'][0]['nowStatus'], 'C011')
# S8.在线支付取消订单订单获取订单跟踪
def test_getOrderLog_cancelOrderOnline(self):
orderLog = self.wsUserShop.getOrderLog(self.UserShop.orderOnlienCancel.orderNo)
flag = 0
for i in range(0,len(orderLog['model']['orderLogList'])):
if orderLog['model']['orderLogList'][i]['beforeStatus'] == 'C011':
self.assertIsNotNone(orderLog['model']['orderLogList'][i]['dealDate'])
#self.assertLess(orderLog['model']['orderLogList'][i]['dealDate'], datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'))
self.assertEqual(orderLog['model']['orderLogList'][i]['dealDescrip'], u'交易已取消')
self.assertEqual(orderLog['model']['orderLogList'][i]['nowStatus'], 'C012')
flag += 1
self.assertEqual(flag, 1, self.UserShop.orderOnlienCancel.orderNo + 'cancel order log is not found or is found twice')
# S9.在线支付付款获取订单跟踪
# S10.在线支付发货获取订单跟踪
# S11.在线支付确认收货获取订单跟踪
# S12.经销商管理员获取订单跟踪
def test_getOrderLog_dealMager(self):
orderLog = self.wsDealMgr.getOrderLog(self.UserShop.orderCodWaitReceive.orderNo)
self.assertEqual(orderLog['model']['success'], '0')
flag = 0
for i in range(0,len(orderLog['model']['orderLogList'])):
if orderLog['model']['orderLogList'][i]['beforeStatus'] == 'C020':
self.assertIsNotNone(orderLog['model']['orderLogList'][i]['dealDate'])
self.assertEqual(orderLog['model']['orderLogList'][i]['dealDescrip'], u'卖家发货')
self.assertEqual(orderLog['model']['orderLogList'][i]['nowStatus'], 'C017')
flag += 1
self.assertEqual(flag, 1, 'cancel order log is not found or is found twice')
# S13.经销商销售员获取订单跟踪
def test_getOrderLog_dealSaler(self):
orderLog = self.wsDealSaler.getOrderLog(self.UserShop.orderCodWaitReceive.orderNo)
self.assertEqual(orderLog['model']['success'], '0')
flag = 0
for i in range(0,len(orderLog['model']['orderLogList'])):
if orderLog['model']['orderLogList'][i]['beforeStatus'] == 'C020':
self.assertIsNotNone(orderLog['model']['orderLogList'][i]['dealDate'])
self.assertEqual(orderLog['model']['orderLogList'][i]['dealDescrip'], u'卖家发货')
self.assertEqual(orderLog['model']['orderLogList'][i]['nowStatus'], 'C017')
flag += 1
self.assertEqual(flag, 1, 'cancel order log is not found or is found twice')
# S14.经销商采购员员获取订单跟踪——未校验权限
def test_getOrderLog_dealBuyer(self):
orderLog = self.wsDealBuyer.getOrderLog(self.UserShop.orderCodWaitReceive.orderNo)
self.assertEqual(orderLog['model']['success'], '0')
flag = 0
for i in range(0,len(orderLog['model']['orderLogList'])):
if orderLog['model']['orderLogList'][i]['beforeStatus'] == 'C020':
self.assertIsNotNone(orderLog['model']['orderLogList'][i]['dealDate'])
self.assertEqual(orderLog['model']['orderLogList'][i]['dealDescrip'], u'卖家发货')
self.assertEqual(orderLog['model']['orderLogList'][i]['nowStatus'], 'C017')
flag += 1
self.assertEqual(flag, 1, 'cancel order log is not found or is found twice')
# S15.获取其他用户订单日志——未校验,当前暂不修改~
def test_getOrderLog_dealOther(self):
orderLog = self.wsDealMgr2.getOrderLog(self.UserShop.orderCodWaitReceive.orderNo)
self.assertEqual(orderLog['model']['success'], '0')
flag = 0
for i in range(0,len(orderLog['model']['orderLogList'])):
if orderLog['model']['orderLogList'][i]['beforeStatus'] == 'C020':
self.assertIsNotNone(orderLog['model']['orderLogList'][i]['dealDate'])
self.assertEqual(orderLog['model']['orderLogList'][i]['dealDescrip'], u'卖家发货')
self.assertEqual(orderLog['model']['orderLogList'][i]['nowStatus'], 'C017')
flag += 1
self.assertEqual(flag, 1, 'cancel order log is not found or is found twice')
# S16.订单号为空获取订单日志
def test_getOrderLog_orderNoNull(self):
orderLog = self.wsUserShop.getOrderLog('')
self.assertIsNone(orderLog['model']['success'])
self.assertIsNone(orderLog['model']['orderLogList'])
# S17.token为空获取订单日志
def test_getOrderLog_tokenNull(self):
ws = webservice()
orderLog = ws.getOrderLog(self.UserShop.orderCodWaitReceive.orderNo)
self.assertEqual(orderLog['code'], 600)
def suite():
suite = unittest.TestSuite()
suite.addTest(getOrderLog("test_getOrderLog_createOrder"))
suite.addTest(getOrderLog("test_getOrderLog_cancelOrder"))
suite.addTest(getOrderLog("test_getOrderLog_deliverOrder"))
suite.addTest(getOrderLog("test_getOrderLog_codComplete"))
#suite.addTest(getOrderLog("test_getOrderLog_changPrice"))
suite.addTest(getOrderLog("test_getOrderLog_cancelAudit"))
suite.addTest(getOrderLog("test_getOrderLog_createOrderOnline"))
suite.addTest(getOrderLog("test_getOrderLog_cancelOrderOnline"))
suite.addTest(getOrderLog("test_getOrderLog_dealMager"))
suite.addTest(getOrderLog("test_getOrderLog_dealSaler"))
suite.addTest(getOrderLog("test_getOrderLog_dealBuyer"))
#suite.addTest(getOrderLog("test_getOrderLog_dealOther"))
suite.addTest(getOrderLog("test_getOrderLog_orderNoNull"))
suite.addTest(getOrderLog("test_getOrderLog_tokenNull"))
return suite | [
"[email protected]"
]
| |
7ee7f2e7f0034ad78299103059e5d41c7e5251e8 | 47ff744da519c525cccfad1d8cead74f7e2cd209 | /uge4/.history/exercise_20200220124148.py | f126b64625bf836dfaac34c1d4c008fc555bbe88 | []
| no_license | Leafmight/Python | f6098395a7a13dd6afe6eb312a3eb1f3dbe78b84 | d987f22477c77f3f21305eb922ae6855be483255 | refs/heads/master | 2020-12-21T14:21:06.802341 | 2020-05-22T10:21:37 | 2020-05-22T10:21:37 | 236,457,255 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | import numpy as np
filename = './befkbhalderstatkode.csv'
dd = np.genfromtxt(filename, delimiter=',', dtype=np.uint, skip_header=1)
neighb = {1: 'Indre By', 2: 'Østerbro', 3: 'Nørrebro', 4: 'Vesterbro/Kgs. Enghave',
5: 'Valby', 6: 'Vanløse', 7: 'Brønshøj-Husum', 8: 'Bispebjerg', 9: 'Amager Øst',
10: 'Amager Vest', 99: 'Udenfor'}
def pop(hood):
    hood_mask = (dd[:,0] == 2015) & (dd[:,1] == hood)
    return np.sum(dd[hood_mask][:4])

def getSumPerHood():
    lst = {}
    for key, value in neighb.items():
        lst.update({value: pop(key)})
    return lst
| [
"[email protected]"
]
| |
e8813cd668f7ed59984bd897bab0933c4ba2a92a | 8a36ddf6a9f2f6c00ff7d3db72fe7a6f88ead7a2 | /weather/weather.py | f53c89e3bc9040f4b89115a55e4788b9c56e3dde | []
| no_license | pccode21/PyQt5 | 5d5b79f55d6165d03d58768bf30f25382ac7812b | f0af930b1338d0472aacbd3cab65be009bddd96e | refs/heads/master | 2020-12-03T11:07:44.226390 | 2020-02-19T05:29:09 | 2020-02-19T05:29:09 | 231,293,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,490 | py | from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from MainWindow import Ui_MainWindow
from datetime import datetime
import json
import os
import sys
import requests
from urllib.parse import urlencode
# OPENWEATHERMAP_API_KEY = os.environ.get('b020112734ca76c7df0ccad361a58fa3')
"""
从https://openweathermap.org/获取API密钥以与此结合使用
应用.
"""
def from_ts_to_time_of_day(ts):
dt = datetime.fromtimestamp(ts)
return dt.strftime("%I%p").lstrip("0")
class WorkerSignals(QObject):
'''
定义正在运行的工作线程可用的信号.
'''
finished = pyqtSignal()
error = pyqtSignal(str)
result = pyqtSignal(dict, dict)
class WeatherWorker(QRunnable):
'''
工作线程天气更新.
'''
signals = WorkerSignals()
is_interrupted = False
def __init__(self, location):
super(WeatherWorker, self).__init__()
self.location = location
@pyqtSlot()
def run(self):
try:
params = dict(
q=self.location,
appid='b020112734ca76c7df0ccad361a58fa3'
)
url = 'http://api.openweathermap.org/data/2.5/weather?%s&units=metric' % urlencode(params)
r = requests.get(url)
weather = json.loads(r.text)
# 检查我们是否失败(预测将以同样的方式失败).
if weather['cod'] != 200:
raise Exception(weather['message'])
url = 'http://api.openweathermap.org/data/2.5/forecast?%s&units=metric' % urlencode(params)
r = requests.get(url)
forecast = json.loads(r.text)
self.signals.result.emit(weather, forecast)
except Exception as e:
self.signals.error.emit(str(e))
self.signals.finished.emit()
class MainWindow(QMainWindow, Ui_MainWindow):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
self.setupUi(self)
self.pushButton.pressed.connect(self.update_weather)
self.threadpool = QThreadPool() # 创建线程池类,以处理运行工作程序
self.show()
def alert(self, message):
alert = QMessageBox.warning(self, "Warning", message)
def update_weather(self):
worker = WeatherWorker(self.lineEdit.text())
worker.signals.result.connect(self.weather_result)
worker.signals.error.connect(self.alert)
self.threadpool.start(worker)
def weather_result(self, weather, forecasts):
self.latitudeLabel.setText("%.2f °" % weather['coord']['lat'])
self.longitudeLabel.setText("%.2f °" % weather['coord']['lon'])
self.windLabel.setText("%.2f m/s" % weather['wind']['speed'])
self.temperatureLabel.setText("%.1f °C" % weather['main']['temp'])
self.pressureLabel.setText("%d" % weather['main']['pressure'])
self.humidityLabel.setText("%d" % weather['main']['humidity'])
self.sunriseLabel.setText(from_ts_to_time_of_day(weather['sys']['sunrise']))
# 使用自定义from_ts_to_time_of_day函数处理时间戳,以am / pm格式返回用户友好的一天中的时间,且不带前导零。
self.weatherLabel.setText("%s (%s)" % (
weather['weather'][0]['main'],
weather['weather'][0]['description']
)
)
self.set_weather_icon(self.weatherIcon, weather['weather'])
for n, forecast in enumerate(forecasts['list'][:5], 1):
getattr(self, 'forecastTime%d' % n).setText(from_ts_to_time_of_day(forecast['dt']))
self.set_weather_icon(getattr(self, 'forecastIcon%d' % n), forecast['weather'])
getattr(self, 'forecastTemp%d' % n).setText("%.1f °C" % forecast['main']['temp'])
# 从weatherdict 设置当前的天气图标,然后遍历所提供的前5个天气预报。预报图标,时间和温度标签在Qt Designer中使用forecastIcon<n>,forecastTime<n>和定义 forecastTemp<n>,可以轻松地依次迭代它们并使用getattr当前迭代索引检索它们。
def set_weather_icon(self, label, weather):
label.setPixmap(
QPixmap(os.path.join('./PyQt5/weather/images', "%s.png" %
weather[0]['icon']
)
)
)
if __name__ == '__main__':
app = QApplication([])
window = MainWindow()
app.exec_()
| [
"[email protected]"
]
| |
7c76835603d90ac7c8e51e9c8be02a23b28636b1 | a5dd6bcb59130979624c0274a91bb1566421dbc4 | /thor/config.py | f0faee12c5842bbacca47f5949d4fa2242d68ec3 | [
"BSD-3-Clause"
]
| permissive | mjuric/thor | 62563455526eaec09c96341ac239a5985824f24b | 4e2403bf9c08e998ccd7a277583b0e550b9d3a67 | refs/heads/main | 2023-04-21T02:22:17.359744 | 2021-05-19T20:12:56 | 2021-05-19T20:12:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,027 | py | import numpy as np
__all__ = ["Config"]
class Config:
"""
Config: Holds configuration settings.
Of interest to the user are two main attributes:
columnMapping : This dictionary should define the data
column names of the user's data relative to the
internally used names.
oorbDirectory : Oorb install location should be defined
here.
Parameters
----------
None
Returns
-------
None
"""
MIN_OBS = 5
MIN_ARC_LENGTH = 1.0
CONTAMINATION_PERCENTAGE = 20
BACKEND = "PYOORB"
BACKEND_KWARGS = {}
NUM_THREADS = 60
USE_RAY = False
USE_GPU = False
RANGE_SHIFT_CONFIG = {
"cell_area" : 1000,
"threads" : NUM_THREADS,
"backend" : BACKEND,
"backend_kwargs" : BACKEND_KWARGS,
}
CLUSTER_LINK_CONFIG = {
"vx_range" : [-0.1, 0.1],
"vy_range" : [-0.1, 0.1],
"vx_bins" : 300,
"vy_bins" : 300,
"vx_values" : None,
"vy_values" : None,
"eps" : 5/3600,
"min_samples" : MIN_OBS,
"min_arc_length" : MIN_ARC_LENGTH,
"threads" : NUM_THREADS,
}
IOD_CONFIG = {
"min_obs" : MIN_OBS,
"min_arc_length" : MIN_ARC_LENGTH,
"contamination_percentage" : CONTAMINATION_PERCENTAGE,
"rchi2_threshold" : 1000,
"observation_selection_method" : "combinations",
"iterate" : False,
"light_time" : True,
"linkage_id_col" : "cluster_id",
"identify_subsets" : True,
"threads" : NUM_THREADS,
"backend" : BACKEND,
"backend_kwargs" : BACKEND_KWARGS,
}
OD_CONFIG = {
"min_obs" : MIN_OBS,
"min_arc_length" : MIN_ARC_LENGTH,
"contamination_percentage" : CONTAMINATION_PERCENTAGE,
"rchi2_threshold" : 10,
"delta" : 1e-6,
"max_iter" : 5,
"method" : "central",
"fit_epoch" : False,
"test_orbit" : None,
"threads" : NUM_THREADS,
"backend" : BACKEND,
"backend_kwargs" : BACKEND_KWARGS,
}
ODP_CONFIG = {
"min_obs" : MIN_OBS,
"min_arc_length" : MIN_ARC_LENGTH,
"contamination_percentage" : 0.0,
"rchi2_threshold" : 5,
"eps" : 1/3600,
"delta" : 1e-8,
"max_iter" : 5,
"method" : "central",
"fit_epoch" : False,
"orbits_chunk_size" : 1,
"observations_chunk_size" : 100000,
"threads" : NUM_THREADS,
"backend" : BACKEND,
"backend_kwargs" : BACKEND_KWARGS,
}
ADES_METADATA = {
"observatory_code" : "I11",
"observatory_name" : "Vera C. Rubin Observatory",
"telescope_aperture" : "8.4",
"telescope_design" : "Reflector",
"telescope_detector" : "CCD",
"submitter" : "D. iRAC",
"observers" : ["D. iRAC"],
"measurers" : ["D. iRAC"],
}
COLUMN_MAPPING = {
### Observation Parameters
# Observation ID
"obs_id" : "obsId",
# Exposure time
"exp_mjd" : "exp_mjd",
# Visit ID
"visit_id" : "visitId",
# Field ID
"field_id" : "fieldId",
# Field RA in degrees
"field_RA_deg" : "fieldRA_deg",
# Field Dec in degrees
"field_Dec_deg" : "fieldDec_deg",
# Night number
"night": "night",
# RA in degrees
"RA_deg" : "RA_deg",
# Dec in degrees
"Dec_deg" : "Dec_deg",
# Observatory code
"observatory_code" : "code",
# Observer's x coordinate in AU
"obs_x_au" : "HEclObsy_X_au",
# Observer's y coordinate in AU
"obs_y_au" : "HEclObsy_Y_au",
# Observer's z coordinate in AU
"obs_z_au" : "HEclObsy_Z_au",
# Magnitude (UNUSED)
"mag" : "VMag",
### Truth Parameters
# Object name
"name" : "designation",
# Observer-object distance in AU
"Delta_au" : "Delta_au",
# Sun-object distance in AU (heliocentric distance)
"r_au" : "r_au",
# Object's x coordinate in AU
"obj_x_au" : "HEclObj_X_au",
# Object's y coordinate in AU
"obj_y_au" : "HEclObj_Y_au",
# Object's z coordinate in AU
"obj_z_au" : "HEclObj_Z_au",
# Object's x velocity in AU per day
"obj_dx/dt_au_p_day" : "HEclObj_dX/dt_au_p_day",
# Object's y velocity in AU per day
"obj_dy/dt_au_p_day" : "HEclObj_dY/dt_au_p_day",
# Object's z velocity in AU per day
"obj_dz/dt_au_p_day" : "HEclObj_dZ/dt_au_p_day",
# Semi-major axis
"a_au" : "a_au",
# Inclination
"i_deg" : "i_deg",
# Eccentricity
"e" : "e",
} | [
"[email protected]"
]
| |
348970a0f4e5c0d7929ac752e3078f95f5443c3a | 6e5ab77fee1fb4a0310213dd8c6dd8601828b1b9 | /Algorithm/Swea/D1_6230.py | 11bc95295c19530488c6fba37d18d628e6562027 | []
| no_license | hongyong3/TIL | 36d031c0da9e3e6db3eebb977bd3e12df00a849f | 7f1492128e957a78fc95b255f4f7f2978161e471 | refs/heads/master | 2023-08-19T09:16:03.231757 | 2023-08-18T09:38:47 | 2023-08-18T09:38:47 | 162,100,258 | 1 | 0 | null | 2023-02-11T00:52:32 | 2018-12-17T08:42:42 | Jupyter Notebook | UTF-8 | Python | false | false | 263 | py | data = [88, 30, 61, 55, 95]
for i in range(5):
if data[i] >= 60:
print("{}번 학생은 {}점으로 {}입니다.".format(i + 1, data[i], "합격"))
else:
print("{}번 학생은 {}점으로 {}입니다.".format(i + 1, data[i], "불합격")) | [
"[email protected]"
]
| |
11663c0f28cb942a4a9a90c69f77584703d14b96 | 5633afdce5fb2209f130bb0cd2c478a35bd75957 | /168-理解function.py | 62a304b055863e54e0b2122b6167d3374a9902b5 | []
| no_license | weiyinfu/learnKeras | 36a68e7f9966bf2ac53bb4767b3754864fe6087d | c011005bf760053e9085a0171702e54d19cafebc | refs/heads/master | 2023-03-06T18:06:32.811186 | 2021-02-22T06:05:57 | 2021-02-22T06:05:57 | 147,919,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | import keras.backend as K
import keras
import tensorflow as tf
"""
keras的function可以方便的求某几个数字的值
"""
input = keras.layers.Input((None,))
output = tf.multiply(input, input)
output2 = keras.layers.multiply([input, input])
called_count = K.variable(0.0)
f = K.function([input], [output, output2, called_count], [K.update_add(called_count, 1)])
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(f([[3, 4, 5]]))
    print(f([[3, 4, 5]]))
    o, oo, c = sess.run([output, output2, called_count], feed_dict={
        input: [[3, 4, 5]]
    })
    print(o, oo, c)
| [
"[email protected]"
]
| |
90f6b044e0738dd4144dea41df919f7fe76752a2 | 167c6226bc77c5daaedab007dfdad4377f588ef4 | /python/ql/test/2/library-tests/PointsTo/import_time/module.py | 0e14ce6e5d765b8d724c6890d6495ef311dde746 | [
"MIT",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-other-copyleft",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown",
"Python-2.0"
]
| permissive | github/codeql | 1eebb449a34f774db9e881b52cb8f7a1b1a53612 | d109637e2d7ab3b819812eb960c05cb31d9d2168 | refs/heads/main | 2023-08-20T11:32:39.162059 | 2023-08-18T14:33:32 | 2023-08-18T14:33:32 | 143,040,428 | 5,987 | 1,363 | MIT | 2023-09-14T19:36:50 | 2018-07-31T16:35:51 | CodeQL | UTF-8 | Python | false | false | 152 | py |
import sys
os_test = sys.platform == "linux2"
version_test = sys.version_info < (3,)
if version_test:
    version_2 = True
else:
    version_3 = False
| [
"[email protected]"
]
| |
fc9a98ef9d50ff78217956a8266a6c2d94e05061 | f67aa51d4afcdb2f31b78032dc910094ec310295 | /2. Python Code/node_tag_audit.py | c7c180574df2c7cce567eab1f29a792c8d6595be | []
| no_license | johncgr/data-wrangle-OpenStreetMap-data | a7e7bc1b0979d897eda55db35678f56b545f8f64 | 90e3aaf0113c312d7caa4a5c0b5978c09a464340 | refs/heads/master | 2016-09-16T11:58:28.772831 | 2015-06-10T20:53:38 | 2015-06-10T20:53:38 | 36,739,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,943 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 25 09:41:27 2015
@author: john
"""
import xml.etree.ElementTree as ET
import time
import re
#time of program start
start = time.time()
#error logging function
def add_error(log, key, error_msg):
if key in log:
log[key].append(error_msg)
else:
log[key] = [error_msg]
#tag audit
def tiger_audit(child, parent_element):
e_att = parent_element.attrib
counties = {'Tarrant, TX', 'Wise, TX', 'Denton, TX', 'Dallas, TX', 'Johnson, TX', 'Parker, TX'}
#produce list of name_type add as entry to summary log
if child.get('k') == "tiger:name_type":
add_error(tiger_name_type_log, e_att['id'], child.get('v'))
#could run into problems with this throwing errors when zips have the suffix
if ( child.get('k') == "tiger:zip_left"
or child.get('k') == "tiger:zip_right" ):
if len(child.get('v')) != 5:
add_error(error_log, e_att['id'], 'tiger:zip is not of correct length')
#if zip code not in list of possible zip codes
if child.get('k') not in zips:
add_error(error_log, e_att['id'], 'tiger:zip is not in list of possible zips')
#check tiger:county for possible county
#if you see errors may need to regex parse this out to get at counties
if child.get('k') == 'tiger:county':
if child.get('v') not in counties:
add_error(error_log, e_att['id'], 'tiger:county not one of possible counties')
#check that tiger:cfcc is in correct format
if child.get('k') == 'tiger:cfcc':
cfcc_pattern = re.compile(r'^[a-zA-Z]\d\d$')
if re.search(cfcc_pattern, child.get('v')) == None:
add_error(error_log, e_att['id'], 'cfcc not in correct format')
def tiger_name_crosscheck(child, tag_name):
#change this in second version to actually crosscheck the fields instead
#of creating a log
#tiger:name_base
if child.get('k') == 'tiger:name_base':
add_error(summary_log, 'tiger:name_base', child.get('v'))
#tiger name_type
if child.get('k') == 'tiger:name_type':
add_error(summary_log, 'tiger:name_type', child.get('v'))
#tiger name_direction_prefix
if child.get('k') == 'tiger:name_direction_prefix':
add_error(summary_log, 'tiger:name_direction_preix', child.get('v'))
#tiger name_direction_suffix
if child.get('k') == 'tiger:name_direction_suffix':
add_error(summary_log, 'tiger:name_direction_suffix', child.get('v'))
def tag_audit(child, parent_element):
e_att = parent_element.attrib
#scan for extraneous or missing attributes
if child.attrib.keys() != ['k', 'v']:
#show missing tags
c_set = set(child.attrib.keys())
t_set = set(['k', 'v'])
missing = t_set - c_set
if len(missing) != 0:
missing_msg = 'child <tag> is missing attribute ' + str(missing)
add_error(error_log, e_att['id'], missing_msg)
#show extraneous tags
extraneous = c_set - t_set
if len(extraneous) != 0:
extraneous_msg = 'child <tag> has extra attribute(s) ' + str(extraneous)
add_error(error_log, e_att['id'], extraneous_msg)
#addr:postcode audit
if child.get('k') == 'addr:postcode':
if child.get('v') not in zips:
add_error(error_log, e_att['id'], str(child.get('v')))
#tiger audit
if child.get('k'):
if child.get('k').startswith('tiger') == True:
tiger_audit(child, parent_element)
#extract tag k:name value, if present
if child.get('k') == 'name':
tag_name = child.get('v')
tiger_name_crosscheck(child, tag_name)
#bounds check maxspeed (should only be in <ways>)
#also check for unit of mph
try:
if child.get('k') == 'maxspeed':
speed_pattern = re.compile(r'(\A\d\d)')
mph_pattern = re.compile(r'mph')
speed = re.match(speed_pattern, child.get('v'))
if speed:
speed = float(speed.group())
if speed > 85:
add_error(error_log, e_att['id'], 'listed maxspeed is greater than 85 m.p.h')
if re.search(mph_pattern, child.get('v')) == None:
print(child.get('v'))
add_error(error_log, e_att['id'],
'maxspeed not in mph or is missing unit designation ')
except KeyError:
pass
return None
############Main Program###########
error_log = {}
node_ids = []
summary_log = {}
tiger_name_type_log = {}
minlat = 32.548
maxlat = 32.996
minlon = -97.5497
maxlon = -97.0319
zips = ['75052','75051', '76034', '76103','76248', '76262', '76001', '76002', '76003', '76004', '76005', '76006', '76007', '76010', '76011', '76012', '76013', '76014', '76015', '76016', '76017', '76018', '76019', '76094', '76096', '76020', '76197', '76198', '76021', '76022', '76095', '76109', '76116', '76126', '76132', '76131', '76191', '76166', '76177', '76034', '76195', '76036', '76016', '76039', '76040', '76140', '76193', '76119', '76140', '76101', '76102', '76103', '76104', '76105', '76106', '76107', '76108', '76109', '76110', '76111', '76112', '76113', '76114', '76115', '76116', '76117', '76118', '76119', '76120', '76121', '76122', '76123', '76124', '76126', '76127', '76129', '76130', '76131', '76132', '76133', '76134', '76135', '76136', '76137', '76140', '76147', '76148', '76150', '76155', '76161', '76162', '76163', '76164', '76166', '76177', '76179', '76180', '76181', '76182', '76185', '76191', '76192', '76193', '76195', '76196', '76197', '76198', '76199', '76244', '76051', '76092', '76099', '76111', '76117', '76137', '76148', '76180', '76052', '76053', '76054', '76244', '76248', '76060', '76192', '76135', '76136', '76108', '76135', '76063', '76127', '76127', '76118', '76180', '76182', '76118', '76180', '76182', '76180', '76114', '76013', '76015', '76020', '76118', '76180', '76118', '76180', '76114', '76131', '76179', '76114', '76092', '76115', '76122', '76196', '76129', '76130', '76019', '76019', '76137', '76148', '76107', '76114', '76108']
#path of file to be parsed
filein = r'/home/john/project/tarrant_county.osm'
for event, el in ET.iterparse(filein):
if el.tag == 'node':
for child in el.findall('./*'):
tag_audit(child, el)
print(time.time() - start)
print(error_log)
#print(error_log)
with open(r'/home/john/project/logs/node_tag_audit_error_log.txt', 'w') as fileout:
fileout.write(str(error_log))
with open(r'/home/john/project/logs/node_tag_audit_tiger_name_type_log.txt', 'w') as fileout:
fileout.write(str(tiger_name_type_log))
with open(r'/home/john/project/logs/node_tag_audit_summary_log.txt', 'w') as fileout:
fileout.write(str(error_log)) | [
"[email protected]"
]
| |
132233e2f673ca46ed09870bc39f3069ada4e184 | d79c4fa73bd26550cfaa5d1a3259b20bda1fba46 | /Tests/Services/test_distance_service.py | 79975e946cb9b7d65f9ff492746e0f981a60d6c6 | []
| no_license | dev-11/coding-test | 37e8372b4eff1b6d5c9b0bd2c0c13f88d0940736 | 7bd56b00d48a0419206b99170075fe34183830ee | refs/heads/master | 2021-07-11T02:49:44.832998 | 2021-03-28T12:08:47 | 2021-03-28T12:08:47 | 233,877,609 | 0 | 0 | null | 2020-01-14T15:52:20 | 2020-01-14T15:52:19 | null | UTF-8 | Python | false | false | 2,074 | py | import unittest
from Services import DistanceService
from Tests.TestEnvironment import get_test_stores
class DistanceServiceTests(unittest.TestCase):
    def test_get_stores_within_range_returns_every_store_in_one_mile_range(self):
        a = [51.460903, -0.301702]
        stores = get_test_stores()
        service = DistanceService()
        result = service.get_stores_within_range(a, stores, 1)
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0]['geolocation']['latitude'], 51.463437)
        self.assertEqual(result[0]['geolocation']['longitude'], -0.288602)
        self.assertEqual(result[0]['name'], 'Richmond')
        self.assertEqual(result[0]['postcode'], 'TW9 1YB')

    def test_get_stores_within_range_returns_every_store_in_five_miles_range(self):
        a = [51.460903, -0.301702]
        stores = get_test_stores()
        service = DistanceService()
        result = service.get_stores_within_range(a, stores, 5)
        self.assertEqual(len(result), 4)
        self.assertEqual(result[0]['geolocation']['latitude'], 51.405065)
        self.assertEqual(result[0]['geolocation']['longitude'], -0.238117)
        self.assertEqual(result[0]['name'], 'New_Malden')
        self.assertEqual(result[0]['postcode'], 'SW20 0JQ')
        self.assertEqual(result[1]['geolocation']['latitude'], 51.442892)
        self.assertEqual(result[1]['geolocation']['longitude'], -0.412804)
        self.assertEqual(result[1]['name'], 'Feltham')
        self.assertEqual(result[1]['postcode'], 'TW13 4EX')
        self.assertEqual(result[2]['geolocation']['latitude'], 51.482172)
        self.assertEqual(result[2]['geolocation']['longitude'], -0.314343)
        self.assertEqual(result[2]['name'], 'Brentford')
        self.assertEqual(result[2]['postcode'], 'TW8 8JW')
        self.assertEqual(result[3]['geolocation']['latitude'], 51.463437)
        self.assertEqual(result[3]['geolocation']['longitude'], -0.288602)
        self.assertEqual(result[3]['name'], 'Richmond')
        self.assertEqual(result[3]['postcode'], 'TW9 1YB')
| [
"[email protected]"
]
| |
9e20c44700047479c01f6cdeb7fbfcafb618f3b9 | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-vod/huaweicloudsdkvod/v1/model/show_asset_meta_response.py | 17beda5d5d2fc1bd79e8b76d4ed6bfa0f640b853 | [
"Apache-2.0"
]
| permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,451 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowAssetMetaResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'asset_info_array': 'list[AssetInfo]',
'is_truncated': 'int',
'total': 'int'
}
attribute_map = {
'asset_info_array': 'asset_info_array',
'is_truncated': 'is_truncated',
'total': 'total'
}
def __init__(self, asset_info_array=None, is_truncated=None, total=None):
"""ShowAssetMetaResponse - a model defined in huaweicloud sdk"""
super(ShowAssetMetaResponse, self).__init__()
self._asset_info_array = None
self._is_truncated = None
self._total = None
self.discriminator = None
if asset_info_array is not None:
self.asset_info_array = asset_info_array
if is_truncated is not None:
self.is_truncated = is_truncated
if total is not None:
self.total = total
@property
def asset_info_array(self):
"""Gets the asset_info_array of this ShowAssetMetaResponse.
媒资信息列表。
:return: The asset_info_array of this ShowAssetMetaResponse.
:rtype: list[AssetInfo]
"""
return self._asset_info_array
@asset_info_array.setter
def asset_info_array(self, asset_info_array):
"""Sets the asset_info_array of this ShowAssetMetaResponse.
媒资信息列表。
:param asset_info_array: The asset_info_array of this ShowAssetMetaResponse.
:type: list[AssetInfo]
"""
self._asset_info_array = asset_info_array
@property
def is_truncated(self):
"""Gets the is_truncated of this ShowAssetMetaResponse.
列表是否被截断。 取值如下: - 1:表示本次查询未返回全部结果。 - 0:表示本次查询已经返回了全部结果。
:return: The is_truncated of this ShowAssetMetaResponse.
:rtype: int
"""
return self._is_truncated
@is_truncated.setter
def is_truncated(self, is_truncated):
"""Sets the is_truncated of this ShowAssetMetaResponse.
列表是否被截断。 取值如下: - 1:表示本次查询未返回全部结果。 - 0:表示本次查询已经返回了全部结果。
:param is_truncated: The is_truncated of this ShowAssetMetaResponse.
:type: int
"""
self._is_truncated = is_truncated
@property
def total(self):
"""Gets the total of this ShowAssetMetaResponse.
查询媒资总数。 > 暂只能统计2万个媒资,若您需要查询具体的媒资总数,请[提交工单](https://console.huaweicloud.com/ticket/?#/ticketindex/business?productTypeId=462902cc39a04ab3a429df872021f970)申请。
:return: The total of this ShowAssetMetaResponse.
:rtype: int
"""
return self._total
@total.setter
def total(self, total):
"""Sets the total of this ShowAssetMetaResponse.
查询媒资总数。 > 暂只能统计2万个媒资,若您需要查询具体的媒资总数,请[提交工单](https://console.huaweicloud.com/ticket/?#/ticketindex/business?productTypeId=462902cc39a04ab3a429df872021f970)申请。
:param total: The total of this ShowAssetMetaResponse.
:type: int
"""
self._total = total
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowAssetMetaResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
]
| |
af062882db668d2127cd9f91c3691c449ef42328 | 12c41119156dd3783c3801e07f5f973289f26bb0 | /aliyun-python-sdk-green/aliyunsdkgreen/request/v20170823/DescribeWebsiteScanResultRequest.py | f09d346c2c80b7eb9219b58dbf61434df7b191ec | [
"Apache-2.0"
]
| permissive | toywei/aliyun-openapi-python-sdk | bfe0893da38af9b222ce072fd7587d5b6cdce204 | ce8f683e3201fca8c473512267f50a34f71e31d3 | refs/heads/master | 2020-08-07T23:42:00.053692 | 2019-10-08T08:50:21 | 2019-10-08T08:50:21 | 213,626,962 | 1 | 0 | NOASSERTION | 2019-10-08T11:43:15 | 2019-10-08T11:43:15 | null | UTF-8 | Python | false | false | 2,640 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeWebsiteScanResultRequest(RpcRequest):
    def __init__(self):
        RpcRequest.__init__(self, 'Green', '2017-08-23', 'DescribeWebsiteScanResult','green')

    def get_TotalCount(self):
        return self.get_query_params().get('TotalCount')

    def set_TotalCount(self,TotalCount):
        self.add_query_param('TotalCount',TotalCount)

    def get_SubServiceModule(self):
        return self.get_query_params().get('SubServiceModule')

    def set_SubServiceModule(self,SubServiceModule):
        self.add_query_param('SubServiceModule',SubServiceModule)

    def get_SiteUrl(self):
        return self.get_query_params().get('SiteUrl')

    def set_SiteUrl(self,SiteUrl):
        self.add_query_param('SiteUrl',SiteUrl)

    def get_SourceIp(self):
        return self.get_query_params().get('SourceIp')

    def set_SourceIp(self,SourceIp):
        self.add_query_param('SourceIp',SourceIp)

    def get_HandleStatus(self):
        return self.get_query_params().get('HandleStatus')

    def set_HandleStatus(self,HandleStatus):
        self.add_query_param('HandleStatus',HandleStatus)

    def get_Domain(self):
        return self.get_query_params().get('Domain')

    def set_Domain(self,Domain):
        self.add_query_param('Domain',Domain)

    def get_PageSize(self):
        return self.get_query_params().get('PageSize')

    def set_PageSize(self,PageSize):
        self.add_query_param('PageSize',PageSize)

    def get_CurrentPage(self):
        return self.get_query_params().get('CurrentPage')

    def set_CurrentPage(self,CurrentPage):
        self.add_query_param('CurrentPage',CurrentPage)

    def get_Label(self):
        return self.get_query_params().get('Label')

    def set_Label(self,Label):
        self.add_query_param('Label',Label)

    def get_Lang(self):
        return self.get_query_params().get('Lang')

    def set_Lang(self,Lang):
        self.add_query_param('Lang',Lang)
| [
"[email protected]"
]
| |
4aa2a44af09dce4919240097d2cf50df5c2286cc | 56f155db28b5703786a08fef0ecf821aefb6ffe5 | /lib/testmill/test/test_images.py | f43dc574d9685d3d89f1196cbad690c754365c2e | [
"Apache-2.0"
]
| permissive | h4ckl4bm3/testmill | 595c30facec943b3593febe080b1e6602e82dee2 | 607d5622f14785e1b2f785e162ae862c5e638c5f | refs/heads/master | 2021-05-27T08:58:17.899271 | 2013-04-10T15:40:12 | 2013-04-10T15:41:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,035 | py | # Copyright 2012-2013 Ravello Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
import os
from testmill.main import main
from testmill.test import *
@systemtest
class TestImages(TestSuite):
    """Run some basic test on the standard images."""

    def test_images(self):
        args = get_common_args()
        args += ['run', '-m', 'platformtest.yml',
                 'platformtest', 'sh check_image.sh']
        retval = main(args)
        assert retval == 0
| [
"[email protected]"
]
| |
94a57d37ee01ad48525f12206f52a6d3317127e3 | 04164e028417ff8472b9f2bfec0ec45b0888f743 | /development/pysrc/extract.py | 1b6bc09351d99ac31b3285f0ed8f27a28be337e3 | []
| no_license | Huaguiyuan/quantum-honeycomp | c2b810ff5f5e25d41b1f0c1c1ff7ae500b04dc31 | 50deb0e59fffe4031f05094572552ca5be59e741 | refs/heads/master | 2020-03-22T19:09:58.148862 | 2018-07-08T19:51:58 | 2018-07-08T19:51:58 | 140,510,217 | 1 | 2 | null | 2018-07-11T02:20:32 | 2018-07-11T02:20:32 | null | UTF-8 | Python | false | false | 2,779 | py | # routines to extract channels from a matrix
from __future__ import division
import numpy as np
def spin_channel(m,spin_column=None,spin_row=None,has_spin=True):
"""Extract a channel from a matrix"""
if not has_spin: return m # return initial
if (spin_row is None) or (spin_column is None): return m # return initial
n = m.shape[0] # shape of the matrix
n2 = n//2 # number of orbitals
out = np.zeros((n,n),dtype=np.complex)
if spin_column=="up": ii = 0
else: ii = 1
if spin_row=="up": jj = 0
else: jj = 1
for i in range(n2):
for j in range(n2): out[i,j] = m[2*i+ii,2*j+jj]
return np.matrix(out)
def swave(m):
"""Extract the swave pairing from a matrix, assuming
the Nambu spinor basis"""
n = m.shape[0]//4 # number of sites
ds = np.zeros(n,dtype=np.complex) # pairing
for i in range(n):
ds[i] = m[4*i,4*i+2] # get the pairing
return ds
def mz(m):
"""Extract the z component of the magnetism, assume spin degree of freedom"""
n = m.shape[0]//2 # number of sites
ds = np.zeros(n).real # pairing
for i in range(n):
ds[i] = (m[2*i+1,2*i+1] - m[2*i,2*i]).real/2. # get the pairing
return ds
def mx(m):
"""Extract the z component of the magnetism, assume spin degree of freedom"""
n = m.shape[0]//2 # number of sites
ds = np.zeros(n).real # pairing
for i in range(n):
ds[i] = m[2*i,2*i+1].real
return ds
def my(m):
"""Extract the z component of the magnetism, assume spin degree of freedom"""
n = m.shape[0]//2 # number of sites
ds = np.zeros(n).real # pairing
for i in range(n):
ds[i] = -m[2*i,2*i+1].imag
return ds
def onsite(m,has_spin=True):
"""Extract the z component of the magnetism, assume spin degree of freedom"""
if has_spin: # has spin degree of freedom
n = m.shape[0]//2 # number of sites
ds = np.zeros(n).real # pairing
for i in range(n):
ds[i] = (m[2*i,2*i].real + m[2*i+1,2*i+1].real)/2.
return ds
else:
n = m.shape[0] # number of sites
ds = np.zeros(n).real # pairing
for i in range(n):
ds[i] = m[i,i].real
return ds
def hopping_spinful(m,cutoff=0.001):
"""Extract hopping"""
n = m.shape[0]//2 # number sites
ii = []
jj = []
ts = []
for i in range(n):
for j in range(i,n):
t = np.abs(m[2*i,2*j]) + np.abs(m[2*i+1,2*j+1])
if t>cutoff:
ii.append(i)
jj.append(j)
ts.append(t)
return ii,jj,np.array(ts) # return pairs
def hopping_spinless(m,cutoff=0.001):
"""Extract hopping"""
n = m.shape[0] # number of sites
ii = []
jj = []
ts = []
for i in range(n):
for j in range(i,n):
t = np.abs(m[i,j])
if t>cutoff:
ii.append(i)
jj.append(j)
ts.append(t)
return ii,jj,np.array(ts) # return pairs
| [
"[email protected]"
]
| |
72b7bb7acba687c0f6f14413cd6d43962e8a3351 | bb33e6be8316f35decbb2b81badf2b6dcf7df515 | /source/res/scripts/common/Lib/encodings/iso2022_jp_ext.py | 79e0c5be45183dd71284af4365cf20ec67ea90b1 | []
| no_license | StranikS-Scan/WorldOfTanks-Decompiled | 999c9567de38c32c760ab72c21c00ea7bc20990c | d2fe9c195825ececc728e87a02983908b7ea9199 | refs/heads/1.18 | 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 | Python | UTF-8 | Python | false | false | 964 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/common/Lib/encodings/iso2022_jp_ext.py
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp_ext')
class Codec(codecs.Codec):
    encode = codec.encode
    decode = codec.decode

class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, codecs.IncrementalEncoder):
    codec = codec

class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, codecs.IncrementalDecoder):
    codec = codec

class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    codec = codec

class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    codec = codec

def getregentry():
    return codecs.CodecInfo(name='iso2022_jp_ext', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter)
| [
"[email protected]"
]
| |
01f03677c1c199afc3763b2adf640bf22524e264 | 8e69eee9b474587925e22413717eb82e4b024360 | /v2.5.7/toontown/coghq/CashbotMintLavaRoomFoyer_Action01.py | b7cfc755c82949fc1587ac270f73a11b84148d12 | [
"MIT"
]
| permissive | TTOFFLINE-LEAK/ttoffline | afaef613c36dc3b70514ccee7030ba73c3b5045b | bb0e91704a755d34983e94288d50288e46b68380 | refs/heads/master | 2020-06-12T15:41:59.411795 | 2020-04-17T08:22:55 | 2020-04-17T08:22:55 | 194,348,185 | 5 | 4 | null | null | null | null | UTF-8 | Python | false | false | 9,037 | py | from toontown.coghq.SpecImports import *
GlobalEntities = {1000: {'type': 'levelMgr', 'name': 'LevelMgr', 'comment': '',
'parentEntId': 0,
'cogLevel': 0,
'farPlaneDistance': 1500,
'modelFilename': 'phase_10/models/cashbotHQ/ZONE18a',
'wantDoors': 1},
1001: {'type': 'editMgr', 'name': 'EditMgr',
'parentEntId': 0,
'insertEntity': None,
'removeEntity': None,
'requestNewEntity': None,
'requestSave': None},
0: {'type': 'zone', 'name': 'UberZone',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
10000: {'type': 'attribModifier', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10004,
'attribName': 'modelPath',
'recursive': 1,
'typeName': 'model',
'value': ''},
10001: {'type': 'attribModifier', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10004,
'attribName': 'scale',
'recursive': 1,
'typeName': 'model',
'value': 'Vec3(.955,1,1)'},
10019: {'type': 'attribModifier', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10015,
'attribName': 'modelPath',
'recursive': 1,
'typeName': 'model',
'value': ''},
10006: {'type': 'gear', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10003,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'degreesPerSec': -4.0,
'gearScale': 14.193780914463838,
'modelType': 'mint',
'orientation': 'horizontal',
'phaseShift': 0},
10007: {'type': 'gear', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 10003,
'pos': Point3(0.0, 0.0, 4.28999996185),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'degreesPerSec': 4.0,
'gearScale': 14.193780914463838,
'modelType': 'mint',
'orientation': 'horizontal',
'phaseShift': 0},
10009: {'type': 'gear', 'name': 'copy of <unnamed> (2)',
'comment': '',
'parentEntId': 10003,
'pos': Point3(0.0, 0.0, 8.57999992371),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'degreesPerSec': -4.0,
'gearScale': 14.193780914463838,
'modelType': 'mint',
'orientation': 'horizontal',
'phaseShift': 0.055},
10014: {'type': 'gear', 'name': 'copy of <unnamed> (3)',
'comment': '',
'parentEntId': 10003,
'pos': Point3(0.0, 0.0, 12.8699998856),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'degreesPerSec': 4.0,
'gearScale': 14.193780914463838,
'modelType': 'mint',
'orientation': 'horizontal',
'phaseShift': 0.06},
10018: {'type': 'healBarrel', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10017,
'pos': Point3(-2.03643107414, 2.34967470169, 5.46433734894),
'hpr': Vec3(34.1522636414, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'rewardPerGrab': 5,
'rewardPerGrabMax': 0},
10002: {'type': 'model', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10003,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(6.5, 6.5, 6.5),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_10/models/cogHQ/RoundShadow'},
10005: {'type': 'model', 'name': 'doorwayCrate',
'comment': '',
'parentEntId': 0,
'pos': Point3(27.0090961456, 0.850000023842, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2'},
10008: {'type': 'model', 'name': 'shaft',
'comment': '',
'parentEntId': 10003,
'pos': Point3(0.0, 0.0, 7.25891637802),
'hpr': Vec3(0.0, 0.0, 180.0),
'scale': Vec3(5.35842609406, 5.35842609406, 5.35842609406),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_10/models/cashbotHQ/MintGearPost'},
10010: {'type': 'model', 'name': 'middle',
'comment': '',
'parentEntId': 10004,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(0.954999983311, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2'},
10011: {'type': 'model', 'name': 'copy of middle',
'comment': '',
'parentEntId': 10004,
'pos': Point3(-5.72357320786, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(0.954999983311, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2'},
10012: {'type': 'model', 'name': 'copy of middle',
'comment': '',
'parentEntId': 10004,
'pos': Point3(5.71999979019, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(0.954999983311, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2'},
10013: {'type': 'model', 'name': 'copy of middle',
'comment': '',
'parentEntId': 10004,
'pos': Point3(11.4399995804, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(0.954999983311, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2'},
10015: {'type': 'model', 'name': 'crateStack',
'comment': '',
'parentEntId': 0,
'pos': Point3(-18.0376968384, 20.2023410797, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2'},
10016: {'type': 'model', 'name': 'upper',
'comment': '',
'parentEntId': 10015,
'pos': Point3(0.0, 0.0, 5.42841148376),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2'},
10017: {'type': 'model', 'name': 'copy of upper',
'comment': '',
'parentEntId': 10016,
'pos': Point3(0.0, 0.0, 5.43412637711),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2'},
10021: {'type': 'model', 'name': 'crateStack',
'comment': '',
'parentEntId': 10020,
'pos': Point3(21.064825058, 20.1899757385, 9.87216758728),
'hpr': Vec3(270.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_10/models/cashbotHQ/crates_C1'},
10003: {'type': 'nodepath', 'name': 'gears',
'comment': '',
'parentEntId': 0,
'pos': Point3(-3.18650078773, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0)},
10004: {'type': 'nodepath', 'name': 'wall',
'comment': '',
'parentEntId': 0,
'pos': Point3(19.5468139648, 6.37875938416, 0.0),
'hpr': Point3(270.0, 0.0, 0.0),
'scale': Vec3(1.95812249184, 1.5, 1.79999995232)},
10020: {'type': 'nodepath', 'name': 'props',
'comment': '',
'parentEntId': 0,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1}}
Scenario0 = {}
levelSpec = {'globalEntities': GlobalEntities, 'scenarios': [
Scenario0]} | [
"[email protected]"
]
| |
93f01551fc71c691ab7c4d7b49966cb6e2af604c | e4200b764d0b4ffba65180e54cf84b30ee84efcc | /selfdrive/boardd/boardd_setup.py | f987c7aa29e08bc7bdd5e335dc38ac0c14730201 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
]
| permissive | kegman/openpilot | c9ba96a72d905956f02c684e065091e023942883 | 54a8614b5a6451154817a4c6c86141c96103ae47 | refs/heads/kegman-0.7 | 2022-05-22T17:07:16.656336 | 2020-01-23T16:40:55 | 2020-01-23T16:40:55 | 229,979,925 | 105 | 212 | MIT | 2022-03-13T05:47:51 | 2019-12-24T17:27:11 | C | UTF-8 | Python | false | false | 1,019 | py | import subprocess
from distutils.core import Extension, setup
from Cython.Build import cythonize
from common.cython_hacks import BuildExtWithoutPlatformSuffix
from common.basedir import BASEDIR
import os
PHONELIBS = os.path.join(BASEDIR, 'phonelibs')
ARCH = subprocess.check_output(["uname", "-m"], encoding='utf8').rstrip()
ARCH_DIR = 'x64' if ARCH == "x86_64" else 'aarch64'
setup(name='Boardd API Implementation',
cmdclass={'build_ext': BuildExtWithoutPlatformSuffix},
ext_modules=cythonize(
Extension(
"boardd_api_impl",
libraries=[':libcan_list_to_can_capnp.a', ':libcapnp.a', ':libkj.a'] if ARCH == "x86_64" else [':libcan_list_to_can_capnp.a', 'capnp', 'kj'],
library_dirs=[
'./',
PHONELIBS + '/capnp-cpp/' + ARCH_DIR + '/lib/',
PHONELIBS + '/capnp-c/' + ARCH_DIR + '/lib/'
],
sources=['boardd_api_impl.pyx'],
language="c++",
extra_compile_args=["-std=c++11"],
)
)
)
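# Build note (an assumption -- the invocation of this file is driven by the repo's
# own build scripts, which are not shown here): a Cython setup script like this is
# typically run as `python boardd_setup.py build_ext --inplace` to compile
# boardd_api_impl.pyx into an importable extension module.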
| [
"[email protected]"
]
| |
30e99cd125126168a62391d1dd2870494f66f8d3 | 45de7d905486934629730945619f49281ad19359 | /xlsxwriter/test/comparison/test_optimize11.py | 419bdafcf7b28b46a1cc0c98248bc2b40b67c8d9 | [
"BSD-2-Clause"
]
| permissive | jmcnamara/XlsxWriter | 599e1d225d698120ef931a776a9d93a6f60186ed | ab13807a1be68652ffc512ae6f5791d113b94ee1 | refs/heads/main | 2023-09-04T04:21:04.559742 | 2023-08-31T19:30:52 | 2023-08-31T19:30:52 | 7,433,211 | 3,251 | 712 | BSD-2-Clause | 2023-08-28T18:52:14 | 2013-01-04T01:07:06 | Python | UTF-8 | Python | false | false | 2,279 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2023, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("optimize11.xlsx")
def test_create_file_no_close(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(
self.got_filename, {"constant_memory": True, "in_memory": False}
)
for i in range(1, 10):
worksheet = workbook.add_worksheet()
worksheet.write("A1", "Hello 1")
worksheet.write("A2", "Hello 2")
worksheet.write("A4", "Hello 3")
workbook.close()
self.assertExcelEqual()
def test_create_file_with_close(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(
self.got_filename, {"constant_memory": True, "in_memory": False}
)
for i in range(1, 10):
worksheet = workbook.add_worksheet()
worksheet.write("A1", "Hello 1")
worksheet.write("A2", "Hello 2")
worksheet.write("A4", "Hello 3")
worksheet._opt_close()
workbook.close()
self.assertExcelEqual()
def test_create_file_with_reopen(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(
self.got_filename, {"constant_memory": True, "in_memory": False}
)
for i in range(1, 10):
worksheet = workbook.add_worksheet()
worksheet.write("A1", "Hello 1")
worksheet._opt_close()
worksheet._opt_reopen()
worksheet.write("A2", "Hello 2")
worksheet._opt_close()
worksheet._opt_reopen()
worksheet.write("A4", "Hello 3")
worksheet._opt_close()
worksheet._opt_reopen()
worksheet._opt_close()
workbook.close()
self.assertExcelEqual()
| [
"[email protected]"
]
| |
60970ab65f2384908efc1c74b7fa6fdefbaadf46 | b6a48f9a6158bcb7e6fc75e5eacaef19250fc4c5 | /cosmos/ingestion/ingest/process/detection/src/torch_model/model/utils/config_manager.py | c5af72c9c0d77749c41e4e4151ac91a4091dc749 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
]
| permissive | UW-COSMOS/Cosmos | dcde3be6534e411a20fcf1ff36e422fc8af2ac8a | 5ed4a4c149e03773690668437d2f93aa532453c6 | refs/heads/master | 2023-09-01T18:03:20.525760 | 2023-08-31T13:56:21 | 2023-08-31T13:56:21 | 159,849,583 | 39 | 14 | null | 2023-09-13T14:39:45 | 2018-11-30T16:24:59 | Python | UTF-8 | Python | false | false | 1,242 | py | import yaml
class Struct:
def __init__(self, **entries):
for key, value in entries.items():
value2 = (Struct(**value) if isinstance(value, dict) else value)
self.__dict__[key] = value2
class ConfigManager:
"""
Basic config singleton for easily accessing config parameters
"""
class __Singleton:
def __init__(self, fp):
"""
Initialize a singleton config object
:param fp:
"""
with open(fp) as fh:
config = yaml.load(fh, yaml.Loader)
for key, value in config.items():
value2 = (Struct(**value) if isinstance(value, dict) else value)
self.__dict__[key] = value2
def merge(self, data):
for key in data.keys():
                self.__dict__[key] = data[key]
instance = None
def __init__(self, fp=None):
if (ConfigManager.instance is None) and (fp is not None):
ConfigManager.instance = ConfigManager.__Singleton(fp)
def __getattr__(self, item):
return getattr(ConfigManager.instance, item)
def __setattr__(self, key, value):
setattr(ConfigManager.instance, key, value)
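# Illustrative usage (a sketch; "model_config.yaml" and the "train" section are
# hypothetical names, not part of this module):
#
#   cfg = ConfigManager("model_config.yaml")  # first call loads and caches the YAML file
#   lr = cfg.train.learning_rate              # nested mappings are exposed as attributes via Struct
#   ConfigManager().train                     # later constructions reuse the same singleton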
| [
"[email protected]"
]
| |
90472ae1500003128c099c82b18c65cd294fb594 | 56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e | /CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544840/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_67/run_cfg.py | b7a42574ec89cfa3b75f992ff17c74f8999faf28 | []
| no_license | rmanzoni/HTT | 18e6b583f04c0a6ca10142d9da3dd4c850cddabc | a03b227073b2d4d8a2abe95367c014694588bf98 | refs/heads/master | 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,539 | py | import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544840/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring('keep *',
'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1297.root',
'/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1298.root',
'/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1299.root',
'/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_13.root',
'/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_130.root')
)
| [
"[email protected]"
]
| |
97c3730c522f14d3e70b194878b0d860135c6b52 | def06466dadf32385b083615e46a07188ef841c2 | /web_app/primes/primes/wsgi.py | 4839f01dfbebfa726790474ac354f5d2b5730dc8 | []
| no_license | ChillarAnand/just-queue-it | ead51fa0fa14bca6276c452b32a8d4e382e37f95 | c58a214507b429d8854a1049e4b5ed6377435a82 | refs/heads/master | 2020-05-23T14:05:38.511931 | 2015-02-19T21:42:34 | 2015-02-19T21:42:34 | 31,038,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,560 | py | """
WSGI config for primes project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from os.path import abspath, dirname
from sys import path
SITE_ROOT = dirname(dirname(abspath(__file__)))
path.append(SITE_ROOT)
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "jajaja.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "primes.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| [
"[email protected]"
]
| |
76365823d072d54826924eb954f54f08ee1178c8 | 616c3c02be31b9ae4d06bd7c5a8d4a2e7c446aa1 | /401.二进制手表.py | c1394764a8ed4675d2bc74aff7690c1c59620be7 | []
| no_license | L1nwatch/leetcode-python | 8b7c47c04ee9400d50d8b0764a544a0463df8f06 | 0484cbc3273ada25992c72105658cd67411c5d39 | refs/heads/master | 2023-01-11T14:53:15.339276 | 2023-01-11T05:24:43 | 2023-01-11T05:24:43 | 194,516,548 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | #
# @lc app=leetcode.cn id=401 lang=python3
#
# [401] Binary Watch
#
# @lc code=start
class Solution:
def readBinaryWatch(self, turnedOn: int) -> List[str]:
result = list()
for hour in range(12):
bin_hour_1 = bin(hour).count("1")
for minute in range(60):
if bin_hour_1 + bin(minute).count("1") == turnedOn:
result.append(f"{hour}:{minute:0>2d}")
return result
# @lc code=end
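# Illustrative check (not part of the submitted solution): with turnedOn = 1 the
# solver lists every time that lights exactly one LED, i.e.
#   Solution().readBinaryWatch(1)
#   # -> ['0:01', '0:02', '0:04', '0:08', '0:16', '0:32', '1:00', '2:00', '4:00', '8:00']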
| [
"[email protected]"
]
| |
2d35ba558e65b2aa0a4c270411cd0a7207189d72 | 9cf434b6ee59ab22496ee031fb4ab145bbaff1a2 | /tranque_v1.8.4_source/backend/src/targets/migrations/0025_threshold_kind.py | 9da935043934aadd20fada38b72528d8345ff01b | []
| no_license | oliverhernandezmoreno/SourcesOH | f2ff1a5e3377f0ac1fb8b3153d99d0ee703700b7 | 5d9ca5ab1caceafd4d11207139c9e56210156ef8 | refs/heads/master | 2023-01-05T02:51:25.172103 | 2020-08-27T14:39:34 | 2020-08-27T14:39:34 | 64,422,812 | 0 | 1 | null | 2022-12-30T17:25:10 | 2016-07-28T19:33:44 | JavaScript | UTF-8 | Python | false | false | 402 | py | # Generated by Django 2.1 on 2019-06-04 19:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('targets', '0024_target_remote'),
]
operations = [
migrations.AddField(
model_name='threshold',
name='kind',
field=models.SlugField(blank=True, max_length=255, null=True),
),
]
| [
"[email protected]"
]
| |
1180c2df653973dfeb4478f34ad3c39fd22cab39 | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/cloud/workflows/v1beta/workflows-v1beta-py/google/cloud/workflows_v1beta/types/__init__.py | 66aec79fe2b77723f73afe591aafa1edbbb647c0 | [
"Apache-2.0"
]
| permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,050 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .workflows import (
CreateWorkflowRequest,
DeleteWorkflowRequest,
GetWorkflowRequest,
ListWorkflowsRequest,
ListWorkflowsResponse,
OperationMetadata,
UpdateWorkflowRequest,
Workflow,
)
__all__ = (
'CreateWorkflowRequest',
'DeleteWorkflowRequest',
'GetWorkflowRequest',
'ListWorkflowsRequest',
'ListWorkflowsResponse',
'OperationMetadata',
'UpdateWorkflowRequest',
'Workflow',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
]
| bazel-bot-development[bot]@users.noreply.github.com |
03756a7acb99e8907d2bf21186f702c06e303a3b | 731c136992f98cab61508b9e5661afbd491962b6 | /Sort/Sort.py | 2f1338d9504c5dc5d5304e321cc3d067484b1d45 | []
| no_license | yangze01/py_LeetCode | c311235dbe1053c68694aea04fe29296ccb3a6e2 | 2b7213d00e2e482379a2f160b0d8e267a7951599 | refs/heads/master | 2021-01-20T06:03:53.852486 | 2017-12-08T01:30:26 | 2017-12-08T01:30:26 | 101,479,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,980 | py | #coding=utf8
import sys
"""
Algorithm review: the eight classic sorting algorithms.
"""
def bubble_sort(list):
"""
    Bubble sort: repeatedly swap adjacent out-of-order pairs.
    :param list: list to sort (modified in place)
    :return: the sorted list
    """
    length = len(list)
    # Outer pass
    for index in range(length):
        # Inner pass over the shrinking unsorted prefix
        for j in range(1, length - index):
            if list[j-1] > list[j]:
                # Swap the two values
list[j-1], list[j] = list[j], list[j-1]
return list
def bubble_sort_flag(list):
"""
    Improved bubble sort: return early as soon as a full pass makes no swaps,
    i.e. the list is already in order.
    :param list: list to sort (modified in place)
    :return: the sorted list
    """
    length = len(list)
    for index in range(length):
        # Early-exit flag: stays True if this pass makes no swaps
flag = True
for j in range(1, length - index):
if list[j - 1] > list[j]:
list[j - 1], list[j] = list[j], list[j - 1]
flag = False
if flag:
return list
return list
def selection_sort(list):
"""
    Selection sort: on each pass find the smallest remaining element
    and move it to the front of the unsorted part.
    :param list: list to sort (modified in place)
    :return: the sorted list
"""
n = len(list)
for i in range(0, n):
min_index = i
for j in range(i + 1, n):
if list[j] < list[min_index]:
min_index = j
list[min_index], list[i] = list[i], list[min_index]
return list
def insert_sort(list):
"""
    Insertion sort: build a sorted prefix and, for each unsorted element,
    scan the sorted part from back to front to find its insertion point.
    Steps:
    1. Treat the first element as already sorted.
    2. Take the next element and scan the sorted part from back to front.
    3. If a sorted element is larger than the new element, shift it one position right.
    4. Repeat step 3 until an element smaller than or equal to the new one is found.
    5. Insert the new element after that position.
    6. Repeat steps 2-5 for the remaining elements.
    :param list: list to sort (modified in place)
    :return: the sorted list
"""
n = len(list)
for i in range(1, n):
        # Compare the current element with the one before it;
        # only act if it is smaller
        if list[i] < list[i - 1]:
            # Take the element out
            temp = list[i]
            # Remember the index where it will be inserted
            index = i
            # Walk backwards through the sorted prefix
            for j in range(i - 1, -1, -1):
                # Shift every element larger than temp one position to the right
                if list[j] > temp:
                    list[j + 1] = list[j]
                    index = j
                else:
                    break
            # Put the element into its final position
            list[index] = temp
return list
def insert_sort2(lists):
"""
    Insertion sort (simpler variant without an early break).
:param lists:
:return:
"""
    # Insertion sort
    count = len(lists)
    # Each pass inserts lists[i] into the already-sorted prefix lists[:i]
    for i in range(1, count):
        # Remember the current element
        key = lists[i]
        j = i - 1
        # Walk back through the sorted prefix to find where key belongs
while j >= 0:
if lists[j] > key:
lists[j + 1] = lists[j]
lists[j] = key
j -= 1
return lists
# def insert_sort3(lists):
# count = len(lists)
# for i in range(1, count):
#         # Remember the current element
# key = lists[i]
# j = i - 1
# while j >= 0:
# if lists[j] > key:
# lists[j+1] = lists[j]
# lists[j] = key
# j -= 1
# return lists
def shell_sort(lists):
"""
    Shell sort: insertion-sort elements that are a fixed gap apart,
    halving the gap each round until it reaches 1.
    :param lists: list to sort (modified in place)
    :return: the sorted list
"""
n = len(lists)
    # Initial gap
    gap = round(n/2)
    while gap > 0:
        for i in range(gap, n):
            # Insertion sort within each gap-separated subsequence
            temp = lists[i]
            j = i
            # Shift larger gap-neighbours to the right
            # while j >= gap and list[j - gap] > temp:
            #     list[j] = list[j - gap]
            while j >= gap and lists[j - gap] > temp:
                lists[j] = lists[j - gap]
                j -= gap
            lists[j] = temp
        # Halve the gap for the next round
        gap = round(gap / 2)
return lists
# Merge sort, recursive (top-down) implementation
def merge_sort(lists):
    # A list of length <= 1 is already sorted
    if len(lists) <= 1:
        return lists
    # Split the list in half
    middle = len(lists) // 2
    left = merge_sort(lists[:middle])
    right = merge_sort(lists[middle:])
    # Merge the two sorted halves
    return merge(left, right)
# Merge two sorted lists into a single sorted list
def merge(left, right):
l,r=0,0
result=[]
while l<len(left) and r<len(right):
if left[l] <right[r]:
result.append(left[l])
l += 1
else:
result.append(right[r])
r += 1
# print(l,r)
result += left[l:]
result += right[r:]
return result
# Merge sort, iterative (bottom-up) implementation; sorts lists in place and returns None
def merge_sort2(lists):
    length = len(lists)
    step = 1
    # Merge runs of width 1, 2, 4, 8, ... until the whole list is a single run
    while step <= length:
        offset = step << 1
        for index in range(0, length, offset):
            merge2(lists, index, min(index+step, length-1), min(index+offset-1, length-1))
        step = offset
def merge2(lists, head1, head2, tail2):
    # Merge two sorted in-place ranges: [head1, tail1] and [head2, tail2]
tail1 = head2 - 1
start = head1
index = 0
tmp = [0] * (tail2-head1+1)
while head1 <= tail1 or head2 <= tail2:
if head1 > tail1:
tmp[index] = lists[head2]
elif head2 > tail2:
tmp[index] = lists[head1]
else:
if lists[head1] <= lists[head2]:
tmp[index] = lists[head1]
else:
tmp[index] = lists[head2]
if head1 <= tail1 and tmp[index] == lists[head1]:
head1 += 1
else:
head2 += 1
index += 1
for i in range(start, tail2 + 1):
lists[i] = tmp[i-start]
# Quick sort, in-place recursive implementation
def quick_sort(lists, left, right):
if left >= right:
return lists
key = lists[left]
low = left
high = right
while left < right:
while left < right and lists[right] >= key:
right -= 1
lists[left] = lists[right]
while left < right and lists[left] <= key:
left += 1
lists[right] = lists[left]
lists[right] = key
quick_sort(lists, low, left - 1)
quick_sort(lists, left + 1, high)
return lists
# Quick sort, simple implementation that builds new lists around a pivot
def quick_sort2(lists):
    less = []
    pivotList = []
    more = []
    # Base case of the recursion
    if len(lists) <= 1:
        return lists
    else:
        # Use the first value as the pivot
        pivot = lists[0]
        for i in lists:
            # Values smaller than the pivot go into less
            if i < pivot:
                less.append(i)
            # Values larger than the pivot go into more
elif i > pivot:
more.append(i)
else:
pivotList.append(i)
less = quick_sort2(less)
more = quick_sort2(more)
return less + pivotList + more
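# Heap sort helpers: build_heap turns the list into a max-heap, and heap_sort then
# repeatedly swaps the root (current maximum) to the end of the unsorted prefix and
# calls adjust_heap to restore the heap property on the shortened prefix.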
def adjust_heap(lists, i, size):
# print(1)
    lchild = 2 * i + 1  # index of i's left child
    rchild = 2 * i + 2  # index of i's right child
    max = i
    if i <= size/2:
        if lchild < size and lists[lchild] > lists[max]:
            max = lchild
        if rchild < size and lists[rchild] > lists[max]:
            max = rchild
        if max != i:
            lists[i], lists[max] = lists[max], lists[i]
            adjust_heap(lists, max, size)  # re-heapify so the subtree rooted at max stays a heap
def build_heap(lists, size):
for i in range(0, (int(size/2)))[::-1]:
adjust_heap(lists, i, size)
def heap_sort(lists):
size = len(lists)
build_heap(lists, size)
for i in range(0, size)[::-1]:
lists[0], lists[i] = lists[i], lists[0]
adjust_heap(lists, 0, i)
return lists
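# Rough complexities: bubble, selection and insertion sort are O(n^2); shell sort
# falls in between depending on the gap sequence; merge sort, heap sort and the
# average case of quick sort are O(n log n).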
if __name__ == "__main__":
# print(1)
lists = [7, 13, 3, 1, 5, 10, 2, 20]
print("bubble_sort")
print(bubble_sort(lists))
lists = [7, 13, 3, 1, 5, 10, 2, 20]
print("bubble_sort2")
print(bubble_sort_flag(lists))
lists = [7, 13, 3, 1, 5, 10, 2, 20]
print("selection sort")
    print(selection_sort(lists))
lists = [7, 13, 3, 1, 5, 10, 2, 20]
print("insert sort")
print(insert_sort2(lists))
lists = [7, 13, 3, 1, 5, 10, 2, 20]
print("shell sort")
print(shell_sort(lists))
lists = [7, 13, 3, 1, 5, 10, 2, 20]
print("merge sort")
print(merge_sort(lists))
lists = [7, 13, 3, 1, 5, 10, 2, 20]
print("merge sort2")
merge_sort2(lists)
print(lists)
lists = [7, 13, 3, 1, 5, 10, 2, 20]
print("quick sort")
print(quick_sort(lists, 0, len(lists)-1))
lists = [7, 13, 3, 1, 5, 10, 2, 20]
print("heap sort")
print(heap_sort(lists))
| [
"[email protected]"
]
| |
58270a7c262944cd188186aa67ab970c20b93094 | 7bb9f2e6e8993c6104c1109c1c2714e331c09ac2 | /toolbox/workload/forms.py | e1b7346061cdb45ffd663c20b22b963dac2ebc2f | []
| no_license | oinopion/toolbox | 6a775156cb20660f2d92e1d825e4cbabc9df3be7 | a8df57ee6f2343aaaa512703da74dae5fa3d4cfd | refs/heads/master | 2021-01-19T18:32:54.484006 | 2011-12-22T15:00:48 | 2011-12-22T15:00:48 | 3,033,048 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 978 | py | # encoding: utf-8
from django import forms
from django.forms import fields
from toolbox.workload.models import Assignment
from workload.grid import date_range_inclusive
class AssignmentForm(forms.ModelForm):
beginnig = fields.DateField(
widget=forms.DateInput(attrs={'class': 'date-picker'}))
end = fields.DateField(
widget=forms.DateInput(attrs={'class': 'date-picker'}))
next = fields.CharField(widget=forms.HiddenInput())
class Meta:
exclude = ['date']
model = Assignment
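    # Note: this save() override ignores both ``commit`` and the form's own model
    # instance; it creates one Assignment row per weekday in the selected range
    # (weekends are dropped by date_range_inclusive(exclude_weekends=True)).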
def save(self, commit=True):
dates = date_range_inclusive(self.cleaned_data['beginnig'],
self.cleaned_data['end'],
exclude_weekends=True)
for date in dates:
Assignment.objects.create(**{
'date': date,
'person': self.cleaned_data['person'],
'project': self.cleaned_data['project'],
})
| [
"[email protected]"
]
| |
389e956262735deadbff885eaace35377b9672fa | cbcbb04be207839cab8d26d352f64cc505a5eec9 | /virtual/lib/python3.6/site-packages/virtualenv/create/creator.py | 45075dae96cf5208adec66636979324cfeb1a046 | [
"MIT"
]
| permissive | Antony-me/perfect-pitch | 8c61b7d6de1d00fddff5c2feea0293eb85ea8f92 | a2de0adaa3a22844390627459796b823e4ac8e71 | refs/heads/main | 2023-01-10T23:25:46.931458 | 2020-11-02T20:34:52 | 2020-11-02T20:34:52 | 308,002,503 | 0 | 0 | MIT | 2020-11-02T07:58:34 | 2020-10-28T12:01:00 | Python | UTF-8 | Python | false | false | 8,501 | py | from __future__ import absolute_import, print_function, unicode_literals
import json
import logging
import os
import sys
from abc import ABCMeta, abstractmethod
from argparse import ArgumentTypeError
from ast import literal_eval
from collections import OrderedDict
from textwrap import dedent
from six import add_metaclass
from virtualenv.discovery.cached_py_info import LogCmd
from virtualenv.info import WIN_CPYTHON_2
from virtualenv.util.path import Path, safe_delete
from virtualenv.util.six import ensure_str, ensure_text
from virtualenv.util.subprocess import run_cmd
from virtualenv.version import __version__
from .pyenv_cfg import PyEnvCfg
HERE = Path(os.path.abspath(__file__)).parent
DEBUG_SCRIPT = HERE / "debug.py"
class CreatorMeta(object):
def __init__(self):
self.error = None
@add_metaclass(ABCMeta)
class Creator(object):
"""A class that given a python Interpreter creates a virtual environment"""
def __init__(self, options, interpreter):
"""Construct a new virtual environment creator.
:param options: the CLI option as parsed from :meth:`add_parser_arguments`
:param interpreter: the interpreter to create virtual environment from
"""
self.interpreter = interpreter
self._debug = None
self.dest = Path(options.dest)
self.clear = options.clear
self.pyenv_cfg = PyEnvCfg.from_folder(self.dest)
self.app_data = options.app_data
def __repr__(self):
return ensure_str(self.__unicode__())
def __unicode__(self):
return "{}({})".format(self.__class__.__name__, ", ".join("{}={}".format(k, v) for k, v in self._args()))
def _args(self):
return [
("dest", ensure_text(str(self.dest))),
("clear", self.clear),
]
@classmethod
def can_create(cls, interpreter):
"""Determine if we can create a virtual environment.
:param interpreter: the interpreter in question
:return: ``None`` if we can't create, any other object otherwise that will be forwarded to \
:meth:`add_parser_arguments`
"""
return True
@classmethod
def add_parser_arguments(cls, parser, interpreter, meta, app_data):
"""Add CLI arguments for the creator.
:param parser: the CLI parser
:param app_data: the application data folder
:param interpreter: the interpreter we're asked to create virtual environment for
:param meta: value as returned by :meth:`can_create`
"""
parser.add_argument(
"dest",
help="directory to create virtualenv at",
type=cls.validate_dest,
)
parser.add_argument(
"--clear",
dest="clear",
action="store_true",
help="remove the destination directory if exist before starting (will overwrite files otherwise)",
default=False,
)
@abstractmethod
def create(self):
"""Perform the virtual environment creation."""
raise NotImplementedError
@classmethod
def validate_dest(cls, raw_value):
"""No path separator in the path, valid chars and must be write-able"""
def non_write_able(dest, value):
common = Path(*os.path.commonprefix([value.parts, dest.parts]))
raise ArgumentTypeError(
"the destination {} is not write-able at {}".format(dest.relative_to(common), common),
)
# the file system must be able to encode
# note in newer CPython this is always utf-8 https://www.python.org/dev/peps/pep-0529/
encoding = sys.getfilesystemencoding()
refused = OrderedDict()
kwargs = {"errors": "ignore"} if encoding != "mbcs" else {}
for char in ensure_text(raw_value):
try:
trip = char.encode(encoding, **kwargs).decode(encoding)
if trip == char:
continue
raise ValueError(trip)
except ValueError:
refused[char] = None
if refused:
raise ArgumentTypeError(
"the file system codec ({}) cannot handle characters {!r} within {!r}".format(
encoding,
"".join(refused.keys()),
raw_value,
),
)
if os.pathsep in raw_value:
raise ArgumentTypeError(
"destination {!r} must not contain the path separator ({}) as this would break "
"the activation scripts".format(raw_value, os.pathsep),
)
value = Path(raw_value)
if value.exists() and value.is_file():
raise ArgumentTypeError("the destination {} already exists and is a file".format(value))
if (3, 3) <= sys.version_info <= (3, 6):
# pre 3.6 resolve is always strict, aka must exists, sidestep by using os.path operation
dest = Path(os.path.realpath(raw_value))
else:
dest = Path(os.path.abspath(str(value))).resolve() # on Windows absolute does not imply resolve so use both
value = dest
while dest:
if dest.exists():
if os.access(ensure_text(str(dest)), os.W_OK):
break
else:
non_write_able(dest, value)
base, _ = dest.parent, dest.name
if base == dest:
non_write_able(dest, value) # pragma: no cover
dest = base
return str(value)
def run(self):
if self.dest.exists() and self.clear:
logging.debug("delete %s", self.dest)
safe_delete(self.dest)
self.create()
self.set_pyenv_cfg()
self.setup_ignore_vcs()
def set_pyenv_cfg(self):
self.pyenv_cfg.content = OrderedDict()
self.pyenv_cfg["home"] = self.interpreter.system_exec_prefix
self.pyenv_cfg["implementation"] = self.interpreter.implementation
self.pyenv_cfg["version_info"] = ".".join(str(i) for i in self.interpreter.version_info)
self.pyenv_cfg["virtualenv"] = __version__
def setup_ignore_vcs(self):
"""Generate ignore instructions for version control systems."""
# mark this folder to be ignored by VCS, handle https://www.python.org/dev/peps/pep-0610/#registered-vcs
git_ignore = self.dest / ".gitignore"
if not git_ignore.exists():
git_ignore.write_text(
dedent(
"""
# created by virtualenv automatically
*
""",
).lstrip(),
)
# Mercurial - does not support the .hgignore file inside a subdirectory directly, but only if included via the
# subinclude directive from root, at which point on might as well ignore the directory itself, see
# https://www.selenic.com/mercurial/hgignore.5.html for more details
# Bazaar - does not support ignore files in sub-directories, only at root level via .bzrignore
# Subversion - does not support ignore files, requires direct manipulation with the svn tool
@property
def debug(self):
"""
:return: debug information about the virtual environment (only valid after :meth:`create` has run)
"""
if self._debug is None and self.exe is not None:
self._debug = get_env_debug_info(self.exe, self.debug_script(), self.app_data)
return self._debug
# noinspection PyMethodMayBeStatic
def debug_script(self):
return DEBUG_SCRIPT
def get_env_debug_info(env_exe, debug_script, app_data):
env = os.environ.copy()
env.pop(str("PYTHONPATH"), None)
with app_data.ensure_extracted(debug_script) as debug_script:
cmd = [str(env_exe), str(debug_script)]
if WIN_CPYTHON_2:
cmd = [ensure_text(i) for i in cmd]
logging.debug(str("debug via %r"), LogCmd(cmd))
code, out, err = run_cmd(cmd)
# noinspection PyBroadException
try:
if code != 0:
result = literal_eval(out)
else:
result = json.loads(out)
if err:
result["err"] = err
except Exception as exception:
return {"out": out, "err": err, "returncode": code, "exception": repr(exception)}
if "sys" in result and "path" in result["sys"]:
del result["sys"]["path"][0]
return result
| [
"[email protected]"
]
| |
c33f29d71bbf135ea10ec41aa87c6f4a64b32f7e | 62179a165ec620ba967dbc20016e890978fbff50 | /tests/torch/modules/seq2seq/seq2seq_base.py | ebe3e13913b31bd5beac08c8b2640c3364faf5eb | [
"Apache-2.0"
]
| permissive | openvinotoolkit/nncf | 91fcf153a96f85da166aacb7a70ca4941e4ba4a4 | c027c8b43c4865d46b8de01d8350dd338ec5a874 | refs/heads/develop | 2023-08-24T11:25:05.704499 | 2023-08-23T14:44:05 | 2023-08-23T14:44:05 | 263,687,600 | 558 | 157 | Apache-2.0 | 2023-09-14T17:06:41 | 2020-05-13T16:41:05 | Python | UTF-8 | Python | false | false | 3,173 | py | # Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torch import nn
from torch.nn.functional import log_softmax
PAD = 0
class Seq2Seq(nn.Module):
"""
Generic Seq2Seq module, with an encoder and a decoder.
"""
def __init__(self, encoder=None, decoder=None, batch_first=False):
"""
Constructor for the Seq2Seq module.
:param encoder: encoder module
:param decoder: decoder module
:param batch_first: if True the model uses (batch, seq, feature)
tensors, if false the model uses (seq, batch, feature) tensors
"""
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.batch_first = batch_first
def encode(self, inputs, lengths):
"""
Applies the encoder to inputs with a given input sequence lengths.
:param inputs: tensor with inputs (batch, seq_len) if 'batch_first'
else (seq_len, batch)
:param lengths: vector with sequence lengths (excluding padding)
"""
return self.encoder(inputs, lengths)
def decode(self, inputs, context, inference=False):
"""
Applies the decoder to inputs, given the context from the encoder.
:param inputs: tensor with inputs (batch, seq_len) if 'batch_first'
else (seq_len, batch)
:param context: context from the encoder
:param inference: if True inference mode, if False training mode
"""
return self.decoder(inputs, context, inference)
def generate(self, inputs, context, beam_size):
"""
Autoregressive generator, works with SequenceGenerator class.
Executes decoder (in inference mode), applies log_softmax and topK for
inference with beam search decoding.
:param inputs: tensor with inputs to the decoder
:param context: context from the encoder
:param beam_size: beam size for the generator
returns: (words, logprobs, scores, new_context)
words: indices of topK tokens
logprobs: log probabilities of topK tokens
scores: scores from the attention module (for coverage penalty)
new_context: new decoder context, includes new hidden states for
decoder RNN cells
"""
logits, scores, new_context = self.decode(inputs, context, True)
logprobs = log_softmax(logits, dim=-1)
logprobs, words = logprobs.topk(beam_size, dim=-1)
return words, logprobs, scores, new_context
def forward(self, input_encoder, input_enc_len, input_decoder):
raise NotImplementedError
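# Illustrative wiring (a sketch, not part of this module): a concrete subclass
# would pass encoder/decoder modules to the constructor and implement forward()
# roughly as
#
#     context = self.encode(input_encoder, input_enc_len)
#     output, _, _ = self.decode(input_decoder, (context, input_enc_len, None))
#     return output
#
# where the tuple packs the encoder context with the source lengths for attention.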
| [
"[email protected]"
]
| |
7083f94716d817a0f64bfe154b86ee5261c2109e | e17b0ad0ebeb361e5565eb3d12e717f296a7b878 | /SheetAPI/config_example.py | a3fa30cc58a3bb13f0e1eee83397cd254f4f0c2e | []
| no_license | easy-rpg/SheetAPI | 94ea732083c3a7a82577e59e3a882a878772d6eb | 5542197f8388eed761a15a79c6ccca4fd481ccba | refs/heads/master | 2022-12-11T17:01:16.130002 | 2018-07-05T00:26:48 | 2018-07-05T00:26:48 | 131,898,341 | 1 | 0 | null | 2022-11-22T02:30:09 | 2018-05-02T19:44:34 | Python | UTF-8 | Python | false | false | 231 | py | # DB Heroku
# import dj_database_url
# DATABASES = {'default': dj_database_url.config(conn_max_age=600, ssl_require=True)}
# DB LOCAL
DB_HOST = "localhost"
DB_PORT = ""
DB_NAME = "DB_NAME"
DB_USER = "DB_USER"
DB_PASSWORD = ""
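# Illustrative consumption (an assumption -- the module that reads these constants
# is not shown here), e.g. with psycopg2:
#   conn = psycopg2.connect(host=DB_HOST, port=DB_PORT or None,
#                           dbname=DB_NAME, user=DB_USER, password=DB_PASSWORD)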
| [
"[email protected]"
]
| |
40984c2fb2d800dd58b439a634f44d0ceae530a0 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-eihealth/huaweicloudsdkeihealth/v1/model/list_message_statistics_response.py | 1e7816007f8524b86b1888cb87a5c5deb1613cd5 | [
"Apache-2.0"
]
| permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 3,215 | py | # coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListMessageStatisticsResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'count': 'int'
}
attribute_map = {
'count': 'count'
}
def __init__(self, count=None):
"""ListMessageStatisticsResponse
The model defined in huaweicloud sdk
        :param count: Total number of messages
:type count: int
"""
super(ListMessageStatisticsResponse, self).__init__()
self._count = None
self.discriminator = None
if count is not None:
self.count = count
@property
def count(self):
"""Gets the count of this ListMessageStatisticsResponse.
        Total number of messages
:return: The count of this ListMessageStatisticsResponse.
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this ListMessageStatisticsResponse.
        Total number of messages
:param count: The count of this ListMessageStatisticsResponse.
:type count: int
"""
self._count = count
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListMessageStatisticsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
]
| |
0667f97fb57c8c12e435d2f0e0d28df739385605 | fcf3c983043273c4e57ac33330efaa0a9e5643a2 | /model-optimizer/mo/front/mxnet/extractors/utils_test.py | 070d5323122452347c77478d42a838fab10ae476 | [
"Apache-2.0"
]
| permissive | p3tromyz0n/dldt | e7ab259848c90fdffd1395eaf5cf53ecd2b1e2f3 | 669bee86e580cbbc8ef40b440ab195ba2cbf5142 | refs/heads/2018 | 2020-05-15T13:03:47.748654 | 2019-03-14T10:13:27 | 2019-03-14T10:13:27 | 158,445,061 | 0 | 1 | Apache-2.0 | 2019-04-19T15:24:15 | 2018-11-20T20:07:50 | C++ | UTF-8 | Python | false | false | 6,599 | py | """
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from unittest.mock import patch
import mxnet as mx
from mo.front.mxnet.extractors.utils import AttrDictionary
from mo.front.mxnet.extractors.utils import load_params
class TestAttrDictionary(unittest.TestCase):
def testBool(self):
attrs = {
"global_pool": "True"
}
attr_dict = AttrDictionary(attrs)
global_pool = attr_dict.bool("global_pool", False)
self.assertEqual(True, global_pool)
def testBoolAsDigits(self):
attrs = {
"global_pool": "1"
}
attr_dict = AttrDictionary(attrs)
global_pool = attr_dict.bool("global_pool", False)
self.assertEqual(True, global_pool)
def testBoolWithoutAttr(self):
attrs = {
"something": "1"
}
attr_dict = AttrDictionary(attrs)
global_pool = attr_dict.bool("global_pool", False)
self.assertEqual(False, global_pool)
def testStrAttr(self):
attrs = {
"something": "Val"
}
attr_dict = AttrDictionary(attrs)
attr = attr_dict.str("something", "Text")
self.assertEqual("Val", attr)
def testStrAttrWithoutAttr(self):
attrs = {
"something2": "Val"
}
attr_dict = AttrDictionary(attrs)
attr = attr_dict.str("something", "Text")
self.assertEqual("Text", attr)
def testFloatAttr(self):
attrs = {
"something": "0.5"
}
attr_dict = AttrDictionary(attrs)
attr = attr_dict.float("something", 0.1)
self.assertEqual(0.5, attr)
def testFloatWithoutAttr(self):
attrs = {
"something2": "0.5"
}
attr_dict = AttrDictionary(attrs)
attr = attr_dict.float("something", 0.1)
self.assertEqual(0.1, attr)
def testIntAttr(self):
attrs = {
"something": "5"
}
attr_dict = AttrDictionary(attrs)
attr = attr_dict.float("something", 1)
self.assertEqual(5, attr)
def testIntWithoutAttr(self):
attrs = {
"something2": "5"
}
attr_dict = AttrDictionary(attrs)
attr = attr_dict.float("something", 1)
self.assertEqual(1, attr)
def testTupleAttr(self):
attrs = {
"something": "(5,6,7)"
}
attr_dict = AttrDictionary(attrs)
a, b, c = attr_dict.tuple("something", int, (1, 2, 3))
self.assertEqual(5, a)
self.assertEqual(6, b)
self.assertEqual(7, c)
def testTupleWithoutAttr(self):
attrs = {
"something2": "(5,6,7)"
}
attr_dict = AttrDictionary(attrs)
a, b, c = attr_dict.tuple("something", int, (1, 2, 3))
self.assertEqual(1, a)
self.assertEqual(2, b)
self.assertEqual(3, c)
def testTupleWithEmptyTupleAttr(self):
attrs = {
"something2": "()"
}
attr_dict = AttrDictionary(attrs)
a, b = attr_dict.tuple("something", int, (2, 3))
self.assertEqual(2, a)
self.assertEqual(3, b)
def testTupleWithEmptyListAttr(self):
attrs = {
"something2": "[]"
}
attr_dict = AttrDictionary(attrs)
a, b = attr_dict.tuple("something", int, (2, 3))
self.assertEqual(2, a)
self.assertEqual(3, b)
def testListAttr(self):
attrs = {
"something": "5,6,7"
}
attr_dict = AttrDictionary(attrs)
l = attr_dict.list("something", int, [1, 2, 3])
self.assertEqual(5, l[0])
self.assertEqual(6, l[1])
self.assertEqual(7, l[2])
def testListWithoutAttr(self):
attrs = {
"something2": "5,6,7"
}
attr_dict = AttrDictionary(attrs)
l = attr_dict.list("something", int, [1, 2, 3])
self.assertEqual(1, l[0])
self.assertEqual(2, l[1])
self.assertEqual(3, l[2])
class TestUtils(unittest.TestCase):
@patch('mxnet.nd.load')
def test_load_symbol_nodes_from_params(self, mock_nd_load):
mock_nd_load.return_value = {'arg:conv0_weight': mx.nd.array([1, 2], dtype='float32'),
'arg:conv1_weight': mx.nd.array([2, 3], dtype='float32'),
'aux:bn_data_mean': mx.nd.array([5, 6], dtype='float32')}
model_params = load_params("model.params")
self.assertTrue('conv0_weight' in model_params._param_names)
self.assertTrue('conv1_weight' in model_params._param_names)
self.assertTrue('bn_data_mean' in model_params._aux_names)
self.assertEqual([1., 2.], model_params._arg_params['conv0_weight'].asnumpy().tolist())
self.assertEqual([2., 3.], model_params._arg_params['conv1_weight'].asnumpy().tolist())
self.assertEqual([5., 6.], model_params._aux_params['bn_data_mean'].asnumpy().tolist())
@patch('mxnet.nd.load')
def test_load_symbol_nodes_from_args_nd(self, mock_nd_load):
mock_nd_load.return_value = {'conv0_weight': mx.nd.array([1, 2], dtype='float32'),
'conv1_weight': mx.nd.array([2, 3], dtype='float32')}
model_params = load_params("args_model.nd", data_names=('data1', 'data2'))
self.assertTrue('conv0_weight' in model_params._param_names)
self.assertTrue('conv1_weight' in model_params._param_names)
self.assertEqual([1., 2.], model_params._arg_params['conv0_weight'].asnumpy().tolist())
self.assertEqual([2., 3.], model_params._arg_params['conv1_weight'].asnumpy().tolist())
@patch('mxnet.nd.load')
def test_load_symbol_nodes_from_auxs_nd(self, mock_nd_load):
mock_nd_load.return_value = {'bn_data_mean': mx.nd.array([5, 6], dtype='float32')}
model_params = load_params("auxs_model.nd")
self.assertTrue('bn_data_mean' in model_params._aux_names)
self.assertEqual([5., 6.], model_params._aux_params['bn_data_mean'].asnumpy().tolist())
| [
"[email protected]"
]
| |
4bf01b7ae1c62134c913b6119a7902635486c910 | f44c9ab8a25c5f4a2811fc1e77a59cdce2fe588c | /analysis/check_audio_problems.py | 790ab272d5f3822830562109658a06f5fe559128 | []
| no_license | vejmelkam/StimPackC | 645e1137ef057379971054778cf45f7a9d89ed07 | b82dbbf267073017be3202996906fd0fe900e89e | refs/heads/master | 2021-01-10T20:39:14.301366 | 2011-08-24T17:39:54 | 2011-08-24T17:39:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | #!/usr/bin/env python
import sys
import string
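# Usage (inferred from the argv handling below): check_audio_problems.py <vlc_log_file>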
# read lines from log file
f = open(sys.argv[1], "r")
lines = f.readlines()
f.close()
# count the number of instances of "dropping buffer"
found = 0
for line in lines:
if string.find(line, "dropping buffer") >= 0:
found += 1
print("\n **** check audio problems script ****");
print("VLC log contains %d lines." % len(lines))
if found < 20:
print("Audio problems noted %d times, no problem for 4 videos." % found)
else:
print("Audio problems noted %d times !!! Check audio log and question subject." % found)
| [
"devnull@localhost"
]
| devnull@localhost |
cdc75150fd9e9b0bb84009d08bf0c00bb9a0f43b | 05ac6b13a380f1b0ed0676afaae9f8467b86b4a9 | /livegraph.py | d4bb9ed1fad2e85763f54554907e3f0591ba2853 | [
"MIT"
]
| permissive | UncleEngineer/LiveGraph | fe6177473dca2bb16815dfb0f65dd3084b72c10e | 825dc11663fe3dbbfde6a722bf9ec35adac1c7f2 | refs/heads/main | 2023-02-13T09:19:44.307744 | 2021-01-25T16:28:16 | 2021-01-25T16:28:16 | 332,809,674 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,419 | py | """
===============
Embedding in Tk
===============
"""
from tkinter import *
from tkinter import ttk
import random
import tkinter
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
GUI = Tk()
GUI.geometry('600x700')
GUI.wm_title("AutoUpdate Graph")
MF1 = Frame(GUI)
MF1.pack()
# toolbar = NavigationToolbar2Tk(canvas, GUI)
# toolbar.update()
# canvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)
#canvas.get_tk_widget().place(x=20,y=20)
#toolbar.pack_forget()
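# UpdateData() below destroys the previous canvas widget (if any), redraws the
# figure with fresh random values, and re-schedules itself every 5000 ms through
# Tk's after() so the plot keeps refreshing on its own.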
def UpdateData():
global y
global canvas
global cv
try:
cv.destroy()
except:
pass
# remove line
# create graph
fig = Figure(figsize=(6, 5), dpi=100)
t = [0,1,2,3,4]
y = []
for i in range(len(t)):
d = random.randint(30,70)
y.append(d)
label = ['A','B','C','D','E']
graph = fig.add_subplot(111)
graph.plot(t, y)
graph.axis([None, None, 0, 100])
canvas = FigureCanvasTkAgg(fig, master=MF1) # A tk.DrawingArea.
canvas.draw()
canvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)
cv = canvas.get_tk_widget()
cv.pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)
MF1.after(5000,UpdateData)
#button = ttk.Button(master=GUI, text="Update Data", command=UpdateData)
#button.pack(ipadx=20 , ipady=10 ,pady=20)
UpdateData()
GUI.mainloop()
| [
"[email protected]"
]
| |
4652f613145fb60655bd9d03b2e0216af7a37090 | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/cloud/retail/v2beta/retail-v2beta-py/google/cloud/retail_v2beta/types/product.py | e4a0de410942b7658ddae38bbd0a119804277476 | [
"Apache-2.0"
]
| permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,576 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.retail_v2beta.types import common
from google.protobuf import timestamp_pb2 as timestamp # type: ignore
from google.protobuf import wrappers_pb2 as wrappers # type: ignore
__protobuf__ = proto.module(
package='google.cloud.retail.v2beta',
manifest={
'Product',
},
)
class Product(proto.Message):
r"""Product captures all metadata information of items to be
recommended or searched.
Attributes:
name (str):
Immutable. Full resource name of the product, such as
"projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id".
The branch ID must be "default_branch".
id (str):
Immutable. [Product][google.cloud.retail.v2beta.Product]
identifier, which is the final component of
[name][google.cloud.retail.v2beta.Product.name]. For
example, this field is "id_1", if
[name][google.cloud.retail.v2beta.Product.name] is
"projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/id_1".
This field must be a UTF-8 encoded string with a length
limit of 128 characters. Otherwise, an INVALID_ARGUMENT
error is returned.
Google Merchant Center property
`id <https://support.google.com/merchants/answer/6324405>`__.
Schema.org Property
`Product.sku <https://schema.org/sku>`__.
type_ (google.cloud.retail_v2beta.types.Product.Type):
Immutable. The type of the product. This
field is output-only.
primary_product_id (str):
Variant group identifier. Must be an
[id][google.cloud.retail.v2beta.Product.id], with the same
parent branch with this product. Otherwise, an error is
thrown.
For
[Type.PRIMARY][google.cloud.retail.v2beta.Product.Type.PRIMARY]
[Product][google.cloud.retail.v2beta.Product]s, this field
can only be empty or set to the same value as
[id][google.cloud.retail.v2beta.Product.id].
For VARIANT [Product][google.cloud.retail.v2beta.Product]s,
this field cannot be empty. A maximum of 2,000 products are
allowed to share the same
[Type.PRIMARY][google.cloud.retail.v2beta.Product.Type.PRIMARY]
[Product][google.cloud.retail.v2beta.Product]. Otherwise, an
INVALID_ARGUMENT error is returned.
Google Merchant Center Property
`item_group_id <https://support.google.com/merchants/answer/6324507>`__.
Schema.org Property
`Product.inProductGroupWithID <https://schema.org/inProductGroupWithID>`__.
This field must be enabled before it can be used. `Learn
more </recommendations-ai/docs/catalog#item-group-id>`__.
categories (Sequence[str]):
Product categories. This field is repeated for supporting
one product belonging to several parallel categories.
Strongly recommended using the full path for better search /
recommendation quality.
To represent full path of category, use '>' sign to separate
different hierarchies. If '>' is part of the category name,
please replace it with other character(s).
For example, if a shoes product belongs to both ["Shoes &
Accessories" -> "Shoes"] and ["Sports & Fitness" ->
"Athletic Clothing" -> "Shoes"], it could be represented as:
::
"categories": [
"Shoes & Accessories > Shoes",
"Sports & Fitness > Athletic Clothing > Shoes"
]
Must be set for
[Type.PRIMARY][google.cloud.retail.v2beta.Product.Type.PRIMARY]
[Product][google.cloud.retail.v2beta.Product] otherwise an
INVALID_ARGUMENT error is returned.
At most 250 values are allowed per
[Product][google.cloud.retail.v2beta.Product]. Empty values
are not allowed. Each value must be a UTF-8 encoded string
with a length limit of 5,000 characters. Otherwise, an
INVALID_ARGUMENT error is returned.
Google Merchant Center property
`google_product_category <https://support.google.com/merchants/answer/6324436>`__.
Schema.org property [Product.category]
(https://schema.org/category).
title (str):
Required. Product title.
This field must be a UTF-8 encoded string with a length
limit of 128 characters. Otherwise, an INVALID_ARGUMENT
error is returned.
Google Merchant Center property
`title <https://support.google.com/merchants/answer/6324415>`__.
Schema.org property
`Product.name <https://schema.org/name>`__.
description (str):
Product description.
This field must be a UTF-8 encoded string with a length
limit of 5,000 characters. Otherwise, an INVALID_ARGUMENT
error is returned.
Google Merchant Center property
`description <https://support.google.com/merchants/answer/6324468>`__.
schema.org property
`Product.description <https://schema.org/description>`__.
attributes (Sequence[google.cloud.retail_v2beta.types.Product.AttributesEntry]):
Highly encouraged. Extra product attributes to be included.
For example, for products, this could include the store
name, vendor, style, color, etc. These are very strong
signals for recommendation model, thus we highly recommend
providing the attributes here.
Features that can take on one of a limited number of
possible values. Two types of features can be set are:
Textual features. some examples would be the brand/maker of
a product, or country of a customer. Numerical features.
Some examples would be the height/weight of a product, or
age of a customer.
For example:
``{ "vendor": {"text": ["vendor123", "vendor456"]}, "lengths_cm": {"numbers":[2.3, 15.4]}, "heights_cm": {"numbers":[8.1, 6.4]} }``.
A maximum of 150 attributes are allowed. Otherwise, an
INVALID_ARGUMENT error is returned.
The key must be a UTF-8 encoded string with a length limit
of 5,000 characters. Otherwise, an INVALID_ARGUMENT error is
returned.
tags (Sequence[str]):
Custom tags associated with the product.
At most 250 values are allowed per
[Product][google.cloud.retail.v2beta.Product]. This value
must be a UTF-8 encoded string with a length limit of 1,000
characters. Otherwise, an INVALID_ARGUMENT error is
returned.
This tag can be used for filtering recommendation results by
passing the tag as part of the
[PredictRequest.filter][google.cloud.retail.v2beta.PredictRequest.filter].
Google Merchant Center property
`custom_label_0–4 <https://support.google.com/merchants/answer/6324473>`__.
price_info (google.cloud.retail_v2beta.types.PriceInfo):
Product price and cost information.
Google Merchant Center property
`price <https://support.google.com/merchants/answer/6324371>`__.
available_time (google.protobuf.timestamp_pb2.Timestamp):
The timestamp when this
[Product][google.cloud.retail.v2beta.Product] becomes
available recommendation and search.
availability (google.cloud.retail_v2beta.types.Product.Availability):
The online availability of the
[Product][google.cloud.retail.v2beta.Product]. Default to
[Availability.IN_STOCK][google.cloud.retail.v2beta.Product.Availability.IN_STOCK].
Google Merchant Center Property
`availability <https://support.google.com/merchants/answer/6324448>`__.
Schema.org Property
`Offer.availability <https://schema.org/availability>`__.
available_quantity (google.protobuf.wrappers_pb2.Int32Value):
The available quantity of the item.
uri (str):
Canonical URL directly linking to the product detail page.
This field must be a UTF-8 encoded string with a length
limit of 5,000 characters. Otherwise, an INVALID_ARGUMENT
error is returned.
Google Merchant Center property
`link <https://support.google.com/merchants/answer/6324416>`__.
Schema.org property `Offer.url <https://schema.org/url>`__.
images (Sequence[google.cloud.retail_v2beta.types.Image]):
Product images for the product.
A maximum of 300 images are allowed.
Google Merchant Center property
`image_link <https://support.google.com/merchants/answer/6324350>`__.
Schema.org property
`Product.image <https://schema.org/image>`__.
"""
class Type(proto.Enum):
r"""The type of this product."""
TYPE_UNSPECIFIED = 0
PRIMARY = 1
VARIANT = 2
COLLECTION = 3
class Availability(proto.Enum):
r"""Product availability. If this field is unspecified, the
product is assumed to be in stock.
"""
AVAILABILITY_UNSPECIFIED = 0
IN_STOCK = 1
OUT_OF_STOCK = 2
PREORDER = 3
BACKORDER = 4
name = proto.Field(proto.STRING, number=1)
id = proto.Field(proto.STRING, number=2)
type_ = proto.Field(proto.ENUM, number=3,
enum=Type,
)
primary_product_id = proto.Field(proto.STRING, number=4)
categories = proto.RepeatedField(proto.STRING, number=7)
title = proto.Field(proto.STRING, number=8)
description = proto.Field(proto.STRING, number=10)
attributes = proto.MapField(proto.STRING, proto.MESSAGE, number=12,
message=common.CustomAttribute,
)
tags = proto.RepeatedField(proto.STRING, number=13)
price_info = proto.Field(proto.MESSAGE, number=14,
message=common.PriceInfo,
)
available_time = proto.Field(proto.MESSAGE, number=18,
message=timestamp.Timestamp,
)
availability = proto.Field(proto.ENUM, number=19,
enum=Availability,
)
available_quantity = proto.Field(proto.MESSAGE, number=20,
message=wrappers.Int32Value,
)
uri = proto.Field(proto.STRING, number=22)
images = proto.RepeatedField(proto.MESSAGE, number=23,
message=common.Image,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
]
| bazel-bot-development[bot]@users.noreply.github.com |
cd4b5d06ac6645f6260588192fe3ce2be88410b7 | 59bd9c968a3a31a73d17f252fe716a3eacdf7f4f | /portfolio/Python/scrapy/seapets/ebay_spider.py | 60ab55266368702543f063870e4045f0adfb606e | [
"Apache-2.0"
]
| permissive | 0--key/lib | 113ff1e9cf75e446fa50eb065bc3bc36c090d636 | a619938ea523e96ab9e676ace51f5a129e6612e6 | refs/heads/master | 2023-06-23T22:17:54.244257 | 2023-06-21T17:42:57 | 2023-06-21T17:42:57 | 23,730,551 | 3 | 5 | null | 2016-03-22T08:19:30 | 2014-09-06T08:46:41 | Python | UTF-8 | Python | false | false | 1,675 | py | import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.url import urljoin_rfc
from scrapy.utils.response import get_base_url
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from product_spiders.fuzzywuzzy import process
from product_spiders.fuzzywuzzy import fuzz
HERE = os.path.abspath(os.path.dirname(__file__))
class EbaySpider(BaseSpider):
name = 'seapets-ebay.co.uk'
allowed_domains = ['ebay.co.uk']
start_urls = ['http://stores.ebay.co.uk/Nemos-Palace']
#def parse(self, response):
# hxs = HtmlXPathSelector(response)
# categories = hxs.select('//div[@class="lcat"]/ul[@class="lev1"]/li/a/@href').extract()
# for category in categories:
# url = urljoin_rfc(get_base_url(response), category)
# yield Request(url, callback=self.parse_products)
def parse(self, response):
hxs = HtmlXPathSelector(response)
products = hxs.select('//table[@class="grid"]/tr/td')
for product in products:
loader = ProductLoader(item=Product(), selector=product)
loader.add_xpath('name', 'table/tr/td/div[@class="ttl g-std"]/a/@title')
loader.add_xpath('url', 'table/tr/td/div[@class="ttl g-std"]/a/@href')
loader.add_xpath('price', 'table/tr/td/div/table/tr/td/span[@itemprop="price"]/text()')
yield loader.load_item()
next = hxs.select('//td[@class="next"]/a/@href').extract()
if next:
url = urljoin_rfc(get_base_url(response), next[0])
yield Request(url)
| [
"[email protected]"
]
| |
e93010ae26c2f452cbfb17ba59524682846ac2e7 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/asp.py | 5132eb9ef039bf880ace7b2535f8a47941dfbc54 | []
| no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 162 | py | ii = [('WilkJMC3.py', 4), ('PettTHE.py', 11), ('WilkJMC2.py', 5), ('CoolWHM.py', 1), ('LyelCPG.py', 1), ('WestJIT2.py', 1), ('LandWPA2.py', 1), ('SomeMMH.py', 1)] | [
"[email protected]"
]
| |
e3de59ab0a628f70e1187295bc11caee29962f62 | 308e318d1fd56520b1cfe093a5436043c72703db | /medicalcase/urls.py | 7fbff357f5082f6a7d17dabd49a02a808157e9fd | []
| no_license | NicholasTurner23/360MedNet-1 | b35e2b79712cd5568054e697298ad02c368f8853 | fb3939031c455c62c889383f73611b5b6845d8dd | refs/heads/master | 2021-06-18T09:57:32.656789 | 2017-06-17T22:33:32 | 2017-06-17T22:33:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | from django.conf.urls import url
from medicalcase import views as medicalcase_views
urlpatterns = [
url(r'^post/medical_case/$', medicalcase_views.MedicalCaseCreate.as_view(), name='medical-case'),
url(r'^medical_cases/$', medicalcase_views.MedicalCaseList.as_view(), name='medical_cases'),
url(r'^medical_case/(?P<pk>[0-9]+)/detail/$', medicalcase_views.MedicalCaseDetail.as_view(),
name='medical_case-detail'),
]
| [
"[email protected]"
]
| |
24b8b0d128b1755bfce972e35b56b2635439d049 | 927eb86f9d2b0466f580c08ec84e6a13604ba6f8 | /worldcupapp/views/media.py | 7d48a519f7bb1ae79aa49c2624f70fec9e7f0476 | []
| no_license | by-Exist/piku_backend_api | 61ee1aa0526d29d735f0fd8c0cf0a69d2a01abe4 | 5dfc4a3fc6cb842e2dc16d5af5b6fd7dea609b4f | refs/heads/main | 2023-06-11T21:10:51.652924 | 2021-07-07T14:35:33 | 2021-07-07T14:35:33 | 338,810,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,041 | py | from itertools import chain
from django.shortcuts import get_object_or_404
from django.utils.functional import cached_property
from worldcupapp.models.worldcup import Worldcup
from rest_framework import mixins, viewsets, response, status
from rest_framework.decorators import action
from drf_spectacular.utils import (
PolymorphicProxySerializer,
extend_schema_view,
extend_schema,
)
from drf_patchonly_mixin import mixins as dpm_mixins
from ..models import Media, TextMedia, ImageMedia, GifMedia, VideoMedia
from ..policys import MediaViewSetAccessPolicy
from ..serializers import (
GifMediaDetailSerializer,
GifMediaListSerializer,
ImageMediaDetailSerializer,
ImageMediaListSerializer,
TextMediaDetailSerializer,
TextMediaListSerializer,
VideoMediaDetailSerializer,
VideoMediaListSerializer,
MediaCountListSerializer,
)
class MediaViewSet(
mixins.ListModelMixin,
mixins.CreateModelMixin,
dpm_mixins.PatchOnlyMixin,
mixins.DestroyModelMixin,
viewsets.GenericViewSet,
):
detail_serializer_class = {
"Text": TextMediaDetailSerializer,
"Image": ImageMediaDetailSerializer,
"Gif": GifMediaDetailSerializer,
"Video": VideoMediaDetailSerializer,
}
list_serializer_class = {
"Text": TextMediaListSerializer,
"Image": ImageMediaListSerializer,
"Gif": GifMediaListSerializer,
"Video": VideoMediaListSerializer,
}
permission_classes = [MediaViewSetAccessPolicy]
@cached_property
def parent_object(self):
return get_object_or_404(Worldcup, pk=self.kwargs["worldcup_pk"])
def get_queryset(self):
if self.queryset:
return self.queryset
media_type_model_mapping = {
"Text": TextMedia,
"Image": ImageMedia,
"Gif": GifMedia,
"Video": VideoMedia,
}
model_cls = media_type_model_mapping[self.parent_object.media_type]
self.queryset = model_cls.objects.select_related("worldcup").filter(
worldcup=self.parent_object
)
return self.queryset
def get_serializer_class(self):
if self.action == "counts":
return MediaCountListSerializer
if self.action in ("create", "list"):
return self.list_serializer_class[self.parent_object.media_type]
return self.detail_serializer_class[self.parent_object.media_type]
@action(methods=["patch"], detail=False)
def counts(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
if serializer.is_valid():
medias = self.get_queryset()
for counts_data in serializer.validated_data["counts"]:
media_id = counts_data["media_id"]
if up_win_count := counts_data.get("up_win_count", None):
medias.get(pk=media_id).win_count_up(up_win_count)
if up_view_count := counts_data.get("up_view_count", None):
medias.get(pk=media_id).view_count_up(up_view_count)
if up_choice_count := counts_data.get("up_choice_count", None):
medias.get(pk=media_id).choice_count_up(up_choice_count)
Media.objects.bulk_update(
medias, ["win_count", "view_count", "choice_count"]
)
return response.Response(status=status.HTTP_204_NO_CONTENT)
return response.Response(
data=serializer.errors, status=status.HTTP_400_BAD_REQUEST
)
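    # Illustrative payload for the "counts" action above (field names taken from
    # MediaCountListSerializer and the loop in counts(); the values are made up):
    #   PATCH {"counts": [{"media_id": 3, "up_win_count": 1, "up_view_count": 2}]}
    # would bump media 3's win/view counters and persist them in one bulk_update.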
MediaListPolymorphicSerializer = PolymorphicProxySerializer(
component_name="MediaListPolymorphic",
serializers=[
TextMediaListSerializer,
ImageMediaListSerializer,
GifMediaListSerializer,
VideoMediaListSerializer,
],
resource_type_field_name=None,
)
MediaDetailPolymorphicSerializer = PolymorphicProxySerializer(
component_name="MediaDetailPolymorphic",
serializers=[
TextMediaDetailSerializer,
ImageMediaDetailSerializer,
GifMediaDetailSerializer,
VideoMediaDetailSerializer,
],
resource_type_field_name=None,
)
MediaViewSet = extend_schema_view(
list=extend_schema(
description="\n\n".join(
[
"## [ Description ]",
"- Worldcup's Media List",
"## [ Permission ]",
"- AllowAny",
]
),
responses=MediaListPolymorphicSerializer,
),
create=extend_schema(
description="\n\n".join(
[
"## [ Description ]",
"- Worldcup's Media Create",
"## [ Permission ]",
"- IsWorldcupCreator",
]
),
request=MediaListPolymorphicSerializer,
responses=MediaListPolymorphicSerializer,
),
partial_update=extend_schema(
description="\n\n".join(
[
"## [ Description ]",
"- Worldcup's Media Partial Update",
"## [ Permission ]",
"- IsWorldcupCreator",
]
),
request=MediaDetailPolymorphicSerializer,
responses=MediaDetailPolymorphicSerializer,
),
destroy=extend_schema(
description="\n\n".join(
[
"## [ Description ]",
"- Worldcup's Media Destroy",
"## [ Permission ]",
"- IsWorldcupCreator",
]
),
),
counts=extend_schema(
description="\n\n".join(
            [
                "## [ Description ]",
                "- Media's counts Update",
                "- Used to update the stats of the media that were used when a game ends",
                "- Applies to each media's win_count, view_count and choice_count",
"## [ Permission ]",
"- AllowAny",
]
),
responses={
200: None,
400: None,
},
),
)(MediaViewSet)
| [
"[email protected]"
]
| |
a712979f0746ffdb9d01e4e7639de181f610ecfc | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/apimanagement/v20210101preview/list_delegation_setting_secrets.py | 7b3884925eda30d4b2d81d99584cc3666a53a128 | [
"BSD-3-Clause",
"Apache-2.0"
]
| permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 2,469 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'ListDelegationSettingSecretsResult',
'AwaitableListDelegationSettingSecretsResult',
'list_delegation_setting_secrets',
]
@pulumi.output_type
class ListDelegationSettingSecretsResult:
"""
Client or app secret used in IdentityProviders, Aad, OpenID or OAuth.
"""
def __init__(__self__, validation_key=None):
if validation_key and not isinstance(validation_key, str):
raise TypeError("Expected argument 'validation_key' to be a str")
pulumi.set(__self__, "validation_key", validation_key)
@property
@pulumi.getter(name="validationKey")
def validation_key(self) -> Optional[str]:
"""
This is secret value of the validation key in portal settings.
"""
return pulumi.get(self, "validation_key")
class AwaitableListDelegationSettingSecretsResult(ListDelegationSettingSecretsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListDelegationSettingSecretsResult(
validation_key=self.validation_key)
def list_delegation_setting_secrets(resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListDelegationSettingSecretsResult:
"""
Client or app secret used in IdentityProviders, Aad, OpenID or OAuth.
:param str resource_group_name: The name of the resource group.
:param str service_name: The name of the API Management service.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:apimanagement/v20210101preview:listDelegationSettingSecrets', __args__, opts=opts, typ=ListDelegationSettingSecretsResult).value
return AwaitableListDelegationSettingSecretsResult(
validation_key=__ret__.validation_key)
| [
"[email protected]"
]
| |
224733db7bbbe943a5cdd5d14513e71863001123 | 37879f158886946a3328cb7c938b774eef6b12f4 | /feature_engineering_pandas.py | 003cf38f3ec684d66b08086075a253ee2016ccec | [
"MIT"
]
| permissive | beckernick/cml_rapids | 82f73bb4a7a12783967e1392ab5dba0d4ca01fde | da29a412418ac5c5be038f6c96af0b926c57c1ea | refs/heads/main | 2023-04-28T17:25:42.612687 | 2021-05-13T12:17:49 | 2021-05-13T12:17:49 | 367,154,418 | 0 | 0 | MIT | 2021-05-13T19:31:23 | 2021-05-13T19:31:23 | null | UTF-8 | Python | false | false | 3,622 | py | ## Feature Engineering using dask
import time
import pandas as dd
import pandas as pd
import numpy as np
from feature_engineering_2 import (
pos_cash, process_unified, process_bureau_and_balance,
process_previous_applications, installments_payments,
credit_card_balance
)
### Load Data
bureau_balance = dd.read_parquet('raw_data/bureau_balance.parquet')
bureau = dd.read_parquet('raw_data/bureau.parquet')
# behaviour data linked to prev as well as current loan
cc_balance = dd.read_parquet('raw_data/cc_balance.parquet')
payments = dd.read_parquet('raw_data/payments.parquet')
pc_balance = dd.read_parquet('raw_data/pc_balance.parquet')
prev = dd.read_parquet('raw_data/prev.parquet')
train = dd.read_parquet('raw_data/train.parquet')
test = dd.read_parquet('raw_data/test.parquet')
train_index = train.index
test_index = test.index
train_target = train['TARGET']
unified = dd.concat([train.drop('TARGET', axis=1), test])
# fix for the process functions not working with columns of type `category`
bureau_balance['STATUS'] = bureau_balance['STATUS'].astype('object')
bureau['CREDIT_ACTIVE'] = bureau['CREDIT_ACTIVE'].astype('object')
bureau['CREDIT_CURRENCY'] = bureau['CREDIT_CURRENCY'].astype('object')
prev['NAME_CONTRACT_STATUS'] = prev['NAME_CONTRACT_STATUS'].astype('object')
# need to split out the parquet writing
# also need to fix a UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
unified_feat = process_unified(unified, dd)
bureau_agg = process_bureau_and_balance(bureau, bureau_balance, dd)
prev_agg = process_previous_applications(prev, dd)
pos_agg = pos_cash(pc_balance, dd)
ins_agg = installments_payments(payments, dd)
cc_agg = credit_card_balance(cc_balance, dd)
unified_feat = unified_feat.join(bureau_agg, how='left', on='SK_ID_CURR') \
.join(prev_agg, how='left', on='SK_ID_CURR') \
.join(pos_agg, how='left', on='SK_ID_CURR') \
.join(ins_agg, how='left', on='SK_ID_CURR') \
.join(cc_agg, how='left', on='SK_ID_CURR')
# we can't use bool column types in xgb later on
bool_columns = [col for col in unified_feat.columns if (unified_feat[col].dtype in ['bool']) ]
unified_feat[bool_columns] = unified_feat[bool_columns].astype('int64')
# We will label encode for xgb later on
from sklearn.preprocessing import LabelEncoder
# label encode cats
label_encode_dict = {}
categorical = unified_feat.select_dtypes(include=pd.CategoricalDtype).columns
for column in categorical:
label_encode_dict[column] = LabelEncoder()
unified_feat[column] = label_encode_dict[column].fit_transform(unified_feat[column])
unified_feat[column] = unified_feat[column].astype('int64')
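# Note: LabelEncoder assigns every category an integer code, e.g.
# LabelEncoder().fit_transform(['a', 'b', 'a']) -> array([0, 1, 0]).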
### Fix for Int64D
Int64D = unified_feat.select_dtypes(include=[pd.Int64Dtype]).columns
unified_feat[Int64D] = unified_feat[Int64D].fillna(0)
unified_feat[Int64D] = unified_feat[Int64D].astype('int64')
### fix unit8
uint8 = unified_feat.select_dtypes(include=['uint8']).columns
unified_feat[uint8] = unified_feat[uint8].astype('int64')
nan_columns = unified_feat.columns[unified_feat.isna().any()].tolist()
unified_feat.replace([np.inf, -np.inf], np.nan, inplace=True)
unified_feat[nan_columns] = unified_feat[nan_columns].fillna(0)
train_feats = unified_feat.loc[train_index].merge(train_target, how='left',
left_index=True, right_index=True)
test_feats = unified_feat.loc[test_index]
train_feats.to_parquet('data_eng/feats/train_feats.parquet')
test_feats.to_parquet('data_eng/feats/test_feats.parquet') | [
"[email protected]"
]
| |
5eae1492af790922bb806b1d1c75466db26ca638 | 1d22e0cc8db1ddbdab6c06a049ccc15f35dfff99 | /hmm_class/hmm_classifier.py | ef78103fd4f6f7572a36a305ebe37019bd61ebd0 | []
| no_license | JiaxinYu/machine_learning_examples | 59f37335407d9b9523a6879602ad3d58eac7da77 | db49879ca5efd34e7d2ad6c3ddf1fb4854c24429 | refs/heads/master | 2020-06-11T07:24:29.871826 | 2016-11-27T17:54:19 | 2016-11-27T17:54:19 | 75,734,758 | 1 | 0 | null | 2016-12-06T13:39:27 | 2016-12-06T13:39:27 | null | UTF-8 | Python | false | false | 2,841 | py | # https://udemy.com/unsupervised-machine-learning-hidden-markov-models-in-python
# http://lazyprogrammer.me
# Demonstrate how HMMs can be used for classification.
import string
import numpy as np
import matplotlib.pyplot as plt
from hmmd_theano import HMM
from sklearn.utils import shuffle
from nltk import pos_tag, word_tokenize
class HMMClassifier:
def __init__(self):
pass
def fit(self, X, Y, V):
K = len(set(Y)) # number of classes - assume 0..K-1
self.models = []
self.priors = []
for k in xrange(K):
# gather all the training data for this class
thisX = [x for x, y in zip(X, Y) if y == k]
C = len(thisX)
self.priors.append(np.log(C))
hmm = HMM(5)
hmm.fit(thisX, V=V, p_cost=0.1, print_period=1, learning_rate=10e-5, max_iter=100)
self.models.append(hmm)
def score(self, X, Y):
N = len(Y)
correct = 0
for x, y in zip(X, Y):
lls = [hmm.log_likelihood(x) + prior for hmm, prior in zip(self.models, self.priors)]
p = np.argmax(lls)
if p == y:
correct += 1
return float(correct) / N
# def remove_punctuation(s):
# return s.translate(None, string.punctuation)
def get_tags(s):
tuples = pos_tag(word_tokenize(s))
return [y for x, y in tuples]
def get_data():
word2idx = {}
current_idx = 0
X = []
Y = []
for fn, label in zip(('robert_frost.txt', 'edgar_allan_poe.txt'), (0, 1)):
count = 0
for line in open(fn):
line = line.rstrip()
if line:
print line
# tokens = remove_punctuation(line.lower()).split()
tokens = get_tags(line)
if len(tokens) > 1:
# scan doesn't work nice here, technically could fix...
for token in tokens:
if token not in word2idx:
word2idx[token] = current_idx
current_idx += 1
sequence = np.array([word2idx[w] for w in tokens])
X.append(sequence)
Y.append(label)
count += 1
print count
if count >= 50:
break
print "Vocabulary:", word2idx.keys()
return X, Y, current_idx
def main():
X, Y, V = get_data()
# print "Finished loading data"
print "len(X):", len(X)
print "Vocabulary size:", V
X, Y = shuffle(X, Y)
N = 20 # number to test
Xtrain, Ytrain = X[:-N], Y[:-N]
Xtest, Ytest = X[-N:], Y[-N:]
model = HMMClassifier()
model.fit(Xtrain, Ytrain, V)
print "Score:", model.score(Xtest, Ytest)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
18ab42c276337f57636ec03c57500e23dd33eeda | d57f6c045c7b07dd53ee80982005beb33450b64b | /migrations/versions/b75221b7534f_.py | 80700eb2975f945de0b9aab80daaa6d3a076c042 | []
| no_license | gwynethbradbury/ouss_ball | 7df0ccafd42bd8d6fd22816c71fbe9a6a852351a | 1115fe316f7c1ee1407017a60a054b1f7291f331 | refs/heads/master | 2023-05-11T18:36:29.921936 | 2018-03-22T15:56:52 | 2018-03-22T15:56:52 | 122,100,136 | 1 | 0 | null | 2018-03-22T13:55:05 | 2018-02-19T17:58:55 | PHP | UTF-8 | Python | false | false | 641 | py | """empty message
Revision ID: b75221b7534f
Revises: 57bc3837370a
Create Date: 2016-01-11 19:56:43.653390
"""
# revision identifiers, used by Alembic.
revision = 'b75221b7534f'
down_revision = '57bc3837370a'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('postage', sa.Column('paid', sa.Boolean(), nullable=False))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('postage', 'paid')
### end Alembic commands ###
| [
"[email protected]"
]
| |
bd0fab02a5fbbadc2955432d86b4c0f514793a5d | 1817aca734cda258cbbfd9e13fbf040d76824621 | /aliyun-python-sdk-slb/aliyunsdkslb/request/v20140515/SetLogsDownloadStatusRequest.py | 3f5de92cf81226eceacc5ace8c2ca2a158173dc2 | [
"Apache-2.0"
]
| permissive | sdk-team/aliyun-openapi-python-sdk | 4bd770718e70e31f19e1e322727c27ba74d9fb80 | 996cb07bfcf010fe3ab65daa73d26df2f3b6e97f | refs/heads/master | 2022-08-04T13:11:56.729215 | 2022-07-25T10:01:10 | 2022-07-25T10:01:10 | 183,356,741 | 0 | 0 | null | 2019-04-25T04:33:24 | 2019-04-25T04:33:24 | null | UTF-8 | Python | false | false | 2,308 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class SetLogsDownloadStatusRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Slb', '2014-05-15', 'SetLogsDownloadStatus','asdfdsf')
def get_access_key_id(self):
return self.get_query_params().get('access_key_id')
def set_access_key_id(self,access_key_id):
self.add_query_param('access_key_id',access_key_id)
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_LogsDownloadStatus(self):
return self.get_query_params().get('LogsDownloadStatus')
def set_LogsDownloadStatus(self,LogsDownloadStatus):
self.add_query_param('LogsDownloadStatus',LogsDownloadStatus)
def get_Tags(self):
return self.get_query_params().get('Tags')
def set_Tags(self,Tags):
self.add_query_param('Tags',Tags) | [
"[email protected]"
]
| |
59c3bd06e2e52ff8c563ba694f192343d83d345f | b76615ff745c6d66803506251c3d4109faf50802 | /pyobjc-framework-SceneKit/PyObjCTest/test_scnmaterial.py | f2c6d6c21c5547c3bc9103160f5ceb299b9928c3 | [
"MIT"
]
| permissive | danchr/pyobjc-git | 6ef17e472f54251e283a0801ce29e9eff9c20ac0 | 62b787fddeb381184043c7ff136f1c480755ab69 | refs/heads/master | 2021-01-04T12:24:31.581750 | 2020-02-02T20:43:02 | 2020-02-02T20:43:02 | 240,537,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,832 | py | from PyObjCTools.TestSupport import *
import objc
import sys
if os_level_key(os_release()) < os_level_key("10.12") or sys.maxsize >= 2 ** 32:
import SceneKit
class TestSCNMaterial(TestCase):
def testConstants(self):
self.assertIsInstance(SceneKit.SCNLightingModelPhong, unicode)
self.assertIsInstance(SceneKit.SCNLightingModelBlinn, unicode)
self.assertIsInstance(SceneKit.SCNLightingModelLambert, unicode)
self.assertIsInstance(SceneKit.SCNLightingModelConstant, unicode)
self.assertEqual(SceneKit.SCNFillModeFill, 0)
self.assertEqual(SceneKit.SCNFillModeLines, 1)
self.assertEqual(SceneKit.SCNCullBack, 0)
self.assertEqual(SceneKit.SCNCullFront, 1)
self.assertEqual(SceneKit.SCNTransparencyModeAOne, 0)
self.assertEqual(SceneKit.SCNTransparencyModeRGBZero, 1)
self.assertEqual(SceneKit.SCNTransparencyModeSingleLayer, 2)
self.assertEqual(SceneKit.SCNTransparencyModeDualLayer, 3)
self.assertEqual(
SceneKit.SCNTransparencyModeDefault, SceneKit.SCNTransparencyModeAOne
)
self.assertEqual(SceneKit.SCNBlendModeAlpha, 0)
self.assertEqual(SceneKit.SCNBlendModeAdd, 1)
self.assertEqual(SceneKit.SCNBlendModeSubtract, 2)
self.assertEqual(SceneKit.SCNBlendModeMultiply, 3)
self.assertEqual(SceneKit.SCNBlendModeScreen, 4)
self.assertEqual(SceneKit.SCNBlendModeReplace, 5)
self.assertEqual(SceneKit.SCNBlendModeMax, 6)
@min_os_level("10.12")
def testConstants10_12(self):
self.assertIsInstance(SceneKit.SCNLightingModelPhysicallyBased, unicode)
@min_os_level("10.15")
def testConstants10_15(self):
self.assertIsInstance(SceneKit.SCNLightingModelShadowOnly, unicode)
def testMethods(self):
self.assertResultIsBOOL(SceneKit.SCNMaterial.isLitPerPixel)
self.assertArgIsBOOL(SceneKit.SCNMaterial.setLitPerPixel_, 0)
self.assertResultIsBOOL(SceneKit.SCNMaterial.isDoubleSided)
self.assertArgIsBOOL(SceneKit.SCNMaterial.setDoubleSided_, 0)
self.assertResultIsBOOL(SceneKit.SCNMaterial.locksAmbientWithDiffuse)
self.assertArgIsBOOL(SceneKit.SCNMaterial.setLocksAmbientWithDiffuse_, 0)
self.assertResultIsBOOL(SceneKit.SCNMaterial.writesToDepthBuffer)
self.assertArgIsBOOL(SceneKit.SCNMaterial.setWritesToDepthBuffer_, 0)
@min_os_level("10.9")
def testMethods10_9(self):
self.assertResultIsBOOL(SceneKit.SCNMaterial.readsFromDepthBuffer)
self.assertArgIsBOOL(SceneKit.SCNMaterial.setReadsFromDepthBuffer_, 0)
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
8411f21b811eca560091444108d42f0dc1514fce | 951a3c8d6ec3d4e5f0718b8e6c92348196e5ebbf | /mysite/polls/migrations/0003_remove_question_question_prompt.py | e82e0fccbdf3a513a36859ca9de862621ece514d | []
| no_license | aspiringguru/learnDjango | 6f3b178381cd8037f9c954e7cc49f68d6a8b3b4c | 24ac82293b109ad36bb375e32983154b4de23470 | refs/heads/master | 2020-12-10T23:00:33.479558 | 2020-01-15T08:46:18 | 2020-01-15T08:46:18 | 233,736,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | # Generated by Django 2.2.9 on 2020-01-15 00:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('polls', '0002_question_question_prompt'),
]
operations = [
migrations.RemoveField(
model_name='question',
name='question_prompt',
),
]
| [
"[email protected]"
]
| |
5859434341568411959a48e0941bf29a6dbeaeae | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_091/ch4_2020_09_04_14_40_54_928784.py | 652261e221bca6774cbba41cd2b6e29cac4be123 | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | def classifica_idade(idade):
if idade <= 11:
return ('crianca')
if 12<=idade<=17:
return('adolescente')
if idade => 18:
return('adulto')
a= 13
b=classica_idade(a)
print(b) | [
"[email protected]"
]
| |
512c76ab159a877dea30fe399f3220371dd2baf0 | 51de6a2a2ce8882ee6462cd1076c7b9675830531 | /0x0F-python-object_relational_mapping/2-my_filter_states.py | 20f1742598a0848dd05b4b932cf3a0fffab10e70 | []
| no_license | anamariaroman/holbertonschool-higher_level_programming | 9b479c9b1484e4388ec0a4390cda81480626725a | 5d75ccc35dfc92887d0f9a9e0b0773ed741d179e | refs/heads/master | 2023-08-17T23:40:25.164128 | 2021-09-23T04:57:43 | 2021-09-23T04:57:43 | 361,869,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | #!/usr/bin/python3
"""
takes in an argument and displays all values in the
states table of hbtn_0e_0_usa where name matches the argument.
"""
import MySQLdb
from sys import argv
if __name__ == "__main__":
db = MySQLdb.connect(host="localhost", port=3306, user=argv[1],
passwd=argv[2], db=argv[3], charset="utf8")
cursor = db.cursor()
cursor.execute("SELECT * FROM states WHERE states.name = '{:s}' ORDER BY \
states.id ASC".format(argv[4]))
r = cursor.fetchall()
for row in r:
if row[1] == argv[4]:
print(row)
cursor.close()
db.close()
| [
"[email protected]"
]
| |
b5faf82ad73aadaff1bd1970efa1a7fe32bb250f | bf15a97a377bc49495a8c278cd247387a08361fd | /intersight/models/hcl_exempted_catalog.py | 03d80031ad546ce01d077c8a759720408006b260 | [
"Apache-2.0"
]
| permissive | movinalot/intersight-python | ffcb434e5fdf3f6e857dd967c794a64b2d2e05de | cdc3b082d75eac93b74029ab610e16d3008fdd8c | refs/heads/master | 2020-12-18T15:46:06.780834 | 2019-10-29T00:39:49 | 2019-10-29T00:39:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,346 | py | # coding: utf-8
"""
Intersight REST API
This is Intersight REST API
OpenAPI spec version: 1.0.9-961
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class HclExemptedCatalog(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'account_moid': 'str',
'ancestors': 'list[MoBaseMoRef]',
'create_time': 'datetime',
'domain_group_moid': 'str',
'mod_time': 'datetime',
'moid': 'str',
'object_type': 'str',
'owners': 'list[str]',
'parent': 'MoBaseMoRef',
'shared_scope': 'str',
'tags': 'list[MoTag]',
'version_context': 'MoVersionContext',
'comments': 'str',
'name': 'str',
'os_vendor': 'str',
'os_version': 'str',
'processor_name': 'str',
'product_models': 'list[str]',
'product_type': 'str',
'server_pid': 'str',
'ucs_version': 'str',
'version_type': 'str'
}
attribute_map = {
'account_moid': 'AccountMoid',
'ancestors': 'Ancestors',
'create_time': 'CreateTime',
'domain_group_moid': 'DomainGroupMoid',
'mod_time': 'ModTime',
'moid': 'Moid',
'object_type': 'ObjectType',
'owners': 'Owners',
'parent': 'Parent',
'shared_scope': 'SharedScope',
'tags': 'Tags',
'version_context': 'VersionContext',
'comments': 'Comments',
'name': 'Name',
'os_vendor': 'OsVendor',
'os_version': 'OsVersion',
'processor_name': 'ProcessorName',
'product_models': 'ProductModels',
'product_type': 'ProductType',
'server_pid': 'ServerPid',
'ucs_version': 'UcsVersion',
'version_type': 'VersionType'
}
def __init__(self, account_moid=None, ancestors=None, create_time=None, domain_group_moid=None, mod_time=None, moid=None, object_type=None, owners=None, parent=None, shared_scope=None, tags=None, version_context=None, comments=None, name=None, os_vendor=None, os_version=None, processor_name=None, product_models=None, product_type=None, server_pid=None, ucs_version=None, version_type=None):
"""
HclExemptedCatalog - a model defined in Swagger
"""
self._account_moid = None
self._ancestors = None
self._create_time = None
self._domain_group_moid = None
self._mod_time = None
self._moid = None
self._object_type = None
self._owners = None
self._parent = None
self._shared_scope = None
self._tags = None
self._version_context = None
self._comments = None
self._name = None
self._os_vendor = None
self._os_version = None
self._processor_name = None
self._product_models = None
self._product_type = None
self._server_pid = None
self._ucs_version = None
self._version_type = None
if account_moid is not None:
self.account_moid = account_moid
if ancestors is not None:
self.ancestors = ancestors
if create_time is not None:
self.create_time = create_time
if domain_group_moid is not None:
self.domain_group_moid = domain_group_moid
if mod_time is not None:
self.mod_time = mod_time
if moid is not None:
self.moid = moid
if object_type is not None:
self.object_type = object_type
if owners is not None:
self.owners = owners
if parent is not None:
self.parent = parent
if shared_scope is not None:
self.shared_scope = shared_scope
if tags is not None:
self.tags = tags
if version_context is not None:
self.version_context = version_context
if comments is not None:
self.comments = comments
if name is not None:
self.name = name
if os_vendor is not None:
self.os_vendor = os_vendor
if os_version is not None:
self.os_version = os_version
if processor_name is not None:
self.processor_name = processor_name
if product_models is not None:
self.product_models = product_models
if product_type is not None:
self.product_type = product_type
if server_pid is not None:
self.server_pid = server_pid
if ucs_version is not None:
self.ucs_version = ucs_version
if version_type is not None:
self.version_type = version_type
@property
def account_moid(self):
"""
Gets the account_moid of this HclExemptedCatalog.
The Account ID for this managed object.
:return: The account_moid of this HclExemptedCatalog.
:rtype: str
"""
return self._account_moid
@account_moid.setter
def account_moid(self, account_moid):
"""
Sets the account_moid of this HclExemptedCatalog.
The Account ID for this managed object.
:param account_moid: The account_moid of this HclExemptedCatalog.
:type: str
"""
self._account_moid = account_moid
@property
def ancestors(self):
"""
Gets the ancestors of this HclExemptedCatalog.
The array containing the MO references of the ancestors in the object containment hierarchy.
:return: The ancestors of this HclExemptedCatalog.
:rtype: list[MoBaseMoRef]
"""
return self._ancestors
@ancestors.setter
def ancestors(self, ancestors):
"""
Sets the ancestors of this HclExemptedCatalog.
The array containing the MO references of the ancestors in the object containment hierarchy.
:param ancestors: The ancestors of this HclExemptedCatalog.
:type: list[MoBaseMoRef]
"""
self._ancestors = ancestors
@property
def create_time(self):
"""
Gets the create_time of this HclExemptedCatalog.
The time when this managed object was created.
:return: The create_time of this HclExemptedCatalog.
:rtype: datetime
"""
return self._create_time
@create_time.setter
def create_time(self, create_time):
"""
Sets the create_time of this HclExemptedCatalog.
The time when this managed object was created.
:param create_time: The create_time of this HclExemptedCatalog.
:type: datetime
"""
self._create_time = create_time
@property
def domain_group_moid(self):
"""
Gets the domain_group_moid of this HclExemptedCatalog.
The DomainGroup ID for this managed object.
:return: The domain_group_moid of this HclExemptedCatalog.
:rtype: str
"""
return self._domain_group_moid
@domain_group_moid.setter
def domain_group_moid(self, domain_group_moid):
"""
Sets the domain_group_moid of this HclExemptedCatalog.
The DomainGroup ID for this managed object.
:param domain_group_moid: The domain_group_moid of this HclExemptedCatalog.
:type: str
"""
self._domain_group_moid = domain_group_moid
@property
def mod_time(self):
"""
Gets the mod_time of this HclExemptedCatalog.
The time when this managed object was last modified.
:return: The mod_time of this HclExemptedCatalog.
:rtype: datetime
"""
return self._mod_time
@mod_time.setter
def mod_time(self, mod_time):
"""
Sets the mod_time of this HclExemptedCatalog.
The time when this managed object was last modified.
:param mod_time: The mod_time of this HclExemptedCatalog.
:type: datetime
"""
self._mod_time = mod_time
@property
def moid(self):
"""
Gets the moid of this HclExemptedCatalog.
The unique identifier of this Managed Object instance.
:return: The moid of this HclExemptedCatalog.
:rtype: str
"""
return self._moid
@moid.setter
def moid(self, moid):
"""
Sets the moid of this HclExemptedCatalog.
The unique identifier of this Managed Object instance.
:param moid: The moid of this HclExemptedCatalog.
:type: str
"""
self._moid = moid
@property
def object_type(self):
"""
Gets the object_type of this HclExemptedCatalog.
The fully-qualified type of this managed object, e.g. the class name.
:return: The object_type of this HclExemptedCatalog.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this HclExemptedCatalog.
The fully-qualified type of this managed object, e.g. the class name.
:param object_type: The object_type of this HclExemptedCatalog.
:type: str
"""
self._object_type = object_type
@property
def owners(self):
"""
Gets the owners of this HclExemptedCatalog.
The array of owners which represent effective ownership of this object.
:return: The owners of this HclExemptedCatalog.
:rtype: list[str]
"""
return self._owners
@owners.setter
def owners(self, owners):
"""
Sets the owners of this HclExemptedCatalog.
The array of owners which represent effective ownership of this object.
:param owners: The owners of this HclExemptedCatalog.
:type: list[str]
"""
self._owners = owners
@property
def parent(self):
"""
Gets the parent of this HclExemptedCatalog.
The direct ancestor of this managed object in the containment hierarchy.
:return: The parent of this HclExemptedCatalog.
:rtype: MoBaseMoRef
"""
return self._parent
@parent.setter
def parent(self, parent):
"""
Sets the parent of this HclExemptedCatalog.
The direct ancestor of this managed object in the containment hierarchy.
:param parent: The parent of this HclExemptedCatalog.
:type: MoBaseMoRef
"""
self._parent = parent
@property
def shared_scope(self):
"""
Gets the shared_scope of this HclExemptedCatalog.
Intersight provides pre-built workflows, tasks and policies to end users through global catalogs. Objects that are made available through global catalogs are said to have a 'shared' ownership. Shared objects are either made globally available to all end users or restricted to end users based on their license entitlement. Users can use this property to differentiate the scope (global or a specific license tier) to which a shared MO belongs.
:return: The shared_scope of this HclExemptedCatalog.
:rtype: str
"""
return self._shared_scope
@shared_scope.setter
def shared_scope(self, shared_scope):
"""
Sets the shared_scope of this HclExemptedCatalog.
Intersight provides pre-built workflows, tasks and policies to end users through global catalogs. Objects that are made available through global catalogs are said to have a 'shared' ownership. Shared objects are either made globally available to all end users or restricted to end users based on their license entitlement. Users can use this property to differentiate the scope (global or a specific license tier) to which a shared MO belongs.
:param shared_scope: The shared_scope of this HclExemptedCatalog.
:type: str
"""
self._shared_scope = shared_scope
@property
def tags(self):
"""
Gets the tags of this HclExemptedCatalog.
The array of tags, which allow to add key, value meta-data to managed objects.
:return: The tags of this HclExemptedCatalog.
:rtype: list[MoTag]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""
Sets the tags of this HclExemptedCatalog.
The array of tags, which allow to add key, value meta-data to managed objects.
:param tags: The tags of this HclExemptedCatalog.
:type: list[MoTag]
"""
self._tags = tags
@property
def version_context(self):
"""
Gets the version_context of this HclExemptedCatalog.
The versioning info for this managed object.
:return: The version_context of this HclExemptedCatalog.
:rtype: MoVersionContext
"""
return self._version_context
@version_context.setter
def version_context(self, version_context):
"""
Sets the version_context of this HclExemptedCatalog.
The versioning info for this managed object.
:param version_context: The version_context of this HclExemptedCatalog.
:type: MoVersionContext
"""
self._version_context = version_context
@property
def comments(self):
"""
Gets the comments of this HclExemptedCatalog.
Reason for the exemption.
:return: The comments of this HclExemptedCatalog.
:rtype: str
"""
return self._comments
@comments.setter
def comments(self, comments):
"""
Sets the comments of this HclExemptedCatalog.
Reason for the exemption.
:param comments: The comments of this HclExemptedCatalog.
:type: str
"""
self._comments = comments
@property
def name(self):
"""
Gets the name of this HclExemptedCatalog.
A unique descriptive name of the exemption.
:return: The name of this HclExemptedCatalog.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this HclExemptedCatalog.
A unique descriptive name of the exemption.
:param name: The name of this HclExemptedCatalog.
:type: str
"""
self._name = name
@property
def os_vendor(self):
"""
Gets the os_vendor of this HclExemptedCatalog.
Vendor of the Operating System.
:return: The os_vendor of this HclExemptedCatalog.
:rtype: str
"""
return self._os_vendor
@os_vendor.setter
def os_vendor(self, os_vendor):
"""
Sets the os_vendor of this HclExemptedCatalog.
Vendor of the Operating System.
:param os_vendor: The os_vendor of this HclExemptedCatalog.
:type: str
"""
self._os_vendor = os_vendor
@property
def os_version(self):
"""
Gets the os_version of this HclExemptedCatalog.
Version of the Operating system.
:return: The os_version of this HclExemptedCatalog.
:rtype: str
"""
return self._os_version
@os_version.setter
def os_version(self, os_version):
"""
Sets the os_version of this HclExemptedCatalog.
Version of the Operating system.
:param os_version: The os_version of this HclExemptedCatalog.
:type: str
"""
self._os_version = os_version
@property
def processor_name(self):
"""
Gets the processor_name of this HclExemptedCatalog.
Name of the processor supported for the server.
:return: The processor_name of this HclExemptedCatalog.
:rtype: str
"""
return self._processor_name
@processor_name.setter
def processor_name(self, processor_name):
"""
Sets the processor_name of this HclExemptedCatalog.
Name of the processor supported for the server.
:param processor_name: The processor_name of this HclExemptedCatalog.
:type: str
"""
self._processor_name = processor_name
@property
def product_models(self):
"""
Gets the product_models of this HclExemptedCatalog.
Models of the product/adapter.
:return: The product_models of this HclExemptedCatalog.
:rtype: list[str]
"""
return self._product_models
@product_models.setter
def product_models(self, product_models):
"""
Sets the product_models of this HclExemptedCatalog.
Models of the product/adapter.
:param product_models: The product_models of this HclExemptedCatalog.
:type: list[str]
"""
self._product_models = product_models
@property
def product_type(self):
"""
Gets the product_type of this HclExemptedCatalog.
Type of the product/adapter say PT for Pass Through controllers.
:return: The product_type of this HclExemptedCatalog.
:rtype: str
"""
return self._product_type
@product_type.setter
def product_type(self, product_type):
"""
Sets the product_type of this HclExemptedCatalog.
Type of the product/adapter say PT for Pass Through controllers.
:param product_type: The product_type of this HclExemptedCatalog.
:type: str
"""
self._product_type = product_type
@property
def server_pid(self):
"""
Gets the server_pid of this HclExemptedCatalog.
Three part ID representing the server model as returned by UCSM/CIMC XML APIs.
:return: The server_pid of this HclExemptedCatalog.
:rtype: str
"""
return self._server_pid
@server_pid.setter
def server_pid(self, server_pid):
"""
Sets the server_pid of this HclExemptedCatalog.
Three part ID representing the server model as returned by UCSM/CIMC XML APIs.
:param server_pid: The server_pid of this HclExemptedCatalog.
:type: str
"""
self._server_pid = server_pid
@property
def ucs_version(self):
"""
Gets the ucs_version of this HclExemptedCatalog.
Version of the UCS software.
:return: The ucs_version of this HclExemptedCatalog.
:rtype: str
"""
return self._ucs_version
@ucs_version.setter
def ucs_version(self, ucs_version):
"""
Sets the ucs_version of this HclExemptedCatalog.
Version of the UCS software.
:param ucs_version: The ucs_version of this HclExemptedCatalog.
:type: str
"""
self._ucs_version = ucs_version
@property
def version_type(self):
"""
Gets the version_type of this HclExemptedCatalog.
Type of the UCS version indicating whether it is a UCSM release vesion or a IMC release.
:return: The version_type of this HclExemptedCatalog.
:rtype: str
"""
return self._version_type
@version_type.setter
def version_type(self, version_type):
"""
Sets the version_type of this HclExemptedCatalog.
Type of the UCS version indicating whether it is a UCSM release vesion or a IMC release.
:param version_type: The version_type of this HclExemptedCatalog.
:type: str
"""
self._version_type = version_type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, HclExemptedCatalog):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"[email protected]"
]
| |
96b772958a9c0a774904dcf77ee5a9f9143e17c7 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-4/2cb4a725b4cb9be160d194f7b47df6c98709ebfd-<create_connection_team_slave>-fix.py | d3c209e5c778414dddc980ca9daa3ffc050223ca | []
| no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | def create_connection_team_slave(self):
cmd = [self.nmcli_bin, 'connection', 'add', 'type', self.type, 'con-name']
if (self.conn_name is not None):
cmd.append(self.conn_name)
elif (self.ifname is not None):
cmd.append(self.ifname)
cmd.append('ifname')
if (self.ifname is not None):
cmd.append(self.ifname)
elif (self.conn_name is not None):
cmd.append(self.conn_name)
cmd.append('master')
if (self.conn_name is not None):
cmd.append(self.master)
return cmd | [
"[email protected]"
]
| |
e1f6740a875c434bf2e70839f5493f69bb4e96d7 | 64b6364b2cea4e49cc1768e159ceb3fb438fc096 | /src/metric_runner.py | dc4d64f00f77ed24aac17d9f471364a1a419b32d | []
| no_license | nkartashov/4genome_tester | 902828f2a4373df9888788d4cb98398700259e7b | 547446b9f38ee69177d8a12bb171c1d2ae993cad | refs/heads/master | 2016-09-06T01:08:24.565208 | 2015-06-04T22:55:17 | 2015-06-04T22:55:17 | 34,047,710 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,754 | py | __author__ = 'nikita_kartashov'
from src.graph.statistics import get_distribution_metric, \
get_simple_paths_metric, \
get_bp_distance_metric, \
get_dcj_distance_metric, \
get_ca_metric, \
get_mca_metric, \
get_cumulative_metric_batch
from .metrics.metrics import Metrics
ANNOTATED_SINGLE_METRICS = (
# (get_distribution_metric, 'D'), # Distribution
# (get_simple_paths_metric, 'SP'), # Simple Paths
# (get_bp_distance_metric, 'S_BP'),
# (get_dcj_distance_metric, 'S_DCJ'),
(get_ca_metric, 'CA'),
(get_mca_metric, 'MCA'),
)
ANNOTATED_BATCH_METRICS = ((get_cumulative_metric_batch, 'MCA+'),)
METRICS = Metrics(ANNOTATED_SINGLE_METRICS, ANNOTATED_BATCH_METRICS)
A, B, C, D = 'A', 'B', 'C', 'D'
TOPOLOGIES = [((A, B), (C, D)),
((A, C), (B, D)),
((A, D), (C, B))]
# If we have m methods and n trees then function returns score matrix of m lines and n columns
# def run_metrics(breakpoint_graph):
# return (((metric(breakpoint_graph, topology), topology) for topology in TOPOLOGIES) for metric in METRICS)
def compare_metric_results(breakpoint_graph, right_tree):
metric_results = METRICS.run_metrics(breakpoint_graph, TOPOLOGIES)
def decide_if_right(scored_trees):
scored_trees = list(scored_trees)
min_score = min(scored_trees)[0]
trees_with_min_score = list(tree for score, tree in scored_trees if score == min_score)
return int(len(trees_with_min_score) == 1 and trees_with_min_score[0] == right_tree)
return (decide_if_right(score_tuple) for score_tuple in metric_results) | [
"[email protected]"
]
| |
de8de4a17ab7c78b43d4dc1dd862aaa4d5ba5ef9 | f8f2536fa873afa43dafe0217faa9134e57c8a1e | /aliyun-python-sdk-openanalytics-open/aliyunsdkopenanalytics_open/request/v20180619/DestroyVirtualClusterRequest.py | f34fbf62e2477c455d21adcac88c4659473afa70 | [
"Apache-2.0"
]
| permissive | Sunnywillow/aliyun-openapi-python-sdk | 40b1b17ca39467e9f8405cb2ca08a85b9befd533 | 6855864a1d46f818d73f5870da0efec2b820baf5 | refs/heads/master | 2022-12-04T02:22:27.550198 | 2020-08-20T04:11:34 | 2020-08-20T04:11:34 | 288,944,896 | 1 | 0 | NOASSERTION | 2020-08-20T08:04:01 | 2020-08-20T08:04:01 | null | UTF-8 | Python | false | false | 1,456 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkopenanalytics_open.endpoint import endpoint_data
class DestroyVirtualClusterRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'openanalytics-open', '2018-06-19', 'DestroyVirtualCluster','openanalytics')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Name(self):
return self.get_body_params().get('Name')
def set_Name(self,Name):
self.add_body_params('Name', Name) | [
"[email protected]"
]
| |
36a11457b2ad103a18565c44f60b426d4dc20b3e | 99d436394e47571160340c95d527ecadaae83541 | /algorithms_questions/ch18_graph_theory/q45_1.py | 0053a3fd5f07e6c424f2a633246622ae14a46a7f | []
| no_license | LeeSeok-Jun/Algorithms | b47ba4de5580302e9e2399bcf85d245ebeb1b93d | 0e8573bd03c50df3f89dd0ee9eed9cf8716ef8d8 | refs/heads/main | 2023-03-02T06:47:20.939235 | 2021-02-08T05:18:24 | 2021-02-08T05:18:24 | 299,840,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,090 | py | """
Final Ranking - 2nd attempt
"""
# Time limit for solving: 60 minutes
# 2020/12/31 11:10 ~ 11:31
# Failed - made a mistake using data[i], and did not seed the queue at the start
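# Approach: Kahn's topological sort. Build one edge per ordered pair of last
# year's ranking, flip the edges for the reported swaps, then repeatedly pop
# indegree-0 nodes: an early empty queue means a cycle ("IMPOSSIBLE"), and two
# or more candidates at once means the final ranking is ambiguous ("?").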
from collections import deque
"""
# Not needed for the topological sort algorithm.
def find_parent(parent, x):
if parent[x] != x:
parent[x] = find_parent(parent, parent[x])
return parent[x]
def union_parent(parent, a, b):
a = find_parent(parent, a)
b = find_parent(parent, b)
if a < b:
parent[b] = a
else:
parent[a] = b
"""
for tc in range(int(input())):
n = int(input())
parent = [0] * (n + 1)
for i in range(1, n+1):
parent[i] = i
indegree = [0] * (n+1)
data = list(map(int, input().split()))
graph = [[] for _ in range(n+1)]
    # Must use data[i] and data[j] here!
for i in range(n):
for j in range(i+1, n):
graph[data[j]].append(data[i])
indegree[data[i]] += 1
m = int(input())
for _ in range(m):
a, b = map(int, input().split())
if b not in graph[a]:
graph[b].remove(a)
indegree[a] -= 1
graph[a].append(b)
indegree[b] += 1
else:
graph[a].remove(b)
indegree[b] -= 1
graph[b].append(a)
indegree[a] += 1
cycle = False
certain = True
q = deque()
result = []
    # Previous attempt forgot to seed the queue with the initial elements here
for i in range(1, n+1):
if indegree[i] == 0:
q.append(i)
for _ in range(n):
if len(q) == 0:
cycle = True
break
if len(q) >= 2:
certain = False
break
now = q.popleft()
result.append(now)
for i in graph[now]:
indegree[i] -= 1
if indegree[i] == 0:
q.append(i)
if cycle:
print("IMPOSSIBLE")
elif not certain:
print("?")
else:
for i in reversed(result):
print(i, end=" ")
print()
| [
"[email protected]"
]
| |
96cdda7deaa7720cd3559f3d0b3e5accb90e9308 | 6c597d56ab500f8d0788b803fdfb9ab4dbb37a90 | /openregistry/assets/claimrights/tests/transferring.py | 29487148a1b9b1a3825f6e85e4ebbe8f092f72a2 | [
"Apache-2.0"
]
| permissive | openprocurement/openregistry.assets.claimrights | 1671e55313aa69b073d1662a0fe16a8bd604f4fd | 8f8d59760da3b647730da9d56e656a6ef4d12302 | refs/heads/master | 2021-05-14T23:59:00.664485 | 2019-03-27T15:33:44 | 2019-03-27T15:33:44 | 104,233,542 | 0 | 10 | Apache-2.0 | 2019-02-06T11:28:28 | 2017-09-20T15:27:44 | Python | UTF-8 | Python | false | false | 549 | py | # -*- coding: utf-8 -*-
import unittest
from openregistry.assets.claimrights.tests.base import AssetTransferWebTest
from openregistry.assets.core.tests.plugins.transferring.mixins import AssetOwnershipChangeTestCaseMixin
class AssetOwnershipChangeTest(AssetTransferWebTest,
AssetOwnershipChangeTestCaseMixin):
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(AssetOwnershipChangeTest))
return suite
if __name__ == "__main__":
unittest.main(defaultTest="suite")
| [
"[email protected]"
]
| |
0d4374fc859560faca1bf38f60793e519cb4ea39 | e9173667eec2576782863a51ee63672f9b419297 | /k2.py | b0c3e665770160fd0f0a9f0e424c8e55fafe7c96 | []
| no_license | sabareesh123/pythonprogamming | d41c23ddae183ded09eafde445273126c6b56fcf | 004f248aa2e25f2855d6ccafbb9244447bfb5873 | refs/heads/master | 2020-05-30T06:28:54.901030 | 2019-08-06T11:50:10 | 2019-08-06T11:50:10 | 189,580,451 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | e=int(input(""))
if(e%2==0):
print("Even")
else:
print("Odd")
| [
"[email protected]"
]
| |
438a534b66b835b18edc0a542fc5499bae377670 | ca23b411c8a046e98f64b81f6cba9e47783d2584 | /cache_replacement/policy_learning/cache/main.py | 5cbddf2a4c41057f1d91d6f6838f52f0665a237d | [
"CC-BY-4.0",
"Apache-2.0"
]
| permissive | pdybczak/google-research | 1fb370a6aa4820a42a5d417a1915687a00613f9c | 0714e9a5a3934d922c0b9dd017943a8e511eb5bc | refs/heads/master | 2023-03-05T23:16:11.246574 | 2021-01-04T11:30:28 | 2021-01-04T11:30:28 | 326,629,357 | 1 | 0 | Apache-2.0 | 2021-02-01T12:39:09 | 2021-01-04T09:17:36 | Jupyter Notebook | UTF-8 | Python | false | false | 5,923 | py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: disable=line-too-long
r"""Runs cache simulation.
Example usage:
python3 -m cache_replacement.policy_learning.cache.main \
--experiment_base_dir=/tmp \
--experiment_name=sample_belady_llc \
--cache_configs=cache_replacement/policy_learning/cache/configs/default.json \
--cache_configs=cache_replacement/policy_learning/cache/configs/eviction_policy/belady.json \
--memtrace_file=cache_replacement/policy_learning/cache/traces/sample_trace.csv
Simulates a cache configured by the cache configs with Belady's as the
replacement policy on the sample trace.
"""
# pylint: enable=line-too-long
import os
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
import tqdm
from cache_replacement.policy_learning.cache import cache as cache_mod
from cache_replacement.policy_learning.cache import evict_trace as evict
from cache_replacement.policy_learning.cache import memtrace
from cache_replacement.policy_learning.common import config as cfg
from cache_replacement.policy_learning.common import utils
FLAGS = flags.FLAGS
flags.DEFINE_multi_string(
"cache_configs",
[
"cache_replacement/policy_learning/cache/configs/default.json", # pylint: disable=line-too-long
"cache_replacement/policy_learning/cache/configs/eviction_policy/lru.json" # pylint: disable=line-too-long
],
"List of config paths merged front to back for the cache.")
flags.DEFINE_multi_string(
"config_bindings", [],
("override config with key=value pairs "
"(e.g., eviction_policy.policy_type=greedy)"))
flags.DEFINE_string(
"experiment_base_dir", "/tmp/experiments",
"Base directory to store all experiments in. Should not frequently change.")
flags.DEFINE_string(
"experiment_name", "unnamed",
"All data related to this experiment is written to"
" experiment_base_dir/experiment_name.")
flags.DEFINE_string(
"memtrace_file",
"cache_replacement/policy_learning/cache/traces/omnetpp_train.csv",
"Memory trace file path to use.")
flags.DEFINE_integer(
"tb_freq", 10000, "Number of cache reads between tensorboard logs.")
flags.DEFINE_integer(
"warmup_period", int(2e3), "Number of cache reads before recording stats.")
flags.DEFINE_bool(
"force_overwrite", False,
("If true, overwrites directory at "
" experiment_base_dir/experiment_name if it exists."))
def log_scalar(tb_writer, key, value, step):
summary = tf.Summary(value=[tf.Summary.Value(tag=key, simple_value=value)])
tb_writer.add_summary(summary, step)
def main(_):
# Set up experiment directory
exp_dir = os.path.join(FLAGS.experiment_base_dir, FLAGS.experiment_name)
utils.create_experiment_directory(exp_dir, FLAGS.force_overwrite)
tensorboard_dir = os.path.join(exp_dir, "tensorboard")
tf.disable_eager_execution()
tb_writer = tf.summary.FileWriter(tensorboard_dir)
miss_trace_path = os.path.join(exp_dir, "misses.csv")
evict_trace_path = os.path.join(exp_dir, "evictions.txt")
cache_config = cfg.Config.from_files_and_bindings(
FLAGS.cache_configs, FLAGS.config_bindings)
with open(os.path.join(exp_dir, "cache_config.json"), "w") as f:
cache_config.to_file(f)
flags_config = cfg.Config({
"memtrace_file": FLAGS.memtrace_file,
"tb_freq": FLAGS.tb_freq,
"warmup_period": FLAGS.warmup_period,
})
with open(os.path.join(exp_dir, "flags.json"), "w") as f:
flags_config.to_file(f)
logging.info("Config: %s", str(cache_config))
logging.info("Flags: %s", str(flags_config))
cache_line_size = cache_config.get("cache_line_size")
with memtrace.MemoryTrace(
FLAGS.memtrace_file, cache_line_size=cache_line_size) as trace:
with memtrace.MemoryTraceWriter(miss_trace_path) as write_trace:
with evict.EvictionTrace(evict_trace_path, False) as evict_trace:
def write_to_eviction_trace(cache_access, eviction_decision):
evict_trace.write(
evict.EvictionEntry(cache_access, eviction_decision))
cache = cache_mod.Cache.from_config(cache_config, trace=trace)
# Warm up cache
for _ in tqdm.tqdm(range(FLAGS.warmup_period), desc="Warming up cache"):
pc, address = trace.next()
hit = cache.read(pc, address, [write_to_eviction_trace])
if not hit:
write_trace.write(pc, address)
if trace.done():
raise ValueError()
# Discard warm-up cache statistics
cache.hit_rate_statistic.reset()
num_reads = 0
with tqdm.tqdm(desc="Simulating cache on MemoryTrace") as pbar:
while not trace.done():
pc, address = trace.next()
hit = cache.read(pc, address, [write_to_eviction_trace])
if not hit:
write_trace.write(pc, address)
num_reads += 1
if num_reads % FLAGS.tb_freq == 0:
log_scalar(tb_writer, "cache_hit_rate",
cache.hit_rate_statistic.success_rate(), num_reads)
pbar.update(1)
log_scalar(tb_writer, "cache_hit_rate",
cache.hit_rate_statistic.success_rate(), num_reads)
# Force flush, otherwise last writes will be lost.
tb_writer.flush()
if __name__ == "__main__":
app.run(main)
| [
"[email protected]"
]
| |
deb56472c890832c3e7ee3dae8b4a62f9590c3d3 | 74863206d868c63d73ed927c5d4559fe4e2320fd | /week 5/wk 5 q 2.py | 4e92daa6b065054e24c2e2d95ebeb2cbd758f5ac | []
| no_license | Shubhanshu-Nishad/210-Coursework-Amanjit-S-Phull | e58a622b9b0bd2da3259f318944d1164c9f3fd93 | 01ed9eb426d3af180cb486503ab8bfcdf6694e90 | refs/heads/master | 2022-12-18T06:08:58.172949 | 2020-10-01T14:27:44 | 2020-10-01T14:27:44 | 300,308,089 | 1 | 0 | null | 2020-10-01T14:26:13 | 2020-10-01T14:26:12 | null | UTF-8 | Python | false | false | 1,155 | py | class Node(object):
def __init__(self, value):
self.value=value
self.next=None
self.prev=None
class List(object):
def __init__(self):
self.head=None
self.tail=None
def insert(self,n,x):
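        # Insert node x immediately after node n. Passing n=None only places x
        # when the list is still empty, in which case x becomes head and tail.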
if n!=None:
x.next=n.next
n.next=x
x.prev=n
if x.next!=None:
x.next.prev=x
if self.head==None:
self.head=self.tail=x
x.prev=x.next=None
elif self.tail==n:
self.tail=x
def delete(self,n): #Remove pointers to an element
if n.prev != None:
n.prev.next = n.next
else:
self.head = n.next
if n.next != None:
n.next.prev = n.prev
else:
self.tail = n.prev
def display(self):
values=[]
n=self.head
while n!=None:
values.append(str(n.value))
n=n.next
print ("List: ",",".join(values))
if __name__ == '__main__':
l=List()
l.insert(None, Node(4))
l.insert(l.head,Node(6))
l.insert(l.head,Node(8))
l.delete(l.tail)
l.display()
| [
"[email protected]"
]
| |
1c7ed19f2aaacdb47b9e5eefd21dd227de5cb2ed | d024ccbb4cc04af3866a4db1ac1d8c1d7395d909 | /boj/3040.py | 7340db9c6af2e68f61e4fb313c8b4a7a0a8b412e | []
| no_license | demetoir/ps-solved-code | ff0418dddd10f3b053c9b8d32af48027b10c8481 | f4d4fd2183176b083f2287c9d89c6d5a1e983cc5 | refs/heads/master | 2022-10-14T20:11:34.581439 | 2020-06-12T11:24:11 | 2020-06-12T11:24:11 | 68,782,768 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | import itertools
l=[]
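# Read nine values with Python 2 input() and print any seven of them whose
# sum is exactly 100, one value per line.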
for i in range(9):l+=[input()]
for s in itertools.combinations(range(9),7):
if sum(l[i] for i in s)==100:
print "\n".join(str(l[i]) for i in s) | [
"[email protected]"
]
| |
b15b87aebf2cf07b8e065a527f31b2b55377fa13 | d7ee76b7f1d6cd038982335792f15959a58a8395 | /SWEA/4615. 재미있는 오셀로 게임.py | e557fb8ef44326abc668927b3051576baa6bd26d | []
| no_license | min1378/-algorithm | 1c5dea6b2f03e4d376275cfccbf11b240bc659d9 | bfb720277160077a816deec21469a7e597c62d14 | refs/heads/master | 2021-08-02T06:54:10.478501 | 2021-07-31T14:03:01 | 2021-07-31T14:03:01 | 202,688,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,638 | py | #import sys
from pprint import pprint
#sys.stdin = open('4615.txt', 'r')
# Wall-check function: is (x, y) off the board?
def isWall(x, y):
if x > N or x < 1 :
return True
if y > N or y < 1 :
return True
return False
# Collect the positions of stones whose colour has to be flipped.
def enermy_check(x, y, mode, color):
check_enermy = []
dx = [0, 1, 1, 1, 0, -1, -1, -1]
dy = [-1, -1, 0, 1, 1, 1, 0, -1]
while True:
        # Step one cell in the direction given by mode.
test_x = x+dx[mode]
test_y = y+dy[mode]
        # If we hit a wall, discard what was collected so far and return an empty list.
if isWall(test_x, test_y) == True:
return []
        # If we meet a stone of the same colour, return the coordinates collected so far.
if data[test_y-1][test_x-1] == color:
return check_enermy
        # A 0 means an empty cell, so return an empty list.
if data[test_y-1][test_x-1] == 0:
return []
        # Otherwise record the coordinate in check_enermy.
else :
check_enermy.append([test_x, test_y])
        # Move on to the coordinate that was just checked.
x = test_x
y = test_y
# Inspection function: flips every stone captured by the move.
def inspect(x, y, color):
    # Loop over the eight direction modes.
for mode in range(8):
        # Get the flip candidates returned by enermy_check.
result = enermy_check(x, y, mode, color)
        # If the list is not empty,
if result != []:
            # paint every coordinate in result with the new colour.
for check_x, check_y in result:
data[check_y-1][check_x-1] = color
TC=int(input())
for test_case in range(1, TC+1):
N, M = map(int, input().split())
data = [[0]*N for _ in range(N)]
    # Black is 1, white is 2.
data[N // 2 - 1][N // 2 - 1] = 2
data[N // 2 - 1][N // 2] = 1
data[N // 2][N // 2 - 1] = 1
data[N // 2][N // 2] = 2
check = [list(map(int, input().split())) for _ in range(M)]
while True:
if check == []:
break
        # Pop moves from the front of check one at a time and place the stone.
x, y, color = check.pop(0)
data[y-1][x-1] = color
        # Work out how the board changes after placing this stone.
inspect(x, y, color)
    # Once the loop ends every stone has been placed, so count the black and white stones.
black = 0
white = 0
for line in data:
black += line.count(1)
white += line.count(2)
print("#{} {} {}".format(test_case, black, white)) | [
"[email protected]"
]
| |
812b33798a282b1ce8b7d31e14999b7e5d629e07 | 9255068b7b45348a084555b8c413fd55a4b12013 | /odfdo/link.py | 43d15ef07e9f2067b7636723ff4a05076ec64545 | [
"Apache-2.0"
]
| permissive | mat-m/odfdo | fdf9752f0273760deb59403f23dbc20eac3de753 | a4a509a056517ecf91449e029b36fe9a8ffa8ed0 | refs/heads/master | 2020-03-18T05:04:16.263647 | 2018-05-21T21:46:13 | 2018-05-21T21:54:11 | 134,322,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,931 | py | # Copyright 2018 Jérôme Dumonteil
# Copyright (c) 2009-2013 Ars Aperta, Itaapy, Pierlis, Talend.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Authors (odfdo project): [email protected]
# The odfdo project is a derivative work of the lpod-python project:
# https://github.com/lpod/lpod-python
# Authors: Hervé Cauwelier <[email protected]>
# Jerome Dumonteil <[email protected]>
from .element import Element, register_element_class
from .paragraph_base import ParagraphBase
class Link(ParagraphBase):
"""Link class, <text:a> odf_element.
"""
_tag = 'text:a'
_properties = (('url', 'xlink:href'), ('name',
'office:name'), ('title',
'office:title'),
('target_frame',
'office:target-frame-name'), ('show', 'xlink:show'),
('visited_style',
'text:visited-style-name'), ('style', 'text:style-name'))
def __init__(self,
url='',
name=None,
title=None,
text=None,
target_frame=None,
style=None,
visited_style=None,
**kwargs):
"""
Arguments:
url -- str
name -- str
title -- str
text -- str
target_frame -- '_self', '_blank', '_parent', '_top'
style -- string
visited_style -- string
return: Link
"""
super().__init__(**kwargs)
if self._do_init:
self.url = url
if name is not None:
self.name = name
if title is not None:
self.title = title
if text is not None:
self.text = text
if target_frame is not None:
self.target_frame = target_frame
# show can be: 'new' or 'replace'"
if target_frame == '_blank':
self.show = 'new'
else:
self.show = 'replace'
if style is not None:
self.style = style
if visited_style is not None:
self.visited_style = visited_style
Link._define_attribut_property()
register_element_class(Link)
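# Hypothetical usage sketch (not part of the module): build a hyperlink and
# attach it to a paragraph; assumes odfdo's Paragraph class.
#   from odfdo import Paragraph
#   paragraph = Paragraph("See ")
#   paragraph.append(Link("https://example.com/", text="example",
#                         target_frame="_blank"))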
| [
"[email protected]"
]
| |
092653579244e4f4c095d89145e7b1090c29b97a | 8ecd899a8558ad0a644ecefa28faf93e0710f6fb | /other_practices/JOI2009_ho2.py | bf6da9af81b0852342546b9a6414ba07ece8d743 | []
| no_license | yut-inoue/AtCoder_ABC | b93885547049788d452e86b442a4a9f5ee191b0e | 3d2c4b2b2f8871c75f86040ad07ccd7736ad3dbe | refs/heads/master | 2021-07-03T09:09:20.478613 | 2021-02-21T13:20:31 | 2021-02-21T13:20:31 | 227,140,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | import bisect
d = int(input())
n = int(input())
m = int(input())
dl = [int(input()) for _ in range(n-1)]
ml = [int(input()) for _ in range(m)]
dl.append(0)
dl.append(d)
dl.sort()
dis = 0
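# For each position m in ml, the closest entry of dl is either the first one
# at or after m (dl[ind]) or the one just before it (dl[ind-1]).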
for m in ml:
ind = bisect.bisect_left(dl, m)
dis += min(abs(dl[ind]-m), abs(dl[ind-1]-m))
print(dis)
| [
"[email protected]"
]
| |
fdcfdfd429431291ef3a98faf19e4dc7d4ffcdb2 | 841c0df958129bef4ec456630203992a143c7dc7 | /src/1/1297.py | 8c9a783bb90ccd8c2f495c94b1b79838d0b82fc5 | [
"MIT"
]
| permissive | xCrypt0r/Baekjoon | da404d3e2385c3278a1acd33ae175c2c1eb82e5e | 7d858d557dbbde6603fe4e8af2891c2b0e1940c0 | refs/heads/master | 2022-12-25T18:36:35.344896 | 2021-11-22T20:01:41 | 2021-11-22T20:01:41 | 287,291,199 | 16 | 25 | MIT | 2022-12-13T05:03:49 | 2020-08-13T13:42:32 | C++ | UTF-8 | Python | false | false | 385 | py | """
1297. TV Size (TV 크기)
Author: xCrypt0r
Language: Python 3
Memory used: 29,380 KB
Running time: 72 ms
Solved on: September 20, 2020
"""
def main():
d, h, w = map(int, input().split())
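    # With diagonal d and height:width ratio h:w, the real height is
    # d*h/sqrt(h^2 + w^2) and the real width is d*w/sqrt(h^2 + w^2); the
    # squared terms use integer division and the results are truncated below.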
hk = pow(d ** 2 * h ** 2 // (h ** 2 + w ** 2), 0.5)
wk = pow(d ** 2 * w ** 2 // (h ** 2 + w ** 2), 0.5)
print(f'{int(hk)} {int(wk)}')
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
9ef04a08bc10dea64e0d9e928d37a877bfa39cc1 | 603ed82854e5b67af76d9bbdf4d2183419c6167c | /pages/views.py | 05b88e646d6daa37ff170c9d948d5fc2c442c219 | []
| no_license | IbrahimAlAzhar/Basic-CRUD | 26a209433fefb3da38d742602e54abeff83daa8d | 2e9d68537270fc72b44757b39eea845d78602902 | refs/heads/master | 2022-12-14T11:34:20.784724 | 2020-09-05T21:18:51 | 2020-09-05T21:18:51 | 293,155,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,068 | py | from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
def home_view(request,*args, **kwargs):
print(args, kwargs)
print(request)
print(request.user)
# return HttpResponse("<h1>Hello world</h1>")
return render(request,"products/home.html", {})
def contact_view(request,*args, **kwargs):
# return HttpResponse("<h1>Contact page</h1>")
return render(request, "products/contact.html", {})
def about_view(request,*args, **kwargs):
print(request.user)
# return HttpResponse("<h1>Hello from the other side</h1>")
my_context = {
"my_text": "this is about us",
"my_name": "ibrahim al azhar",
"my_number": 123,
"my_list": [12,23,23,44,"abc","azhar"],
"my_html": "<h1>This one is html tag</h1>"
}
return render(request, "products/about.html", my_context)
def social_view(request,*args, **kwargs):
# return HttpResponse("<h1>Social page</h1>")
return render(request, "products/social.html", {}) | [
"[email protected]"
]
| |
ea7db6646783c4f5b7190aa6fb3fa228a8266c5b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03061/s160632113.py | 8ebe292df9e3f2e8d6f104cfca93a5b226a41bb0 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | from fractions import gcd
N = int(input())
A = list(map(int, input().split()))
L = [-1] * (N-1)
L[0] = A[0]
R = [-1] * (N-1)
R[0] = A[-1]
for i in range(1, N-1):
L[i] = gcd(L[i-1], A[i])
for i in range(1, N-1):
R[i] = gcd(R[i-1], A[-i-1])
ans = 0
for i in range(1, N-1):
tmp = gcd(L[i-1], R[N-i-2])
ans = max(ans, tmp)
ans = max(ans, L[N-2])
ans = max(ans, R[N-2])
print(ans)
| [
"[email protected]"
]
| |
92a05238afd3189143bdf1d508e8b2205b46dabe | 917c0949dd410439e7f882e20a3fb744b7b4bd6e | /Pandas/obesity.py | bf7e8dceb6a5561f1b97151d830ba938469e350c | [
"MIT"
]
| permissive | daveaseeman/PyEng | 229d01df85c2959b4333d5bd19ba15029b11ee38 | 31403a7f0e557456eeaad865295213cf27847bf9 | refs/heads/master | 2020-12-28T19:11:49.210811 | 2017-05-15T23:13:42 | 2017-05-15T23:13:42 | 43,885,548 | 0 | 0 | null | 2015-10-08T12:03:50 | 2015-10-08T12:03:49 | null | UTF-8 | Python | false | false | 1,221 | py | import pandas as pd
import matplotlib.pyplot as plt
data = pd.ExcelFile("Obes-phys-acti-diet-eng-2014-tab.xls")
print data.sheet_names
# Read section 7.1 from the Excel file
# Define the columns to be read
columns1 = ['year', 'total', 'males', 'females']
data_gender = data.parse(u'7.1', skiprows=4, skipfooter=14, names=columns1)
#print data_gender
# Remove the N/A from the data
data_gender.dropna(inplace = True)
#print data_gender
data_gender.set_index('year', inplace=True)
# Plot all
data_gender.plot()
plt.show()
# Read 2nd section, by age
data_age = data.parse(u'7.2', skiprows=4, skipfooter=14)
print data_age
# Rename unames to year
data_age.rename(columns={u'Unnamed: 0': u'Year'}, inplace=True)
# Drop empties and reset index
data_age.dropna(inplace=True)
data_age.set_index('Year', inplace=True)
#plot
data_age.plot()
plt.show()
# Plotting everything cause total to override everything. So drop it.
# Drop the total column and plot
data_age_minus_total = data_age.drop('Total', axis = 1)
data_age_minus_total.plot()
plt.show()
plt.close()
#Plot children vs adults
data_age['Under 16'].plot(label = "Under 16")
data_age['25-34'].plot(label = "25-34")
plt.legend(loc="upper right")
plt.show()
| [
"[email protected]"
]
| |
976eab4c20ccc6d97267a0e261e856efb42bac17 | 9a393d5dae8147088b1c9b78987197c60a6618cf | /0828/모의2.py | 5afbfdf130cc6d42c69e1a772ee5ab0f6d43cf74 | []
| no_license | bumbum9944/bumpycharm | 5444440379f6d5142130bc8a7a4b69276f23f991 | b487eb433d41ff0d2f6e1ca4f723225b114b96c0 | refs/heads/master | 2020-07-05T16:04:35.153231 | 2019-10-02T00:14:00 | 2019-10-02T00:14:00 | 202,693,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | def shuffle(cards, card_u, card_d, N):
X = list(range(N))
global cnt
if cards != card_u and cards != card_d:
if cnt > 5:
return -1
else:
cnt += 1
for x in X:
if x > N // 2:
x = x - N // 2
for change in range(N // 2 - 1 - x, N // 2 + x, 2):
cards[change], cards[change + 1] = cards[change + 1], cards[change]
return shuffle(cards, card_u, card_d, N)
else:
return cnt
T = int(input())
for tc in range(1, T+1):
cnt = 0
N = int(input())
cards = list(map(int, input().split()))
card_u = sorted(cards)
card_d = card_u[::-1]
ans = shuffle(cards, card_u, card_d, N)
print('#{} {}'.format(tc, ans))
| [
"[email protected]"
]
| |
c570ee9849bd9f6570218d86553e22d114fc0308 | 0130c8b14927097663157846adc4b146d67d2fda | /tests/common/test_op/conv_ad_v2.py | 8938d4b68efd6d2d80bc3ba6342cb407603a7729 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-3-Clause",
"NCSA",
"LLVM-exception",
"Zlib",
"BSD-2-Clause",
"MIT"
]
| permissive | Shigangli/akg | e8be3e0ee1eafe3e42b4cc4d424c28f08ef4c0bc | 3766c54e0b109541932d147a6b5643a334b82403 | refs/heads/master | 2023-09-06T05:13:40.571583 | 2021-11-23T03:44:54 | 2021-11-23T03:44:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,379 | py | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function: conv_ad_v2"""
import akg.topi
import akg.tvm
import akg
import akg.lang.cce
from akg import dim
from akg.ops.nn import conv as conv_origin
from akg.tvm import truncdiv, truncmod, floordiv
from akg.utils import kernel_exec as utils
def set_dims(fmap_shape, filter_shape, pad_, stride_, dilation_, tile_hh,
tile_coco, tile_mm, tile_kk, tile_nn, block_size):
"""set dim info in attrs."""
in_n, in_c, in_h, in_w = fmap_shape
in_c = (in_c + block_size - 1) // block_size * block_size
in_c1 = in_c // block_size
# kernel shape (NCHW -> NC1HWC0 -> Fractal)
k_n, k_c, k_h, k_w = filter_shape
k_c = (k_c + block_size - 1) // block_size * block_size
k_n = (k_n + block_size - 1) // block_size * block_size
padding = (pad_[0], pad_[0], pad_[1], pad_[1])
p_top, p_bottom, p_left, p_right = padding
s_h, s_w = (stride_[0], stride_[1])
d_h, d_w = (dilation_[0], dilation_[1])
if (tile_hh == in_h):
tile_hh += p_top + p_bottom
tile_coco = (tile_coco + block_size - 1) // block_size * block_size
tile_mm = (tile_mm + block_size - 1) // block_size * block_size
tile_kk = (tile_kk + block_size - 1) // block_size * block_size
tile_nn = (tile_nn + block_size - 1) // block_size * block_size
k_h_d = (k_h - 1) * d_h + 1
k_w_d = (k_w - 1) * d_w + 1
out_h = (in_h + p_top + p_bottom - k_h_d) // (s_h) + 1
tile_out_h = (tile_hh - k_h_d) // s_h + 1
out_w = (in_w + p_left + p_right - k_w_d) // (s_w) + 1
out_shape_nc1hwc0 = (in_n, k_n // block_size, out_h, out_w, block_size)
out_n, out_c1, out_h, out_w, out_c0 = out_shape_nc1hwc0
if (tile_coco > 0):
c1_cut = tile_coco // block_size
else:
c1_cut = out_c1
# set dim
info = dim.Dim()
if (out_n > 1):
info.setdim(index=0, axis=0, tilel1=1, tilel0=0) # n
if (out_c1 > 1):
info.setdim(index=0, axis=0, tilel1=c1_cut, tilel0=0) # c1
if (out_h > 1):
info.setdim(index=0, axis="H", tilel1=tile_out_h, tilel0=0) # h
if (out_w > 1):
info.setdim(index=0, axis=3, tilel1=out_w, tilel0=0) # w
if (out_c0 > 1):
info.setdim(index=0, axis=4, tilel1=out_c0, tilel0=0) # c0
if (in_c1 > 1):
info.setdim(index=0, axis=5, tilel1=in_c1, tilel0=0) # kc1
if (k_h > 1):
info.setdim(index=0, axis=5, tilel1=k_h, tilel0=0) # kh
if (k_w > 1):
info.setdim(index=0, axis=5, tilel1=k_w, tilel0=0) # kw
return str(info)
def expr_to_int(A):
result = []
for i in range(len(A)):
result.append(A[i].value)
return result
@akg.tvm.register_func("akg.autodiff.conv_compute_forward")
def conv_compute_forward(fmap_shape, filter_shape, pad_, stride_, dilation_, A, B, bias_value=None,
tile_hh=0, tile_coco=0, tile_mm=0, tile_kk=0, tile_nn=0, bypass_l1=False,
use_bias=False, block_size=16, conv_dtype='float16'):
if (not isinstance(fmap_shape[0], int)):
fmap_shape = expr_to_int(fmap_shape)
if (not isinstance(filter_shape[0], int)):
filter_shape = expr_to_int(filter_shape)
if (not isinstance(pad_[0], int)):
pad_ = expr_to_int(pad_)
if (not isinstance(stride_[0], int)):
stride_ = expr_to_int(stride_)
if (not isinstance(dilation_[0], int)):
dilation_ = expr_to_int(dilation_)
# input shape (NCHW -> NC1HWC0)
in_n, in_c, in_h, in_w = fmap_shape
# kernel shape (NCHW -> NC1HWC0 -> Fractal)
k_n, k_c, k_h, k_w = filter_shape
# padding((padding_h, padding_w) -> (padding_top, padding_bottom, padding_left, padding_right))
padding = (pad_[0], pad_[0], pad_[1], pad_[1])
p_top, p_bottom, p_left, p_right = padding
# stride (stride_h, stride_w)
s_h, s_w = stride_
# dilation (dilation_h, dilation_w)
d_h, d_w = dilation_
if (tile_hh == in_h):
tile_hh += p_top + p_bottom
tile_coco = (tile_coco + block_size - 1) // block_size * block_size
tile_mm = (tile_mm + block_size - 1) // block_size * block_size
tile_kk = (tile_kk + block_size - 1) // block_size * block_size
tile_nn = (tile_nn + block_size - 1) // block_size * block_size
h_window_cut = (tile_hh - k_h) // s_h + 1
input_shape_nc1hwc0 = (in_n, in_c // block_size, in_h, in_w, block_size)
in_n, _, in_h, in_w, _ = input_shape_nc1hwc0
kernel_shape_nc1hwc0 = (k_n, k_c // block_size, k_h, k_w, block_size)
k_n, k_c1, k_h, k_w, k_c0 = kernel_shape_nc1hwc0
# bias shape
bias_shape_nc1hwc0 = (1, k_n // block_size, 1, 1, block_size)
if use_bias:
bias_name = 'input2'
bias_value = akg.tvm.placeholder(bias_shape_nc1hwc0, dtype=conv_dtype, name=bias_name)
else:
bias_name = 'None'
bias_value = None
# Create reduction variables
kc1 = akg.tvm.reduce_axis((0, k_c1), name='kc1')
kh = akg.tvm.reduce_axis((0, k_h), name='kh')
kw = akg.tvm.reduce_axis((0, k_w), name='kw')
kc0 = akg.tvm.reduce_axis((0, k_c0), name='kc0')
k_h_d = (k_h - 1) * d_h + 1
k_w_d = (k_w - 1) * d_w + 1
out_h = (in_h + p_top + p_bottom - k_h_d) // (s_h) + 1
out_w = (in_w + p_left + p_right - k_w_d) // (s_w) + 1
out_shape_nc1hwc0 = (in_n, k_n // block_size, out_h, out_w, block_size)
_, out_c1, out_h, out_w, _ = out_shape_nc1hwc0
if (tile_coco > 0):
c1_cut = tile_coco // block_size
else:
c1_cut = out_c1
# set dim
info = set_dims(fmap_shape, filter_shape, pad_, stride_, dilation_,
tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, block_size)
# Compute the convolution
output_name = "output0"
output_bias_name = "output1"
C = akg.tvm.compute(out_shape_nc1hwc0,
lambda n, c1, h, w, c0: akg.lang.cce.mmad(
akg.tvm.if_then_else(akg.tvm.any((h * s_h + kh) < p_top, (h * s_h + kh) > (in_h + p_top - 1),
(w * s_w + kw) < p_left, (w * s_w + kw) > (in_w + p_left - 1)),
akg.tvm.const(0.0, 'float16'),
A[n, kc1, (h * s_h + (kh * d_h) - p_top), (w * s_w + (kw * d_w) - p_left), kc0])
* B[(kc1 * k_h + kh) * k_w + kw, c1, c0, kc0],
axis=[kc1, kh, kw, kc0]), name=output_name,
attrs={
"pragma_conv_kernel_n": k_n,
"pragma_conv_kernel_h": k_h,
"pragma_conv_kernel_w": k_w,
"pragma_conv_padding_top": p_top,
"pragma_conv_padding_bottom": p_bottom,
"pragma_conv_padding_left": p_left,
"pragma_conv_padding_right": p_right,
"pragma_conv_bypass_l1": 1 if bypass_l1 else 0,
"pragma_conv_stride_h": s_h,
"pragma_conv_stride_w": s_w,
"pragma_conv_dilation_h": d_h,
"pragma_conv_dilation_w": d_w,
"pragma_conv_fm_n": in_n,
"pragma_conv_fm_c": in_c,
"pragma_conv_fm_h": in_h,
"pragma_conv_fm_w": in_w,
"pragma_conv_h_cut": (h_window_cut - 1) * s_h + k_h_d,
"pragma_conv_w_cut": (in_w + p_left + p_right),
"pragma_conv_co_cut": c1_cut * k_c0,
"pragma_conv_m_cut": tile_mm,
"pragma_conv_k_cut": tile_kk,
"pragma_conv_n_cut": tile_nn,
"feature": A.op.name,
"filter": B.op.name,
"bias": bias_name,
"res": output_name,
"res_bias": output_bias_name})
if use_bias:
cube = akg.tvm.compute(out_shape_nc1hwc0,
lambda n, c1, h, w, c0: C[n, c1, h, w, c0] + bias_value[0, c1, 0, 0, c0],
name=output_bias_name)
else:
cube = C
return cube
def conv_01(fmap_shape, filter_shape, pad_, stride_, dilation_,
tile_hh=0, tile_coco=0, tile_mm=0, tile_kk=0, tile_nn=0,
use_bias=False, block_size=16, conv_dtype='float16'):
# input shape (NCHW -> NC1HWC0)
in_n, in_c, in_h, in_w = fmap_shape
in_c = (in_c + block_size - 1) // block_size * block_size
# kernel shape (NCHW -> NC1HWC0 -> Fractal)
k_n, k_c, k_h, k_w = filter_shape
k_c = (k_c + block_size - 1) // block_size * block_size
k_n = (k_n + block_size - 1) // block_size * block_size
input_shape_nc1hwc0 = (in_n, in_c // block_size, in_h, in_w, block_size)
kernel_shape_nc1hwc0 = (k_n, k_c // block_size, k_h, k_w, block_size)
k_n, _, k_h, k_w, _ = kernel_shape_nc1hwc0
kernel_shape_fractal = (k_c // block_size * k_h * k_w, k_n // block_size, block_size, block_size)
# A placeholder (NC1HWCO)
A = akg.tvm.placeholder(input_shape_nc1hwc0, dtype=conv_dtype, name="input0")
# B_placeholder (fractal)
B = akg.tvm.placeholder(kernel_shape_fractal, dtype=conv_dtype, name="input1")
data = [A, B]
if use_bias:
bias_shape_nc1hwc0 = (1, k_n // block_size, 1, 1, block_size)
bias_name = "input2"
bias_value = akg.tvm.placeholder(bias_shape_nc1hwc0, dtype=conv_dtype, name=bias_name)
data.append(bias_value)
else:
bias_name = 'None'
bias_value = None
conv, _ = conv_origin.conv(data, fmap_shape, filter_shape, pad_, stride_, dilation_, use_bias)
kernel_name = 'conv_ad'
k_n, k_c, k_h, k_w = filter_shape
k_c = (k_c + block_size - 1) // block_size * block_size
k_n = (k_n + block_size - 1) // block_size * block_size
k_hw = k_h * k_w
const_shift = k_hw - 1
# B in Fractal format; result in Fractal format
def flip_weight(B, k_c, k_hw, const_shift):
out_shape = (B.shape[1].value * k_hw, k_c // block_size, block_size, block_size)
B_flip = akg.tvm.compute(out_shape,
lambda i0, i1, i2, i3: B[i1 * k_hw + const_shift - truncmod(i0, k_hw),
floordiv(i0, k_hw), i3, i2],
name=B.name + "_flipped")
return B_flip
def strided_head(H, s_h, s_w):
n, c1, h, w, c0 = H.shape
out_shape = (n, c1, (h - 1) * s_h + 1, (w - 1) * s_w + 1, c0)
H_strided = akg.tvm.compute(out_shape, lambda i0, i1, i2, i3, i4:
akg.tvm.expr.Select(akg.tvm.any(truncmod(i2, s_h) != 0,
truncmod(i3, s_w) != 0),
akg.tvm.const(0.0, dtype="float16"),
H[i0, i1, floordiv(i2, s_h), floordiv(i3, s_w), i4]),
name=H.name + "_strided")
return H_strided
B_flip = flip_weight(B, k_c, k_hw, const_shift)
pld_B_flip = akg.tvm.placeholder(B_flip.shape, name="inp1_flipped", dtype='float16')
HEAD = akg.tvm.placeholder(conv.shape, name="Head", dtype='float16')
HEAD_n, HEAD_c1, HEAD_h, HEAD_w, HEAD_c0 = HEAD.shape
info = set_dims((HEAD_n.value, HEAD_c1.value * HEAD_c0.value, HEAD_h.value, HEAD_w.value),
(k_c, k_n, k_h, k_w), (2, 2), (1, 1), (1, 1),
tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, block_size)
s_h, s_w = stride_
if (s_h == 1) and (s_w == 1):
ad_attrs = {"ad_conv_enable": 1, "ad_conv_reuse_conv": 1}
jacs = list(akg.differentiate(conv, [A], HEAD, ad_attrs, [HEAD, pld_B_flip, None]))
sjac = akg.tvm.create_schedule([jacs[0].op])
op_vars = [HEAD, pld_B_flip, jacs[0]]
info = set_dims((HEAD_n.value, HEAD_c1.value * HEAD_c0.value, HEAD_h.value, HEAD_w.value),
(k_c, k_n, k_h, k_w), (k_h - 1, k_w - 1), (1, 1), (1, 1),
tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, block_size)
else:
Head_strided = strided_head(HEAD, s_h, s_w)
pld_Head_strided = akg.tvm.placeholder(Head_strided.shape, name="head_strided", dtype='float16')
ad_attrs = {"ad_conv_enable": 1, "ad_conv_reuse_conv": 1}
jacs = list(akg.differentiate(conv, [A], HEAD, ad_attrs, [pld_Head_strided, pld_B_flip, None]))
sjac = akg.tvm.create_schedule([jacs[0].op])
op_vars = [pld_Head_strided, pld_B_flip, jacs[0]]
h_n, h_c1, h_h, h_w, h_c0 = pld_Head_strided.shape
info = set_dims((h_n.value, h_c1.value * h_c0.value, h_h.value, h_w.value), (k_c, k_n, k_h, k_w),
(k_h - 1, k_w - 1), (1, 1), (1, 1), tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, block_size)
with akg.build_config(add_lower_pass=utils.debug_mode(0), dump_pass_ir=True):
mod_backward = akg.build(sjac, op_vars, "cce", name=kernel_name, attrs={"dim": str(info)}, polyhedral=True)
def transpose_data(A):
out_shape = (A.shape[1] * block_size, truncdiv(A.shape[0], block_size), A.shape[2], A.shape[3], block_size)
A_transpose = akg.tvm.compute(out_shape,
lambda j0, j1, j2, j3, j4:
A[j1 * block_size + j4, truncdiv(j0, block_size), j2, j3, truncmod(j0, block_size)],
name=A.name + "_transposed")
return A_transpose
# Head is in 5D format
# Output is in Fractal format
def transpose_convert_head(Head):
out_shape = ((floordiv(Head.shape[0].value, block_size)) * Head.shape[2].value * Head.shape[3].value,
Head.shape[1].value, block_size, block_size)
tmp_6D_shape = (floordiv(Head.shape[0].value, block_size),
block_size, Head.shape[1].value, Head.shape[2].value, Head.shape[3].value, block_size)
Head_6D = akg.topi.reshape(Head, tmp_6D_shape)
# Transpose from (N//block_size_N, block_size_N, C//block_size_C, H, W, block_size_C)
# to (N//block_size_N, H, W, C//block_size_C, block_size_C, block_size_N,)
Head_6D_transpose = akg.topi.transpose(Head_6D, (0, 3, 4, 2, 5, 1))
Head_transpose_convert = akg.topi.reshape(Head_6D_transpose, out_shape)
return Head_transpose_convert
X_transposed = transpose_data(A)
pld_X_transposed = akg.tvm.placeholder(X_transposed.shape, name="inp0_transposed", dtype='float16')
if (s_h > 1) or (s_w > 1):
Head_transposed_converted = strided_head(HEAD, s_h, s_w)
else:
Head_transposed_converted = HEAD
strided_head_n, strided_head_c1, strided_head_h, strided_head_w, strided_head_c0 = Head_transposed_converted.shape
Head_transposed_converted = transpose_convert_head(Head_transposed_converted)
s_transposed_converted = akg.tvm.create_schedule(Head_transposed_converted.op)
pld_Head_transposed_converted = akg.tvm.placeholder(Head_transposed_converted.shape,
name="head_transposed",
dtype='float16')
ad_attrs = {"ad_conv_enable": 1, "ad_conv_reuse_conv": 1}
jacs = list(akg.differentiate(conv, [B], HEAD, ad_attrs, [pld_X_transposed, pld_Head_transposed_converted, None]))
sjac = akg.tvm.create_schedule([jacs[0].op])
op_vars = [HEAD, pld_X_transposed, pld_Head_transposed_converted, jacs[0]]
in_n, in_c1, in_h, in_w, in_c0 = A.shape
info = set_dims((in_c1.value * in_c0.value, in_n.value, in_h.value, in_w.value),
(strided_head_c1.value * strided_head_c0.value, strided_head_n.value,
strided_head_h.value, strided_head_w.value),
(0, 0), (1, 1), (1, 1),
tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, block_size)
with akg.build_config(add_lower_pass=utils.debug_mode(0), dump_pass_ir=True):
mod_backward2 = akg.build(sjac, op_vars, "cce",
name="conv_backward_weight",
attrs={"dim": str(info)},
polyhedral=True)
return mod_backward, mod_backward2
def conv_02(fmap_shape, filter_shape, pad_, stride_, dilation_,
tile_hh=0, tile_coco=0, tile_mm=0, tile_kk=0, tile_nn=0, bypass_l1=False,
use_bias=False, block_size=16, conv_dtype='float16'):
# input shape (NCHW -> NC1HWC0)
in_n, in_c, in_h, in_w = fmap_shape
in_c = (in_c + block_size - 1) // block_size * block_size
# kernel shape (NCHW -> NC1HWC0 -> Fractal)
k_n, k_c, k_h, k_w = filter_shape
k_c = (k_c + block_size - 1) // block_size * block_size
k_n = (k_n + block_size - 1) // block_size * block_size
input_shape_nc1hwc0 = (in_n, in_c // block_size, in_h, in_w, block_size)
in_n, _, in_h, in_w, _ = input_shape_nc1hwc0
kernel_shape_nc1hwc0 = (k_n, k_c // block_size, k_h, k_w, block_size)
k_n, _, k_h, k_w, _ = kernel_shape_nc1hwc0
kernel_shape_fractal = (k_c // block_size * k_h * k_w, k_n // block_size, block_size, block_size)
# A placeholder (NC1HWCO)
A = akg.tvm.placeholder(input_shape_nc1hwc0, dtype=conv_dtype, name="input0")
# B_placeholder (fractal)
B = akg.tvm.placeholder(kernel_shape_fractal, dtype=conv_dtype, name="input1")
if use_bias:
bias_shape_nc1hwc0 = (1, k_n // block_size, 1, 1, block_size)
bias_name = "input2"
bias_value = akg.tvm.placeholder(bias_shape_nc1hwc0, dtype=conv_dtype, name=bias_name)
else:
bias_name = 'None'
bias_value = None
conv_forward = conv_compute_forward(fmap_shape, filter_shape, pad_, stride_, dilation_, A, B, bias_value,
tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, bypass_l1,
use_bias, block_size, conv_dtype)
k_hw = k_h * k_w
const_shift = k_hw - 1
# B in Fractal format; result in Fractal format
def flip_weight(B, k_c, k_hw, const_shift):
out_shape = (B.shape[1].value * k_hw, k_c // block_size, block_size, block_size)
B_flip = akg.tvm.compute(out_shape,
lambda i0, i1, i2, i3:
B[i1 * k_hw + const_shift - truncmod(i0, k_hw), floordiv(i0, k_hw), i3, i2],
name=B.name + "_flipped")
return B_flip
# H in 5D format; result in 5D format
def strided_head(H, s_h, s_w):
n, c1, h, w, c0 = H.shape
out_shape = (n, c1, (h - 1) * s_h + 1, (w - 1) * s_w + 1, c0)
H_strided = akg.tvm.compute(out_shape,
lambda i0, i1, i2, i3, i4:
akg.tvm.expr.Select(akg.tvm.any(truncmod(i2, s_h) != 0, truncmod(i3, s_w) != 0),
akg.tvm.const(0.0, dtype="float16"),
H[i0, i1, floordiv(i2, s_h), floordiv(i3, s_w), i4]),
name=H.name + "_strided")
return H_strided
# A in 5D format; result in 5D format
def transpose_data(A):
out_shape = (A.shape[1].value * block_size, A.shape[0].value // block_size,
A.shape[2].value, A.shape[3].value, block_size)
A_transpose = akg.tvm.compute(out_shape,
lambda j0, j1, j2, j3, j4:
A[j1 * block_size + j4, floordiv(j0, block_size), j2, j3, truncmod(j0, block_size)],
name=A.name + "_transposed")
return A_transpose
# Head is in 5D format; result in Fractal format
def transpose_convert_head(Head):
out_shape = ((Head.shape[0].value // block_size) * Head.shape[2].value * Head.shape[3].value,
Head.shape[1].value, block_size, block_size)
tmp_6D_shape = (Head.shape[0].value // block_size, block_size,
Head.shape[1].value, Head.shape[2].value, Head.shape[3].value, block_size)
Head_6D = akg.topi.reshape(Head, tmp_6D_shape)
Head_6D_transpose = akg.topi.transpose(Head_6D, (0, 3, 4, 2, 5, 1))
Head_transpose_convert = akg.topi.reshape(Head_6D_transpose, out_shape)
return Head_transpose_convert
HEAD = akg.tvm.placeholder(conv_forward.shape, name="Head", dtype='float16')
Head_transposed_NCHW = (HEAD.shape[1].value * HEAD.shape[4].value, HEAD.shape[0].value,
HEAD.shape[2].value, HEAD.shape[3].value)
s_h, s_w = stride_
Head_strided_NCHW = (HEAD.shape[0].value, HEAD.shape[1].value * HEAD.shape[4].value,
(HEAD.shape[2].value - 1) * s_h + 1, (HEAD.shape[3].value - 1) * s_w + 1)
A_transposed_NCHW = (in_c, in_n, in_h, in_w)
K_flip_rot_NCHW = (k_c, k_n, k_h, k_w)
Head_transposed_converted = transpose_convert_head(HEAD)
pld_Head_transposed_converted = akg.tvm.placeholder(Head_transposed_converted.shape,
name="Head_trans_fractal", dtype=conv_dtype)
A_transposed = transpose_data(A)
pld_A_transposed = akg.tvm.placeholder(A_transposed.shape, name="A_trans", dtype=conv_dtype)
info = dim.Dim()
info.setdim(index=0, axis=0, tilel1=1, tilel0=1)
info.setdim(index=0, axis=1, tilel1=1, tilel0=1)
info.setdim(index=0, axis=2, tilel1=1, tilel0=1)
info.setdim(index=0, axis=3, tilel1=1, tilel0=1)
B_flip = flip_weight(B, k_c, k_hw, const_shift)
pld_B_flipped = akg.tvm.placeholder(B_flip.shape, name="B_flip", dtype=conv_dtype)
s_flipped = akg.tvm.create_schedule(B_flip.op)
with akg.build_config(add_lower_pass=utils.debug_mode(0), dump_pass_ir=True):
mod_weight_flipped = akg.build(s_flipped, [B, B_flip], "cce", name=B.name + "_flipped",
attrs={"dim": str(info)}, polyhedral=True)
s_transposed_converted = akg.tvm.create_schedule(Head_transposed_converted.op)
with akg.build_config(add_lower_pass=utils.debug_mode(0), dump_pass_ir=True):
mod_head_transposed_converted = akg.build(s_transposed_converted, [HEAD, Head_transposed_converted],
"cce", name="H_trans_converted",
attrs={"dim": str(info)},
polyhedral=True)
Head_strided = strided_head(HEAD, s_h, s_w)
pld_Head_strided = akg.tvm.placeholder(Head_strided.shape, name="Head_trans_5D", dtype=conv_dtype)
s_strided = akg.tvm.create_schedule(Head_strided.op)
with akg.build_config(add_lower_pass=utils.debug_mode(0), dump_pass_ir=True):
mod_head_strided = akg.build(s_strided, [HEAD, Head_strided],
"cce", name="H_strided", attrs={"dim": str(info)}, polyhedral=True)
s_transposed = akg.tvm.create_schedule(A_transposed.op)
with akg.build_config(add_lower_pass=utils.debug_mode(0), dump_pass_ir=True):
mod_transposed = akg.build(s_transposed, [A, A_transposed], "cce",
name="A_transposed", attrs={"dim": str(info)}, polyhedral=True)
ad_attrs = {"ad_conv_enable": 1, "ad_conv_reuse_conv": 1}
jacs = list(akg.differentiate(conv_forward, [A], HEAD, ad_attrs, [pld_Head_strided, pld_B_flipped, None]))
info = set_dims(Head_strided_NCHW, (k_c, k_n, k_h, k_w), (k_h - 1, k_w - 1), (1, 1), (1, 1),
tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, block_size)
sjac = akg.tvm.create_schedule([jacs[0].op])
with akg.build_config(add_lower_pass=utils.debug_mode(0), dump_pass_ir=True):
mod_AD_data = akg.build(sjac, [pld_Head_strided, pld_B_flipped, jacs[0]], "cce",
name="conv_AD_data", attrs={"dim": str(info)}, polyhedral=True)
conv_data = conv_compute_forward(Head_strided_NCHW, K_flip_rot_NCHW,
(k_h - 1, k_h - 1, k_w - 1, k_w - 1), (1, 1), (1, 1),
pld_Head_strided, pld_B_flipped, None,
tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, bypass_l1,
use_bias, block_size, conv_dtype)
info = set_dims(Head_strided_NCHW, (k_c, k_n, k_h, k_w), (k_h - 1, k_w - 1), (1, 1), (1, 1),
tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, block_size)
s_data = akg.tvm.create_schedule(conv_data.op)
with akg.build_config(add_lower_pass=utils.debug_mode(0), dump_pass_ir=True):
mod_data = akg.build(s_data, [pld_Head_strided, pld_B_flipped, conv_data], "cce",
name="conv_data", attrs={"dim": str(info)}, polyhedral=True)
ad_attrs = {"ad_conv_enable": 1, "ad_conv_reuse_conv": 1}
jacs = list(akg.differentiate(conv_forward, [B], HEAD, ad_attrs, [pld_A_transposed, pld_Head_transposed_converted, None]))
info = set_dims(A_transposed_NCHW, Head_transposed_NCHW, (0, 0), (1, 1), (s_h, s_w),
tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, block_size)
sjac = akg.tvm.create_schedule([jacs[0].op])
with akg.build_config(add_lower_pass=utils.debug_mode(0), dump_pass_ir=True):
mod_AD_weight = akg.build(sjac, [pld_A_transposed, pld_Head_transposed_converted, jacs[0]], "cce",
name="conv_AD_weight", attrs={"dim": str(info)}, polyhedral=True)
conv_weight = conv_compute_forward(A_transposed_NCHW, Head_transposed_NCHW,
(0, 0, 0, 0), (1, 1), (s_h, s_w),
pld_A_transposed, pld_Head_transposed_converted, None,
tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, bypass_l1,
use_bias, block_size, conv_dtype)
info = set_dims(A_transposed_NCHW, Head_transposed_NCHW, (0, 0), (1, 1), (s_h, s_w),
tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, block_size)
s_weight = akg.tvm.create_schedule(conv_weight.op)
with akg.build_config(add_lower_pass=utils.debug_mode(0), dump_pass_ir=True):
mod_weight = akg.build(s_weight, [pld_A_transposed, pld_Head_transposed_converted, conv_weight], "cce",
name="conv_weight", attrs={"dim": str(info)}, polyhedral=True)
return mod_AD_data, mod_AD_weight, mod_transposed, mod_head_transposed_converted, mod_head_strided, mod_weight_flipped
| [
"[email protected]"
]
| |
2438dc850e5d62d640bcdc86236a89bc67376373 | 1d60c5a7b8ce6277bff514e376f79848f706344c | /Data Engineer with Python/04. Writing Efficient Python Code/04. Basic pandas optimizations/08. Bringing it all together: Predict win percentage.py | cf12b3bebb30941ce6308446c58c8d8a439da8bb | []
| no_license | DidiMilikina/DataCamp | 338c6e6d3b4f5b6c541c1aba155a36e9ee24949d | 3bf2cf3c1430190a7f8e54efda7d50a5fd66f244 | refs/heads/master | 2020-12-15T13:16:54.178967 | 2020-05-06T17:30:54 | 2020-05-06T17:30:54 | 235,113,616 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,790 | py | '''
Bringing it all together: Predict win percentage
A pandas DataFrame (baseball_df) has been loaded into your session. For convenience, a dictionary describing each column within baseball_df has been printed into your console. You can reference these descriptions throughout the exercise.
You'd like to attempt to predict a team's win percentage for a given season by using the team's total runs scored in a season ('RS') and total runs allowed in a season ('RA') with the following function:
def predict_win_perc(RS, RA):
prediction = RS ** 2 / (RS ** 2 + RA ** 2)
return np.round(prediction, 2)
Let's compare the approaches you've learned to calculate a predicted win percentage for each season (or row) in your DataFrame.
Instructions
1. Use a for loop and .itertuples() to predict the win percentage for each row of baseball_df with the predict_win_perc() function. Save each row's predicted win percentage as win_perc_pred and append each to the win_perc_preds_loop list.
2. Apply predict_win_perc() to each row of the baseball_df DataFrame using a lambda function. Save the predicted win percentage as win_perc_preds_apply.
3. Calculate the predicted win percentages by passing the underlying 'RS' and 'RA' arrays from baseball_df into predict_win_perc(). Save these predictions as win_perc_preds_np.
'''
# Solution 1
win_perc_preds_loop = []
# Use a loop and .itertuples() to collect each row's predicted win percentage
for row in baseball_df.itertuples():
runs_scored = row.RS
runs_allowed = row.RA
    win_perc_pred = predict_win_perc(runs_scored, runs_allowed)
win_perc_preds_loop.append(win_perc_pred)
# Solution 2
win_perc_preds_loop = []
# Use a loop and .itertuples() to collect each row's predicted win percentage
for row in baseball_df.itertuples():
runs_scored = row.RS
runs_allowed = row.RA
win_perc_pred = predict_win_perc(runs_scored, runs_allowed)
win_perc_preds_loop.append(win_perc_pred)
# Apply predict_win_perc to each row of the DataFrame
win_perc_preds_apply = baseball_df.apply(lambda row: predict_win_perc(row['RS'], row['RA']), axis=1)
# Solution 3
win_perc_preds_loop = []
# Use a loop and .itertuples() to collect each row's predicted win percentage
for row in baseball_df.itertuples():
runs_scored = row.RS
runs_allowed = row.RA
win_perc_pred = predict_win_perc(runs_scored, runs_allowed)
win_perc_preds_loop.append(win_perc_pred)
# Apply predict_win_perc to each row of the DataFrame
win_perc_preds_apply = baseball_df.apply(lambda row: predict_win_perc(row['RS'], row['RA']), axis=1)
# Calculate the win percentage predictions using NumPy arrays
win_perc_preds_np = predict_win_perc(baseball_df['RS'].values, baseball_df['RA'].values)
baseball_df['WP_preds'] = win_perc_preds_np
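# Sanity check (not part of the graded exercise): the three approaches should
# agree element-wise; the NumPy version is typically the fastest of the three.
import numpy as np
print(np.allclose(win_perc_preds_loop, win_perc_preds_apply))
print(np.allclose(win_perc_preds_apply, win_perc_preds_np))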
print(baseball_df.head()) | [
"[email protected]"
]
| |
3f126799ab9a40abdd2ebaae9d63469bf925c350 | 65381b8dffa1ade89746f6fc3a4979a7eb548d34 | /analytic_structure/models/analytic_dimension.py | 3e0c1f79cf94b69f49c82b31d834c963f9d7f218 | []
| no_license | ff4f/AISJ-13 | a4240d1952c3854dd5b21a62cf7dbfdebb16fde5 | a2f2183e0f753100842877efecc844bdc72f8bd4 | refs/heads/master | 2023-05-08T22:54:43.972954 | 2021-06-03T14:44:10 | 2021-06-03T14:48:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | # -*- coding: utf-8 -*-
from odoo import models, fields
class AnalyticDimension(models.Model):
######################
# Private attributes #
######################
_name = "account.analytic.dimension"
###################
# Default methods #
###################
######################
# Fields declaration #
######################
name = fields.Char(string="Dimension Name",
required=True)
dependency_id = fields.Many2one(comodel_name="account.analytic.dimension",
string="Dependent On")
##############################
# Compute and search methods #
##############################
############################
# Constrains and onchanges #
############################
#########################
# CRUD method overrides #
#########################
##################
# Action methods #
##################
| [
"[email protected]"
]
| |
aff1a5f925b9a5fb61aa23bc3c7204c9d0b2fdf8 | 98f730ec6a43d8be4a34b0f2a44a9d35989d2287 | /tests/unit/entity/test_flow_file_entity.py | c96730ce6075f70da6c024829667d2c0880046c9 | []
| no_license | scottwr98/pynifi-client | 9337a4f322536ee466d419a788b8b5948cdc62d7 | 013ac2ffa591284a0d6cbb9ed552681cc6f91165 | refs/heads/master | 2020-04-18T08:47:03.680749 | 2017-11-04T23:59:58 | 2017-11-04T23:59:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,335 | py | # coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service. # noqa: E501
OpenAPI spec version: 1.4.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import pynifi_client
from pynifi_client.models.flow_file_entity import FlowFileEntity # noqa: E501
from pynifi_client.rest import ApiException
class TestFlowFileEntity(unittest.TestCase):
"""FlowFileEntity unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testFlowFileEntity(self):
"""Test FlowFileEntity"""
# FIXME: construct object with mandatory attributes with example values
# model = pynifi_client.models.flow_file_entity.FlowFileEntity() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
386b87b23a4abb72e8025a74ef4beb8cda822341 | c2bcf42e04a1e2146b41b250ff14e62fddcdf589 | /docs/examples/plot_gpr.py | b38412ecdc8bb8734c124690fb196f341c3f89ea | [
"Apache-2.0"
]
| permissive | onnx/sklearn-onnx | 0f958e1c090572fbe11e15f95bec975d1780cf8d | 895c3a76a315c7a6567a1a07a96dc658994ec16a | refs/heads/main | 2023-08-18T18:49:25.164433 | 2023-08-17T09:52:31 | 2023-08-17T09:52:31 | 162,340,939 | 455 | 92 | Apache-2.0 | 2023-08-31T16:04:13 | 2018-12-18T20:18:48 | Python | UTF-8 | Python | false | false | 6,674 | py | # SPDX-License-Identifier: Apache-2.0
"""
.. _l-gpr-example:
Discrepancies with GaussianProcessRegressor: use of double
============================================================
The `GaussianProcessRegressor
<https://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.
GaussianProcessRegressor.html>`_ involves
many matrix operations which may requires double
precisions. *sklearn-onnx* is using single floats by default
but for this particular model, it is better to use double.
Let's see how to create an ONNX file using doubles.
Train a model
+++++++++++++
A very basic example using *GaussianProcessRegressor*
on the Boston dataset.
"""
import pprint
import numpy
import sklearn
from sklearn.datasets import load_diabetes
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import DotProduct, RBF
from sklearn.model_selection import train_test_split
import onnx
import onnxruntime as rt
import skl2onnx
from skl2onnx.common.data_types import FloatTensorType, DoubleTensorType
from skl2onnx import convert_sklearn
dataset = load_diabetes()
X, y = dataset.data, dataset.target
X_train, X_test, y_train, y_test = train_test_split(X, y)
gpr = GaussianProcessRegressor(DotProduct() + RBF(), alpha=1.0)
gpr.fit(X_train, y_train)
print(gpr)
###########################
# First attempt to convert a model into ONNX
# ++++++++++++++++++++++++++++++++++++++++++
#
# The documentation suggests the following way to
# convert a model into ONNX.
initial_type = [("X", FloatTensorType([None, X_train.shape[1]]))]
onx = convert_sklearn(gpr, initial_types=initial_type, target_opset=12)
sess = rt.InferenceSession(onx.SerializeToString())
try:
pred_onx = sess.run(None, {"X": X_test.astype(numpy.float32)})[0]
except RuntimeError as e:
print(str(e))
###########################
# Second attempt: variable dimensions
# +++++++++++++++++++++++++++++++++++
#
# Unfortunately, even though the conversion
# went well, the runtime fails to compute the prediction.
# The previous snippet of code imposes fixed dimension
# on the input and therefore let the runtime assume
# every node output has outputs with fixed dimensions
# And that's not the case for this model.
# We need to disable these checkings by replacing
# the fixed dimensions by an empty value.
# (see next line).
initial_type = [("X", FloatTensorType([None, None]))]
onx = convert_sklearn(gpr, initial_types=initial_type, target_opset=12)
sess = rt.InferenceSession(onx.SerializeToString())
pred_onx = sess.run(None, {"X": X_test.astype(numpy.float32)})[0]
pred_skl = gpr.predict(X_test)
print(pred_skl[:10])
print(pred_onx[0, :10])
###################################
# The differences seems quite important.
# Let's confirm that by looking at the biggest
# differences.
diff = numpy.sort(numpy.abs(numpy.squeeze(pred_skl) - numpy.squeeze(pred_onx)))[-5:]
print(diff)
print("min(Y)-max(Y):", min(y_test), max(y_test))
###########################
# Third attempt: use of double
# ++++++++++++++++++++++++++++
#
# The model uses a couple of matrix computations
# and matrices have coefficients with very different
# order of magnitude. It is difficult to approximate
# the prediction made with scikit-learn if the converted
# model sticks to float. Double precision is needed.
#
# The previous code requires two changes. The first
# one indicates that inputs are now of type
# ``DoubleTensorType``. The second change
# is the extra parameter ``dtype=numpy.float64``
# tells the conversion function that every real
# constant matrix such as the trained coefficients
# will be dumped as doubles and not as floats anymore.
initial_type = [("X", DoubleTensorType([None, None]))]
onx64 = convert_sklearn(gpr, initial_types=initial_type, target_opset=12)
sess64 = rt.InferenceSession(onx64.SerializeToString())
pred_onx64 = sess64.run(None, {"X": X_test})[0]
print(pred_onx64[0, :10])
################################
# The new differences look much better.
diff = numpy.sort(numpy.abs(numpy.squeeze(pred_skl) - numpy.squeeze(pred_onx64)))[-5:]
print(diff)
print("min(Y)-max(Y):", min(y_test), max(y_test))
####################################
# Size increase
# +++++++++++++
#
# As a result, the ONNX model is almost twice bigger
# because every coefficient is stored as double and
# and not as floats anymore.
size32 = len(onx.SerializeToString())
size64 = len(onx64.SerializeToString())
print("ONNX with floats:", size32)
print("ONNX with doubles:", size64)
#################################
# return_std=True
# +++++++++++++++
#
# `GaussianProcessRegressor <https://scikit-learn.org/stable/modules/
# generated/sklearn.gaussian_process.GaussianProcessRegressor.html>`_
# is one model which defined additional parameter to the predict function.
# If call with ``return_std=True``, the class returns one more results
# and that needs to be reflected into the generated ONNX graph.
# The converter needs to know that an extended graph is required.
# That's done through the option mechanism
# (see :ref:`l-conv-options`).
initial_type = [("X", DoubleTensorType([None, None]))]
options = {GaussianProcessRegressor: {"return_std": True}}
try:
onx64_std = convert_sklearn(
gpr, initial_types=initial_type, options=options, target_opset=12
)
except RuntimeError as e:
print(e)
######################################
# This error highlights the fact that the *scikit-learn*
# computes internal variables on first call to method predict.
# The converter needs them to be initialized by calling method
# predict at least once and then converting again.
gpr.predict(X_test[:1], return_std=True)
onx64_std = convert_sklearn(
gpr, initial_types=initial_type, options=options, target_opset=12
)
sess64_std = rt.InferenceSession(onx64_std.SerializeToString())
pred_onx64_std = sess64_std.run(None, {"X": X_test[:5]})
pprint.pprint(pred_onx64_std)
###############################
# Let's compare with *scikit-learn* prediction.
pprint.pprint(gpr.predict(X_test[:5], return_std=True))
#######################################
# It looks good. Let's do a better checks.
pred_onx64_std = sess64_std.run(None, {"X": X_test})
pred_std = gpr.predict(X_test, return_std=True)
diff = numpy.sort(
numpy.abs(numpy.squeeze(pred_onx64_std[1]) - numpy.squeeze(pred_std[1]))
)[-5:]
print(diff)
#################################
# There are some discrepancies, but they seem reasonable.
#
# **Versions used for this example**
print("numpy:", numpy.__version__)
print("scikit-learn:", sklearn.__version__)
print("onnx: ", onnx.__version__)
print("onnxruntime: ", rt.__version__)
print("skl2onnx: ", skl2onnx.__version__)
| [
"[email protected]"
]
| |
67032eeb4f2ea4129a550c569f568b99688b109e | c9952dcac5658940508ddc139344a7243a591c87 | /lab/lecture/reader1.py | d3885dfb767c08b06da281dc2472b7af0b93c299 | []
| no_license | wongcyrus/ite3101_introduction_to_programming | 5da1c15212528423b3df91997327fe148abef4de | 7cd76d0861d5355db5a6e2e171735bee2e78f829 | refs/heads/master | 2023-08-31T17:27:06.193049 | 2023-08-21T08:30:26 | 2023-08-21T08:30:26 | 136,574,036 | 3 | 2 | null | 2023-08-21T08:30:28 | 2018-06-08T06:06:49 | Python | UTF-8 | Python | false | false | 44 | py | message = input('Message? ')
print(message)
| [
"[email protected]"
]
| |
bd9b3e6313ea6387c3ad74cf5a29de28b414e98c | c24cc0544ff838b8eb837b941bf2425ce7895293 | /eggs/__init__.py | 5587e8f54e093b901242ce4f0b28eb5a8d58da44 | []
| no_license | mscroggs/readthedocstest | 16d9fc35f841a4398fc2e47e36016dc86f102564 | f0750170fe8aab0e69281eb8c98d9c513f9832c8 | refs/heads/master | 2021-01-01T16:22:31.053270 | 2017-07-21T08:01:18 | 2017-07-21T08:01:18 | 97,815,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37 | py | from eggs import *
from oil import *
| [
"[email protected]"
]
| |
44465a4a6db8996eacce62966259ef8c47a0909e | 1915774790a77a630c00e70738ac41a315f5a2cb | /doorscalc/migrations/0034_order.py | 0f5f4e76a216a981d62729e85601dd332467b201 | []
| no_license | coconutcake/hajduktools | 842948646d2e8d3368b4d420d73bba981d649d43 | 6f9e678a1168195d77d1163bc9145205d03bb141 | refs/heads/master | 2020-07-02T20:02:19.914649 | 2019-09-13T17:44:05 | 2019-09-13T17:44:05 | 201,648,138 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,456 | py | # Generated by Django 2.1.11 on 2019-08-21 11:08
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('doorscalc', '0033_auto_20190821_0947'),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('w', models.DecimalField(decimal_places=0, max_digits=3, verbose_name='Width')),
('h', models.DecimalField(decimal_places=0, max_digits=3, verbose_name='Height')),
('d', models.DecimalField(decimal_places=0, max_digits=3, verbose_name='Depth')),
('status', models.CharField(choices=[('Pending', 'Pending'), ('Accepted', 'Accepted'), ('Ordered', 'Ordred')], default='Pending', help_text='Status zamówienia', max_length=50, null=True, verbose_name='Status')),
('published_date', models.DateTimeField(blank=True, null=True)),
('door', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='doorscalc.Door', verbose_name='Type')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
]
| |
c0fda12954d82dd3a44313c715b0d476d2c87363 | e5eec1428da1d24d3e9b86f5723c51cd2ca636cd | /백준 삼성역량테스트기출/시험감독.py | 4c3226fd88492be95fe560c0c9ef3c4b27668a7e | []
| no_license | jamwomsoo/Algorithm_prac | 3c36c381f59277721517d331a8f1640399d80c1d | 8393f3cc2f950214c47f3cf0b2c1271791f115d0 | refs/heads/master | 2023-06-09T06:49:14.739255 | 2021-06-18T06:41:01 | 2021-06-18T06:41:01 | 325,227,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py |
n = int(input())
a_lst = list(map(int, input().split()))
b, c = map(int, input().split())
total = 0
for i in range(n):
total+=1
a_lst[i] -= b
if a_lst[i] > 0:
total += a_lst[i]//c
z = a_lst[i] % c
if z > 0:
total+=1
print(total) | [
"[email protected]"
]
| |
a6681169fe270861ab20c12bb9dd080537671d0c | 80ae9b5cfb45b6e9cf7873ef7c46e17e117e4019 | /data/HackerRank-ProblemSolving/Is This a Binary Search Tree.py | 731ae39593eed79cc53c99eb8fef64bfffb5dc12 | []
| no_license | Ritvik19/CodeBook | ef7764d89b790e902ede5802f36d5ca910d8a50e | 2b4ed7938bbf156553d6ba5cba6216449528f0fc | refs/heads/master | 2021-07-04T08:25:52.478719 | 2020-08-08T06:54:14 | 2020-08-08T06:54:14 | 138,744,302 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | """ Node is defined as
class node:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
"""
def check_binary_search_tree_(root):
return check_node(root, -1, 10001)
def check_node(node, Min, Max):
if not node:
return True
if Min < node.data < Max:
return check_node(node.left, Min, node.data) and check_node(node.right, node.data, Max)
return False
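# Quick local sanity check (illustrative addition, not part of the original
# submission): HackerRank supplies the real node class and tree, so the tiny
# stand-in node class below is an assumption for standalone runs.
if __name__ == "__main__":
    class node:
        def __init__(self, data):
            self.data = data
            self.left = None
            self.right = None
    root = node(4)
    root.left, root.right = node(2), node(6)
    root.left.left, root.left.right = node(1), node(3)
    print(check_binary_search_tree_(root))  # expected: True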
| [
"[email protected]"
]
| |
1094a919ee00b4136d877401a04011ef4e3c2f08 | 7b358c3af9b1d10ace466d492909c90b8937bb38 | /models/utils.py | 865c6c5c51e79993de84c555ce0805b820531d9a | [
"Apache-2.0"
]
| permissive | shibaji7/model.CODE_BASE | b3090b0aa88c62e0fe62cb1b6c8bbca196b9e674 | 1ef8cffbbde1dbb05c405aedd1c0cac612ac6330 | refs/heads/master | 2023-04-11T13:07:52.722081 | 2021-09-23T02:47:25 | 2021-09-23T02:47:25 | 276,458,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,464 | py | #!/usr/bin/env python
"""utils.py: utils is dedicated to utility functions."""
__author__ = "Chakraborty, S."
__copyright__ = "Copyright 2020, SuperDARN@VT"
__credits__ = []
__license__ = "MIT"
__version__ = "1.0."
__maintainer__ = "Chakraborty, S."
__email__ = "[email protected]"
__status__ = "Research"
import os
import sys
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
from scipy import array
import datetime as dt
from netCDF4 import Dataset, num2date
import scipy.integrate as intg
from pysolar.solar import get_altitude
import calendar
import copy
import verify
import xarray
from timezonefinder import TimezoneFinder
from dateutil import tz
import aacgmv2
if sys.version_info.major > 2:
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern
from collision import *
from absorption import *
from constant import *
def extrap1d(x,y,kind="linear"):
""" This method is used to extrapolate 1D paramteres """
interpolator = interp1d(x,y,kind=kind)
xs = interpolator.x
ys = interpolator.y
def pointwise(x):
if x < xs[0]: return ys[0]+(x-xs[0])*(ys[1]-ys[0])/(xs[1]-xs[0])
elif x > xs[-1]: return ys[-1]+(x-xs[-1])*(ys[-1]-ys[-2])/(xs[-1]-xs[-2])
else: return interpolator(x)
def ufunclike(xs):
return array(list(map(pointwise, array(xs))))
return ufunclike
def download_goes_data(dn, sat=15, v=False):
""" Download GOES data """
def _get_month_bounds_(start_time):
""" This method is used to get the first and last date of the month """
month_start = start_time.replace(day = 1).strftime("%Y%m%d")
_, month_end = calendar.monthrange(start_time.year, start_time.month)
month_end = (start_time.replace(day = 1) + dt.timedelta(days=month_end-1)).strftime("%Y%m%d")
return month_start, month_end
fname = "data/tElec/{dnx}/goes/goes.csv".format(dnx=dn.strftime("%Y.%m.%d.%H.%M"))
if not os.path.exists(fname+".gz"):
month_start, month_end = _get_month_bounds_(dn)
url = "https://satdat.ngdc.noaa.gov/sem/goes/data/avg/{year}/{month}/goes{sat}/netcdf/"\
"g{sat}_xrs_1m_{mstart}_{mend}.nc".format(year=dn.year, month="%02d"%dn.month, sat=sat,
mstart=month_start, mend=month_end)
if v: print("\n Download file -from- " + url)
tag_vars = ["A_AVG","B_AVG"]
fn = fname.replace(".csv",".nc")
os.system("wget -O {fn} {url}".format(fn=fn, url=url))
if os.path.exists(fn):
nc = Dataset(fn)
tt = nc.variables["time_tag"]
jd = np.array(num2date(tt[:],tt.units))
data = {}
for var in tag_vars: data[var] = nc.variables[var][:]
data["date"] = jd
data_dict = pd.DataFrame(data)
data_dict.to_csv(fname, index=False, header=True)
os.system("gzip {fname}".format(fname=fname))
if v: print("\n File saved -to- " + fname)
os.remove(fn)
else: print("\n Unable to download file.")
return
def download_riometer(dn, stn, v=False):
"""
This method is used to download riometer absorption data from UCalgary ftp server.
It stores the dataset into the local drive for future run. It only downloads 1m resolution dataset.
URL - http://data.phys.ucalgary.ca/sort_by_instrument/riometer/GO-Canada_Rio/txt/
"""
fname = "data/tElec/{dnx}/rio/{stn}.csv".format(dnx=dn.strftime("%Y.%m.%d.%H.%M"), stn=stn)
if stn != "ott" and not os.path.exists(fname+".gz"):
f_name = "norstar_k2_rio-%s_%s_v01.txt" % (stn, dn.strftime("%Y%m%d"))
base_url = "http://data.phys.ucalgary.ca/sort_by_instrument/riometer/GO-Canada_Rio/txt"\
"/{year}/{month}/{day}/".format(year=dn.year, month="%02d"%dn.month, day="%02d"%dn.day)
uri = base_url + f_name
tag_vars = ["date","hf_abs"]
os.system("wget -O {fn} {url}".format(fn=f_name, url=uri))
if os.path.exists(f_name):
if v: print("\n Download file -from- " + uri)
with open(f_name) as c: lines = c.read().split("\n")
data = []
for line in lines[13:-2]:
x = np.nan
line = list(filter(None,line.replace("\n","").split(" ")))
try:
x = float(line[2])
data.append([dt.datetime.strptime(line[0]+" "+line[1],"%d/%m/%y %H:%M:%S"),x])
except: continue
if len(data) > 0:
data_dict = pd.DataFrame(data,columns=tag_vars)
data_dict.to_csv(fname, index=False, header=True)
os.system("gzip {fname}".format(fname=fname))
if v: print("\n File saved -to- " + fname)
os.remove(f_name)
else: print("\n Unable to download file.")
elif stn == "ott" and not os.path.exists(fname+".gz"):
f_name = "/home/shibaji/model_run/riometers/ott_{year}-{month}-{day}.csv".format(year=dn.year,
month="%02d"%dn.month, day="%02d"%dn.day)
if os.path.exists(f_name):
data_dict = pd.read_csv(f_name, index_col=0)
data_dict = (data_dict[["DATE","_ABS"]]).rename(columns={"DATE":"date", "_ABS":"hf_abs"})
data_dict.to_csv(fname, index=False, header=True)
os.system("gzip {fname}".format(fname=fname))
if v: print("\n File saved -to- " + fname)
else:
if v: print("\n File not exists.")
return
def get_riom_loc(stn):
""" This method is to get the location of the riometer """
_o = pd.read_csv("config/riometers.csv")
_o = _o[_o.rio==stn]
lat, lon = _o["lat"].tolist()[0], np.mod( (_o["lon"].tolist()[0] + 180), 360 ) - 180
return lat, lon
def read_goes(dn, arc=False):
""" This method is used to fetch GOES x-ray data for a given day """
gzfname = "data/tElec/{dnx}/goes/goes.csv.gz".format(dnx=dn.strftime("%Y.%m.%d.%H.%M"))
fname = "data/tElec/{dnx}/goes/goes.csv".format(dnx=dn.strftime("%Y.%m.%d.%H.%M"))
if arc:
gzfname = "data/tElec/archive/{dnx}/goes/goes.csv.gz".format(dnx=dn.strftime("%Y.%m.%d.%H.%M"))
fname = "data/tElec/archive/{dnx}/goes/goes.csv".format(dnx=dn.strftime("%Y.%m.%d.%H.%M"))
os.system("gzip -d " + gzfname)
_o = pd.read_csv(fname,parse_dates=["date"])
os.system("gzip {fname}".format(fname=fname))
return _o
def read_riometer(dn, stn, arc=False):
""" This method is used to fetch riometer absorption data for a given day and station """
gzfname = "data/tElec/{dnx}/rio/{stn}.csv.gz".format(dnx=dn.strftime("%Y.%m.%d.%H.%M"), stn=stn)
fname = "data/tElec/{dnx}/rio/{stn}.csv".format(dnx=dn.strftime("%Y.%m.%d.%H.%M"), stn=stn)
if arc:
gzfname = "data/tElec/archive/{dnx}/rio/{stn}.csv.gz".format(dnx=dn.strftime("%Y.%m.%d.%H.%M"), stn=stn)
fname = "data/tElec/archive/{dnx}/rio/{stn}.csv".format(dnx=dn.strftime("%Y.%m.%d.%H.%M"), stn=stn)
if os.path.exists(gzfname):
os.system("gzip -d " + gzfname)
_o = pd.read_csv(fname,parse_dates=["date"])
os.system("gzip {fname}".format(fname=fname))
else: _o = pd.DataFrame()
return _o
def get_height_integrated_absorption(beta, height):
""" This method is used to calculate height integrated absorption """
beta[np.isnan(beta)] = 0.
beta[beta < 0.] = 0.
beta_L = intg.trapz(beta)
return beta_L
def calculate_sza(dates, lat, lon, alts):
"""
    This method is used to estimate the solar zenith angle for a specific date and
    specific location in space. Note that this method uses the pysolar package to
    estimate the solar zenith angle. This has been validated against NOAA website values.
"""
sza = np.zeros((len(dates), len(alts)))
for i, d in enumerate(dates):
for j, a in enumerate(alts):
d = d.replace(tzinfo=dt.timezone.utc)
sza[i,j] = 90. - get_altitude(lat, lon, d)
return sza
class PointGrid(object):
"""
    This class initializes all the parameters for a lat, lon and 0-500 km altitude profile. This is a 2D
    grid for one latitude and longitude: the X axis is time with 1-minute resolution, the Y axis is altitude with 1-km resolution.
"""
def __init__(self, rio, ev, stime, etime, bins = 37, freq=30, v=False, fname="data/sim/{dn}/"):
self.rio = rio
self.alts = model["alts"]
self.start_time = stime
self.end_time = etime
self.ev = ev
self.lat, self.lon = get_riom_loc(rio)
self.bins = bins
self.freq = freq
d = int((etime-stime).total_seconds()/60.)
self.dn = [stime + dt.timedelta(seconds = i*60) for i in range(d)]
fname = (fname + "bgc.{stn}.nc.gz").format(dn=self.ev.strftime("%Y.%m.%d.%H.%M"), stn=self.rio)
os.system("gzip -d "+fname)
self._nc = Dataset(fname.replace(".gz", ""))
os.system("gzip "+fname.replace(".gz", ""))
self.igrf = {
"Bx":self._nc.variables["igrf.bx"][:],
"By":self._nc.variables["igrf.by"][:],
"Bz":self._nc.variables["igrf.bz"][:],
"B":self._nc.variables["igrf.b"][:]
}
self.iri = {
"Ne":self._nc.variables["iri.ne"][:],
"Ni":self._nc.variables["iri.ni"][:],
"Te":self._nc.variables["iri.te"][:],
"Ti":self._nc.variables["iri.ti"][:],
"ions":{
"NO+":self._nc.variables["iri.ions.no+"][:],
"O+":self._nc.variables["iri.ions.o+"][:],
"O2+":self._nc.variables["iri.ions.o2+"][:]
}
}
self.msis = {
"Tn":self._nc.variables["msis.tn"][:],
"rho":self._nc.variables["msis.rho"][:],
"AR":self._nc.variables["msis.ar"][:],
"H":self._nc.variables["msis.h"][:],
"HE":self._nc.variables["msis.he"][:],
"N2":self._nc.variables["msis.n2"][:],
"O":self._nc.variables["msis.o"][:],
"O2":self._nc.variables["msis.o2"][:],
"O_anomalous":self._nc.variables["msis.o_a"][:],
"nn":self._nc.variables["msis.nn"][:],
"NO":self._nc.variables["msis.no"][:],
"CO":self._nc.variables["msis.co"][:],
"H2O":self._nc.variables["msis.h2o"][:],
"CO2":self._nc.variables["msis.co2"][:],
}
self.Ne = np.zeros((len(self.dn),len(self.alts)))
self.chi = self._nc.variables["chi"][:]
self._col_ = Collision.load(self._nc)
self._abs_ = Absorption.load(self._nc)
if v: print("\n Grid point %.2f,%.2f is loaded." % (self.lat,self.lon))
return
def update_grid(self, cm, _ix_="all"):
self.ne = cm.Ne[::60, :]
self.ni = cm.Np[::60, :]
self.ni_e = cm.Nm[::60, :]
self.ni_x = cm.Nxp[::60, :]
self._abs_ = Absorption(self.igrf["B"], self._col_, self.ne, fo=self.freq*1e6)
self.drap = Absorption._drap_(self.ev, self.dn, self.rio, self.freq)
self.sato = Absorption._sato_(self.ev, self.dn, self.rio, self.freq)
return
def add_chi(ev, rio, start, end):
""" Add SZA to the Bgc file """
lat, lon = get_riom_loc(rio)
d = int((end-start).total_seconds()/60.)
dn = [start + dt.timedelta(seconds = i*60) for i in range(d)]
fname = "data/tElec/{dn}/bgc.{stn}.nc.gz".format(dn=ev.strftime("%Y.%m.%d.%H.%M"), stn=rio)
os.system("gzip -d "+fname)
rootgrp = Dataset(fname.replace(".gz",""), "a")
chi = rootgrp.createVariable("chi", "f8", ("ntimes","nalts"))
chi[:] = calculate_sza(dn, lat, lon, model["alts"])
chi.description = "Solar Zenith Angle"
    chi.units = "Deg(o)"
print(rootgrp.variables.keys())
rootgrp.close()
os.system("gzip "+fname.replace(".gz",""))
return
def extp(x, y, xlim, kind="slinear", scale="log"):
""" Extrapolate NaN values for smooth outputs. """
if scale == "log":
fn = extrap1d(x[x>xlim], np.log10(y[x>xlim]), kind=kind)
ynew = np.concatenate((10**fn(x[x<=xlim]), y[x>xlim]))
else:
fn = extrap1d(x[x>xlim], y[x>xlim], kind=kind)
ynew = np.concatenate((fn(x[x<=xlim]), y[x>xlim]))
return ynew
def int_absorption(_a, _h, extpoint=68, llim = 60, ulim = 150, method="trapz"):
""" Height integrate HF absorption """
_o = []
def line_integration(y, x, method="trapz"):
from scipy.integrate import simps, trapz
if method == "simps": z = simps(y, x)
elif method == "trapz": z = trapz(y, x)
else: z = None
return z
for _ix in range(_a.shape[0]):
_u = pd.DataFrame()
_u["h"], _u["a"] = _h, extp(_h, _a[_ix,:], xlim=extpoint)
_u = _u[(_u.h>=llim) & (_u.h<=ulim)]
_o.append(line_integration(_u["a"], _u["h"], method=method))
return np.array(_o)
def smooth(x,window_len=51,window="hanning"):
if x.ndim != 1: raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len: raise ValueError("Input vector needs to be bigger than window size.")
if window_len<3: return x
    if window not in ["flat", "hanning", "hamming", "bartlett", "blackman"]: raise ValueError("Window must be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s = np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]
if window == "flat": w = numpy.ones(window_len,"d")
else: w = eval("np."+window+"(window_len)")
y = np.convolve(w/w.sum(),s,mode="valid")
d = window_len - 1
y = y[int(d/2):-int(d/2)]
return y
def estimate_error(m, d, kind="rmse"):
""" Estimate error between model and data """
xsec = [(x-m.date.tolist()[0]).total_seconds() for x in m.date]
xnsec = [(x-m.date.tolist()[0]).total_seconds() for x in d.date]
dx = interp1d(xsec, m.hf_abs)(xnsec)
e = np.sqrt(np.mean((dx-np.array(d.hf_abs.tolist()))**2))
return e
def store_cmd(args):
""" Store the commands """
return
class Performance(object):
""" Class to estimate Skillset """
def __init__(self, stn, ev, times, model, start, end, bar=4., alt=None):
""" Initialize the parameters """
self.stn = stn
self.ev = ev
self.times = times
self.model = model
self.start = start
self.end = end
self.bar = bar
self.alt = alt
self._read_data_()
return
def _read_data_(self):
""" Read data from GOES and Riometer """
gos = read_goes(self.ev, False)
rio = read_riometer(self.ev, self.stn, False)
self.gos = gos[(gos.date>=self.start) & (gos.date<self.end)]
if len(rio) > 0:
rio = rio[(rio.date>=self.start) & (rio.date<=self.end)]
if not np.isnan(self.bar): self.rio = rio[rio.hf_abs <= self.bar]
else: self.rio = rio
elif np.isnan(self.alt) and not np.isnan(self.bar): self.alt = self.bar
y = np.array(self.gos.B_AVG.tolist())
yn = (y - np.min(y)) / (np.max(y) - np.min(y))
if np.isnan(self.alt): self.mx = np.max(self.rio.hf_abs.tolist())
else: self.mx = self.alt
self.yx = self.mx * yn
return
def _skill_(self):
""" Estimate skills """
self.acc, self.attrs = {}, {}
dic = {"MSE":"MSE_{r}", "RMSE":"RMSE_{r}", "MAE":"MAE_{r}", "MdAE":"MdAE_{r}",
"nRMSE":"nRMSE_{r}", "MASE":"MASE_{r}", "MAPE":"MAPE_{r}", "MdAPE":"MdAPE_{r}",
"MdSymAcc":"MdSymAcc_{r}"}
self.acc.update({"t": {"dims": ("t"), "data":self.gos.date.tolist()}})
for k in self.model.keys():
d = pd.DataFrame()
d["date"], d["hf_abs"] = self.times, self.model[k]
d = d[(d.date>=self.start) & (d.date<self.end)]
self.attrs.update(dict((dic[m].format(r=k), v) for (m,v) in verify.accuracy(np.array(d.hf_abs), self.yx).items()))
self.attrs.update(dict((dic[m].format(r=k), v) for (m,v) in verify.scaledAccuracy(np.array(d.hf_abs), self.yx).items()))
self.attrs.update({"mRMSE_" + k: np.sqrt(np.abs(np.max(d.hf_abs)-self.mx))})
self.attrs.update({"mPeak_" + k: np.max(d.hf_abs)})
self.acc.update({"e_" + k: {"dims": ("t"), "data": self.yx - np.array(d.hf_abs)}})
self.acc.update({"m_" + k: {"dims": ("t"), "data": np.array(d.hf_abs)}})
self.acc.update({"dat": {"dims": ("t"), "data": self.yx}})
self.attrs.update({"dPeak": self.mx})
return self
def _to_mag_(self, times, lat, lon):
mlats, mlons, mlts = [], [], []
for t in times:
mlat, mlon, mlt = aacgmv2.get_aacgm_coord(lat, lon, 100, t, method="TRACE")
mlats.append(mlat)
mlons.append(mlon)
mlts.append(mlt)
return mlats, mlons, mlts
def _params_(self):
""" Extract parameters """
times = self.gos.date.tolist()
lat, lon = get_riom_loc(self.stn)
self.attrs.update({"lat":lat, "lon":lon, "stn": self.stn, "event": self.ev.strftime("%Y.%m.%d.%H.%M")})
self.acc.update({"sza": {"dims": ("t"),
"data": calculate_sza(times, lat, lon, np.array([100])).ravel()}})
tf = TimezoneFinder()
from_zone = tz.tzutc()
to_zone = tz.gettz(tf.timezone_at(lng=lon, lat=lat))
LT = [t.replace(tzinfo=from_zone).astimezone(to_zone).to_pydatetime() for t in times]
now = self.start.replace(tzinfo=from_zone).astimezone(to_zone).to_pydatetime().replace(hour=0,minute=0,second=0)
LT = [(x - now).total_seconds()/3600. for x in LT]
self.acc.update({"local_time": {"dims": ("t"), "data": LT}})
mlats, mlons, mlts = self._to_mag_(times, lat, lon)
self.acc.update({"mlt": {"dims": ("t"), "data": mlts}})
self.attrs.update({"mlat": np.mean(mlats)})
self.attrs.update({"mlon": np.mean(mlons)})
return self
def _to_netcdf_(self, fname):
""" Save to netCDF4 (.nc) file """
ds = xarray.Dataset.from_dict(self.acc)
ds.attrs = self.attrs
print("---------------------Skills----------------------")
print(ds)
print("-------------------------------------------------")
ds.to_netcdf(fname,mode="w")
return
| [
"[email protected]"
]
| |
cede73216293a8ce2fb462daf6702e71a3c0f983 | 51885da54b320351bfea42c7dd629f41985454cd | /abc023/d.py | 44e56efe280652514dbc388ca3b19c414d04f3e6 | []
| no_license | mskt4440/AtCoder | dd266247205faeda468f911bff279a792eef5113 | f22702e3932e129a13f0683e91e5cc1a0a99c8d5 | refs/heads/master | 2021-12-15T10:21:31.036601 | 2021-12-14T08:19:11 | 2021-12-14T08:19:11 | 185,161,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,354 | py | #
# abc023 d
#
import sys
from io import StringIO
import unittest
import bisect
class TestClass(unittest.TestCase):
def assertIO(self, input, output):
stdout, stdin = sys.stdout, sys.stdin
sys.stdout, sys.stdin = StringIO(), StringIO(input)
resolve()
sys.stdout.seek(0)
out = sys.stdout.read()[:-1]
sys.stdout, sys.stdin = stdout, stdin
self.assertEqual(out, output)
def test_入力例1(self):
input = """4
5 6
12 4
14 7
21 2"""
output = """23"""
self.assertIO(input, output)
def test_入力例2(self):
input = """6
100 1
100 1
100 1
100 1
100 1
1 30"""
output = """105"""
self.assertIO(input, output)
def resolve():
N = int(input())
ok = 0
global H, S
H = []
S = []
for _ in range(N):
h, s = map(int, input().split())
H.append(h)
S.append(s)
ok = max(ok, h+s*(N-1))
ok -= 1
ng = max(H)-1
while abs(ok-ng) > 1:
mid = (ok+ng)//2
if isOK(mid):
ok = mid
else:
ng = mid
print(ok)
def isOK(x):
time = [(x-h)/s for (h, s) in zip(H, S)]
time.sort()
for i, t in enumerate(time):
if i > t:
return False
return True
if __name__ == "__main__":
# unittest.main()
resolve()
| [
"[email protected]"
]
| |
b550022c8996e1254ad04bbc6e68d43f9a20036d | 8bbeb7b5721a9dbf40caa47a96e6961ceabb0128 | /python3/745.Find Smallest Letter Greater Than Target(寻找比目标字母大的最小字母).py | a515ae5b4ea2c96f75d6260137b0d993b0a8432c | [
"MIT"
]
| permissive | lishulongVI/leetcode | bb5b75642f69dfaec0c2ee3e06369c715125b1ba | 6731e128be0fd3c0bdfe885c1a409ac54b929597 | refs/heads/master | 2020-03-23T22:17:40.335970 | 2018-07-23T14:46:06 | 2018-07-23T14:46:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,576 | py | """
<p>
Given a list of sorted characters <code>letters</code> containing only lowercase letters, and given a target letter <code>target</code>, find the smallest element in the list that is larger than the given target.
</p><p>
Letters also wrap around. For example, if the target is <code>target = 'z'</code> and <code>letters = ['a', 'b']</code>, the answer is <code>'a'</code>.
</p>
<p><b>Examples:</b><br />
<pre>
<b>Input:</b>
letters = ["c", "f", "j"]
target = "a"
<b>Output:</b> "c"
<b>Input:</b>
letters = ["c", "f", "j"]
target = "c"
<b>Output:</b> "f"
<b>Input:</b>
letters = ["c", "f", "j"]
target = "d"
<b>Output:</b> "f"
<b>Input:</b>
letters = ["c", "f", "j"]
target = "g"
<b>Output:</b> "j"
<b>Input:</b>
letters = ["c", "f", "j"]
target = "j"
<b>Output:</b> "c"
<b>Input:</b>
letters = ["c", "f", "j"]
target = "k"
<b>Output:</b> "c"
</pre>
</p>
<p><b>Note:</b><br>
<ol>
<li><code>letters</code> has a length in range <code>[2, 10000]</code>.</li>
<li><code>letters</code> consists of lowercase letters, and contains at least 2 unique letters.</li>
<li><code>target</code> is a lowercase letter.</li>
</ol>
</p><p>给定一个只包含小写字母的有序数组<code>letters</code> 和一个目标字母 <code>target</code>,寻找有序数组里面比目标字母大的最小字母。</p>
<p>数组里字母的顺序是循环的。举个例子,如果目标字母<code>target = 'z'</code> 并且有序数组为 <code>letters = ['a', 'b']</code>,则答案返回 <code>'a'</code>。</p>
<p><strong>示例:</strong></p>
<pre>
<strong>输入:</strong>
letters = ["c", "f", "j"]
target = "a"
<strong>输出:</strong> "c"
<strong>输入:</strong>
letters = ["c", "f", "j"]
target = "c"
<strong>输出:</strong> "f"
<strong>输入:</strong>
letters = ["c", "f", "j"]
target = "d"
<strong>输出:</strong> "f"
<strong>输入:</strong>
letters = ["c", "f", "j"]
target = "g"
<strong>输出:</strong> "j"
<strong>输入:</strong>
letters = ["c", "f", "j"]
target = "j"
<strong>输出:</strong> "c"
<strong>输入:</strong>
letters = ["c", "f", "j"]
target = "k"
<strong>输出:</strong> "c"
</pre>
<p><strong>注:</strong></p>
<ol>
<li><code>letters</code>长度范围在<code>[2, 10000]</code>区间内。</li>
<li><code>letters</code> 仅由小写字母组成,最少包含两个不同的字母。</li>
<li>目标字母<code>target</code> 是一个小写字母。</li>
</ol>
"""
class Solution:
def nextGreatestLetter(self, letters, target):
"""
        :type letters: List[str]
        :type target: str
        :rtype: str
        """
        # Binary search for the first letter strictly greater than target;
        # wrap around to letters[0] when target is >= every letter in the list.
        import bisect
        idx = bisect.bisect_right(letters, target)
        return letters[idx % len(letters)]
| [
"[email protected]"
]
| |
10ba96abd7fbec0f39742d29991a6863ac7d558b | 17c14b758959cdceec0dce8f783346fdeee8e111 | /chap05_nlp/sequence_labeling/eng_model/main.py | 9bc95c095d4eb0780ca8db2ad4280e23fd2c0801 | []
| no_license | yurimkoo/tensormsa_jupyter | b0a340119339936d347d12fbd88fb017599a0029 | 0e75784114ec6dc8ee7eff8094aef9cf37131a5c | refs/heads/master | 2021-07-18T12:22:31.396433 | 2017-10-25T01:42:24 | 2017-10-25T01:42:24 | 109,469,220 | 1 | 0 | null | 2017-11-04T05:20:15 | 2017-11-04T05:20:15 | null | UTF-8 | Python | false | false | 1,871 | py | import os
from eng_model.data_utils import get_trimmed_glove_vectors, load_vocab, \
get_processing_word, CoNLLDataset
from eng_model.general_utils import get_logger
from eng_model.model import NERModel
from eng_model.config import config
try :
# directory for training outputs
if not os.path.exists(config.output_path):
os.makedirs(config.output_path)
# load vocabs
vocab_words = load_vocab(config.words_filename)
vocab_tags = load_vocab(config.tags_filename)
vocab_chars = load_vocab(config.chars_filename)
# get processing functions
processing_word = get_processing_word(vocab_words, vocab_chars,
lowercase=config.lowercase, chars=config.chars)
processing_tag = get_processing_word(vocab_tags, lowercase=False)
# get pre trained embeddings
embeddings = get_trimmed_glove_vectors(config.trimmed_filename)
# create dataset
dev = CoNLLDataset(config.dev_filename, processing_word,
processing_tag, config.max_iter)
test = CoNLLDataset(config.test_filename, processing_word,
processing_tag, config.max_iter)
train = CoNLLDataset(config.train_filename, processing_word,
processing_tag, config.max_iter)
# get logger
logger = get_logger(config.log_path)
# build model
model = NERModel(config, embeddings, ntags=len(vocab_tags),
nchars=len(vocab_chars), logger=logger)
model.build()
# train, evaluate and interact
model.train(train, dev, vocab_tags)
model.evaluate(test, vocab_tags)
model.predict(vocab_tags, processing_word, "Germany 's representative")
model.predict(vocab_tags, processing_word, "Germany")
model.predict(vocab_tags, processing_word, "Hello Germany 's representative")
except Exception as e :
raise Exception (e) | [
"[email protected]"
]
| |
2cbd45af7d26fd7efc079cde6e33ae3cf3e2f982 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_143/ch118_2020_03_29_04_35_07_099201.py | d8962c300590a574cb248be19c49e2d9ef558047 | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | import math
def snell_descartes(n1, n2, o):
y=(n1/n2)
x=math.sin(math.radians(o))
z=x*y
o2= math.asin(z)
o2=math.degrees(o2)
return o2
def reflexao_total_interna (n1, n2, o2):
if (n2*o2)/n1 == o2:
return True
else:
return False | [
"[email protected]"
]
| |
178910e4f15626f235806824e33a9222ee63e9b0 | 308953409e1a3b828ac49b7301c1e751cbf762cf | /suite_EETc 12/tst_Open_Import_Export/test.py | 4453463efcc939e846f44d4a6859e0aa61a262cf | []
| no_license | asthagaur1/danfoss-automation | 4dcc7d8f000917b67e4d6f46ff862a525ddcbc5e | 213a99d3375889cd0e0c801421a50e9fe6085879 | refs/heads/main | 2023-03-31T23:26:56.956107 | 2021-04-01T08:52:37 | 2021-04-01T08:52:37 | 353,627,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | def main():
excel = r"C:\gitworkspace\KoolProg-TestAutomation\Master_Functions\Test_Automation\SourceCode\suite_EETc 12\shared\testdata\Open_Import_Export.xls";
#Mapping with Global scripts for Function library and key action.
source(findFile("scripts", "Functions.py"))
source(findFile("scripts", "Actions.py"))
#source(findFile("scripts", "object_id.py"))
keyAction(excel)
| [
"[email protected]"
]
| |
14b48bbbf62470ff68ffb9122f28308444f5f2f1 | 25873da962b0acdcf2c46b60695866d29008c11d | /src/programr/clients/events/console/config.py | 16a2c9b254edf08455d0a327b7f522385af6cbbc | []
| no_license | LombeC/program-r | 79f81fa82a617f053ccde1115af3344369b1cfa5 | a7eb6820696a2e5314d29f8d82aaad45a0dc0362 | refs/heads/master | 2022-12-01T14:40:40.208360 | 2020-08-10T21:10:30 | 2020-08-10T21:10:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,186 | py | from programr.config.client.config import ClientConfigurationData
class ConsoleConfiguration(ClientConfigurationData):
def __init__(self):
super().__init__("console")
self._default_userid = "console"
self._prompt = ">>>"
@property
def default_userid(self):
return self._default_userid
@property
def prompt(self):
return self._prompt
def load_configuration(self, configuration_file, bot_root):
console = configuration_file.get_section(self.section_name)
if console is not None:
self._default_userid = configuration_file.get_option(console, "default_userid", missing_value="Console")
self._prompt = configuration_file.get_option(console, "prompt", missing_value=">>>")
super().load_configuration(configuration_file, console, bot_root)
def to_yaml(self, data, defaults=True):
if defaults is True:
data['default_userid'] = "console"
data['prompt'] = ">>>"
else:
data['default_userid'] = self._default_userid
data['prompt'] = self._prompt
super(ConsoleConfiguration, self).to_yaml(data, defaults) | [
"[email protected]"
]
| |
56c90b4716f1cc14341f23413d49aaa8b0682632 | a9e3f3ad54ade49c19973707d2beb49f64490efd | /Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/bulk_email/tests/test_views.py | d2ec21c3ba6ac57f01f91d77bfab7dc4daf89163 | [
"AGPL-3.0-only",
"AGPL-3.0-or-later",
"MIT"
]
| permissive | luque/better-ways-of-thinking-about-software | 8c3dda94e119f0f96edbfe5ba60ca6ec3f5f625d | 5809eaca7079a15ee56b0b7fcfea425337046c97 | refs/heads/master | 2021-11-24T15:10:09.785252 | 2021-11-22T12:14:34 | 2021-11-22T12:14:34 | 163,850,454 | 3 | 1 | MIT | 2021-11-22T12:12:31 | 2019-01-02T14:21:30 | JavaScript | UTF-8 | Python | false | false | 3,247 | py | """
Test the bulk email opt out view.
"""
import ddt
import pytest
from django.http import Http404
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.urls import reverse
from common.djangoapps.student.tests.factories import UserFactory
from lms.djangoapps.bulk_email.models import Optout
from lms.djangoapps.bulk_email.views import opt_out_email_updates
from lms.djangoapps.discussion.notification_prefs.views import UsernameCipher
from openedx.core.lib.tests import attr
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@attr(shard=1)
@ddt.ddt
@override_settings(SECRET_KEY="test secret key")
class OptOutEmailUpdatesViewTest(ModuleStoreTestCase):
"""
Check the opt out email functionality.
"""
def setUp(self):
super().setUp()
self.user = UserFactory.create(username="testuser1", email='[email protected]')
self.course = CourseFactory.create(run='testcourse1', display_name='Test Course Title')
self.token = UsernameCipher.encrypt('testuser1')
self.request_factory = RequestFactory()
self.url = reverse('bulk_email_opt_out', args=[self.token, str(self.course.id)])
# Ensure we start with no opt-out records
assert Optout.objects.count() == 0
def test_opt_out_email_confirm(self):
"""
Ensure that the default GET view asks for confirmation.
"""
response = self.client.get(self.url)
self.assertContains(response, "confirm unsubscribe from")
assert Optout.objects.count() == 0
def test_opt_out_email_unsubscribe(self):
"""
Ensure that the POSTing "confirm" creates the opt-out record.
"""
response = self.client.post(self.url, {'unsubscribe': True})
self.assertContains(response, "You have successfully unsubscribed from")
assert Optout.objects.count() == 1
def test_opt_out_email_cancel(self):
"""
Ensure that the POSTing "cancel" does not create the opt-out record
"""
response = self.client.post(self.url)
self.assertContains(response, "You have not been unsubscribed from")
assert Optout.objects.count() == 0
@ddt.data(
("ZOMG INVALID BASE64 CHARS!!!", "base64url", False),
("Non-ASCII\xff".encode(), "base64url", False),
("D6L8Q01ztywqnr3coMOlq0C3DG05686lXX_1ArEd0ok", "base64url", False),
("AAAAAAAAAAA=", "initialization_vector", False),
("nMXVK7PdSlKPOovci-M7iqS09Ux8VoCNDJixLBmj", "aes", False),
("AAAAAAAAAAAAAAAAAAAAAMoazRI7ePLjEWXN1N7keLw=", "padding", False),
("AAAAAAAAAAAAAAAAAAAAACpyUxTGIrUjnpuUsNi7mAY=", "username", False),
("_KHGdCAUIToc4iaRGy7K57mNZiiXxO61qfKT08ExlY8=", "course", 'course-v1:testcourse'),
)
@ddt.unpack
def test_unsubscribe_invalid_token(self, token, message, course):
"""
Make sure that view returns 404 in case token is not valid
"""
request = self.request_factory.get("dummy")
with pytest.raises(Http404) as err:
opt_out_email_updates(request, token, course)
assert message in err
| [
"[email protected]"
]
| |
bd548c6e28569374dce6cece185f426673c7f3d6 | 8d0eec5c051cf902df1ef004b537115b888fe5c6 | /async_dev/generators_two_way.py | 7483829ccf1ffe0d0ef3648065fd504c53c26ea0 | []
| no_license | MadhuV99/complete_py_course | 494300225eef49470a92290f908c1d6f1296cb4f | ade2ac8c5722c45196b700d3ad99f37c9deb76d8 | refs/heads/main | 2023-02-24T06:57:57.441762 | 2021-02-04T03:49:58 | 2021-02-04T03:49:58 | 329,334,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | from collections import deque
# friends = ['Rolf', 'Jose', 'Charlie', 'Jen', 'Anna']
friends = deque(('Rolf', 'Jose', 'Charlie', 'Jen', 'Anna'))
def get_friend():
yield from friends
def greet(g):
while True:
try:
friend = next(g)
yield f'HELLO {friend}'
except StopIteration:
pass
friends_generator = get_friend()
g = greet(friends_generator)
print(next(g))
print(next(g)) | [
"[email protected]"
]
| |
9b64afa65c9d6ded04f35a8e66d55c8a70318c62 | d488f052805a87b5c4b124ca93494bc9b78620f7 | /google-cloud-sdk/.install/.backup/lib/googlecloudsdk/third_party/apis/serviceuser/v1/serviceuser_v1_client.py | 81860d607ce811e4c113893404142a4427ea51cd | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
]
| permissive | PacktPublishing/DevOps-Fundamentals | 5ce1fc938db66b420691aa8106ecfb3f9ceb1ace | 60597e831e08325c7e51e8557591917f7c417275 | refs/heads/master | 2023-02-02T04:48:15.346907 | 2023-01-30T08:33:35 | 2023-01-30T08:33:35 | 131,293,311 | 13 | 19 | null | null | null | null | UTF-8 | Python | false | false | 7,177 | py | """Generated client library for serviceuser version v1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.serviceuser.v1 import serviceuser_v1_messages as messages
class ServiceuserV1(base_api.BaseApiClient):
"""Generated client library for service serviceuser version v1."""
MESSAGES_MODULE = messages
BASE_URL = u'https://serviceuser.googleapis.com/'
_PACKAGE = u'serviceuser'
_SCOPES = [u'https://www.googleapis.com/auth/cloud-platform', u'https://www.googleapis.com/auth/cloud-platform.read-only', u'https://www.googleapis.com/auth/service.management']
_VERSION = u'v1'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_CLIENT_CLASS_NAME = u'ServiceuserV1'
_URL_VERSION = u'v1'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None, response_encoding=None):
"""Create a new serviceuser handle."""
url = url or self.BASE_URL
super(ServiceuserV1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers,
response_encoding=response_encoding)
self.projects_services = self.ProjectsServicesService(self)
self.projects = self.ProjectsService(self)
self.services = self.ServicesService(self)
class ProjectsServicesService(base_api.BaseApiService):
"""Service class for the projects_services resource."""
_NAME = u'projects_services'
def __init__(self, client):
super(ServiceuserV1.ProjectsServicesService, self).__init__(client)
self._upload_configs = {
}
def Disable(self, request, global_params=None):
"""Disable a service so it can no longer be used with a.
project. This prevents unintended usage that may cause unexpected billing
charges or security leaks.
Operation<response: google.protobuf.Empty>
Args:
request: (ServiceuserProjectsServicesDisableRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Disable')
return self._RunMethod(
config, request, global_params=global_params)
Disable.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'serviceuser.projects.services.disable',
ordered_params=[u'projectsId', u'servicesId'],
path_params=[u'projectsId', u'servicesId'],
query_params=[],
relative_path=u'v1/projects/{projectsId}/services/{servicesId}:disable',
request_field=u'disableServiceRequest',
request_type_name=u'ServiceuserProjectsServicesDisableRequest',
response_type_name=u'Operation',
supports_download=False,
)
def Enable(self, request, global_params=None):
"""Enable a service so it can be used with a project.
See [Cloud Auth Guide](https://cloud.google.com/docs/authentication) for
more information.
Operation<response: google.protobuf.Empty>
Args:
request: (ServiceuserProjectsServicesEnableRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Enable')
return self._RunMethod(
config, request, global_params=global_params)
Enable.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'serviceuser.projects.services.enable',
ordered_params=[u'projectsId', u'servicesId'],
path_params=[u'projectsId', u'servicesId'],
query_params=[],
relative_path=u'v1/projects/{projectsId}/services/{servicesId}:enable',
request_field=u'enableServiceRequest',
request_type_name=u'ServiceuserProjectsServicesEnableRequest',
response_type_name=u'Operation',
supports_download=False,
)
def List(self, request, global_params=None):
"""List enabled services for the specified consumer.
Args:
request: (ServiceuserProjectsServicesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListEnabledServicesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'serviceuser.projects.services.list',
ordered_params=[u'projectsId'],
path_params=[u'projectsId'],
query_params=[u'pageSize', u'pageToken'],
relative_path=u'v1/projects/{projectsId}/services',
request_field='',
request_type_name=u'ServiceuserProjectsServicesListRequest',
response_type_name=u'ListEnabledServicesResponse',
supports_download=False,
)
class ProjectsService(base_api.BaseApiService):
"""Service class for the projects resource."""
_NAME = u'projects'
def __init__(self, client):
super(ServiceuserV1.ProjectsService, self).__init__(client)
self._upload_configs = {
}
class ServicesService(base_api.BaseApiService):
"""Service class for the services resource."""
_NAME = u'services'
def __init__(self, client):
super(ServiceuserV1.ServicesService, self).__init__(client)
self._upload_configs = {
}
def Search(self, request, global_params=None):
"""Search available services.
When no filter is specified, returns all accessible services. For
authenticated users, also returns all services the calling user has
"servicemanagement.services.bind" permission for.
Args:
request: (ServiceuserServicesSearchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(SearchServicesResponse) The response message.
"""
config = self.GetMethodConfig('Search')
return self._RunMethod(
config, request, global_params=global_params)
Search.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'serviceuser.services.search',
ordered_params=[],
path_params=[],
query_params=[u'pageSize', u'pageToken'],
relative_path=u'v1/services:search',
request_field='',
request_type_name=u'ServiceuserServicesSearchRequest',
response_type_name=u'SearchServicesResponse',
supports_download=False,
)
| [
"[email protected]"
]
| |
097bcb484e898145895118958d891df3c5377fe3 | 183e4126b2fdb9c4276a504ff3ace42f4fbcdb16 | /I семестр/Програмування (Python)/Лабораторні/Братун 6305/Приклади/34/Ex26.py | 4c5ba37cc50a32792e7f969423731ecf7a45162d | []
| no_license | Computer-engineering-FICT/Computer-engineering-FICT | ab625e2ca421af8bcaff74f0d37ac1f7d363f203 | 80b64b43d2254e15338060aa4a6d946e8bd43424 | refs/heads/master | 2023-08-10T08:02:34.873229 | 2019-06-22T22:06:19 | 2019-06-22T22:06:19 | 193,206,403 | 3 | 0 | null | 2023-07-22T09:01:05 | 2019-06-22T07:41:22 | HTML | UTF-8 | Python | false | false | 296 | py | import re
p1 = re.compile(r"[0-9]+")
print(p1.findall("2012, 2013, 2014, 2015, 2016"))
p2 = re.compile(r"[a-z]+")
print(p2.findall("2012, 2013, 2014, 2015, 2016"))
t = r"[0-9]{3}-[0-9]{2}-[0-9]{2}"
p = re.compile(t)
print(p.findall("322-55-98"))
print(p.findall("322-55-98, 678-56-12"))
| [
"[email protected]"
]
| |
f0ae052c5b0b8463da08c228210c0886e7c2f4a6 | 2fa12cde6a091a1559617e8f825b00f2a5c7f8ba | /src/007.py | 7441d7fc81f6d9ddf193b5423fc9674d9eb1bc6f | []
| no_license | yeasellllllllll/bioinfo-lecture-2021-07 | b9b333183047ddac4436180cd7c679e3cc0e399a | ce695c4535f9d83e5c9b4a1a8a3fb5857d2a984f | refs/heads/main | 2023-06-15T20:31:35.101747 | 2021-07-18T14:31:27 | 2021-07-18T14:31:27 | 382,995,460 | 0 | 0 | null | 2021-07-05T06:06:35 | 2021-07-05T02:45:29 | Python | UTF-8 | Python | false | false | 87 | py |
for i in range(2,9,2):
for j in range(1,10,1):
print(i, "*", j, '=', i*j)
| [
"[email protected]"
]
| |
fc1a2897b55e9c6109a9729b245562e9d13b8022 | 347c70d4851b568e03e83387f77ae81071ab739e | /older/rc-query-rest/tests/test_rest_query.py | 5974c1291876236f288ae59b86951e2be8b4d673 | [
"MIT"
]
| permissive | neetinkandhare/resilient-community-apps | 59d276b5fb7a92872143ce2b94edd680738693ce | 3ecdabe6bf2fc08f0f8e58cbe92553270d8da42f | refs/heads/master | 2021-12-27T09:05:36.563404 | 2021-09-29T13:04:56 | 2021-09-29T13:04:56 | 159,804,866 | 1 | 0 | MIT | 2021-08-03T19:45:45 | 2018-11-30T10:07:32 | Python | UTF-8 | Python | false | false | 2,446 | py | """System Integration Tests for REST Query component"""
from __future__ import print_function
import os.path
import pytest
from circuits.core.handlers import handler
data_dir = os.path.join(os.path.dirname(__file__), "rest_sample_data")
config_data = """[rest]
queue = rest
query_definitions_dir = %s
test_endpoint = http://httpbin.org/post
""" % (data_dir)
@pytest.mark.usefixtures("configure_resilient")
class TestRESTIntegrationTests:
""" System tests for the REST Query component """
# Appliance Configuration Requirements
destinations = ("rest",)
automatic_actions = {"Payload String Test": ("rest", "Incident",
({u"value": u"Payload Is String",
u"field_name": u"incident.name",
u"method": u"equals"},)),
"Payload Dict Test": ("rest", "Incident",
({u"value": u"Payload Is Dict",
u"field_name": u"incident.name",
u"method": u"equals"},))}
payload_testdata = [pytest.param("Payload Is String", "payload_string_test",
id="string_payload"),
pytest.param("Payload Is Dict", "payload_dict_test",
id="dict_payload")]
@pytest.mark.parametrize("inc_name,rule_name", payload_testdata)
def test_payload_string_or_dict(self, inc_name, rule_name, circuits_app, new_incident):
""" http-body is a string to render or a dict"""
# Incident data will be posted to HTTP Bin and then the incident name will be
# changed to the incident ID that was posted.
new_incident["name"] = inc_name
inc = circuits_app.app.action_component.rest_client().post("/incidents", new_incident)
event = circuits_app.watcher.wait(rule_name + "_success", timeout=10, channel='actions.rest')
assert event
pytest.wait_for(event, "complete", True)
event = circuits_app.watcher.wait("QueryEvent", timeout=10, channel='actions.rest')
assert event
pytest.wait_for(event, "complete", True)
updated_inc = circuits_app.app.action_component.rest_client().get("/incidents/%d" % inc["id"])
assert updated_inc["name"] == str(inc["id"])
| [
"[email protected]"
]
| |
58abc4b1b7819ca83c47d829f036934ed54e49e7 | bf7959048edc0005e04431a0864c719adc5ea9ea | /python版本/451-FrequencySort.py | def3b0ce4fd72584a4725058697bf09520d70677 | []
| no_license | Yohager/Leetcode | 7c24f490cfa5fd8e3cdb09e5a2305a134a064a93 | 585af82ff2c2d534053f6886714406019ed0c7d1 | refs/heads/master | 2022-12-07T23:51:16.347174 | 2022-11-28T02:30:53 | 2022-11-28T02:30:53 | 178,201,848 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | class Solution:
def frequencySort(self, s: str) -> str:
        import collections  # local import so the file runs standalone (the original relied on the judge's environment)
        c = collections.Counter(s)
n = len(c.keys())
ans = ''
for x in c.most_common(n):
ans += x[0] * x[1]
return ans | [
"[email protected]"
]
| |
1d1bce381708be4fc64b894ae43fcf0a22f2e34e | 6ee9a46a95a504cf91eb5031b180f2d6c6cc9d98 | /cut_rod.py | f4f900ef0683dad36b563fa62f8a127caac380dd | []
| no_license | rohitmungre/dynamic_programming | 8dc952f9f83e15a9b6eae8eef0e509da1c2add97 | 1d1f8036f5f6066bdc39436ace8132208466541e | refs/heads/master | 2020-08-01T22:37:25.817167 | 2019-11-20T05:33:11 | 2019-11-20T05:33:11 | 211,140,758 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | rod = 7
sz = [1,2,3,4]
vl = [2,5,7,8]
def cut_rod_dp(sz, vl, rod, idx, memo):
    if rod <= 0:
        return 0
    if idx < 0:
        return 0
    # memoise on (remaining length, piece index) so repeated subproblems are reused
    key = (rod, idx)
    if key in memo:
        return memo[key]
    tval = 0
    varr = []
    while rod >= 0:
        varr.append(tval+cut_rod_dp(sz, vl, rod, idx-1, memo))
        rod = rod - sz[idx]
        tval = tval + vl[idx]
    memo[key] = max(varr)
    return memo[key]
def cut_rod(sz, vl, rod, idx):
if rod<= 0:
return 0
if idx <0:
return 0
tval = 0
varr = []
while rod >= 0:
varr.append(tval+cut_rod(sz, vl, rod, idx-1))
rod = rod - sz[idx]
tval = tval + vl[idx]
return max(varr)
print(cut_rod_dp(sz, vl, rod, 3, {}))
| [
"[email protected]"
]
| |
b83ad2d4e1821a822a0a025c4c8ac3d98b9ceca2 | e87aec694108cb1f76716260daf569bcb8091958 | /fluo/db/backends/postgresql_psycopg2.py | 0dc6fcb482eacb73871660aaf300340fe45c5048 | [
"MIT"
]
| permissive | rsalmaso/django-fluo | a283b8f75769ac6e57fa321c607819899e0c31c8 | 340e3b4f9c1b4b09feccefb9b3ab2d26d59fac2b | refs/heads/master | 2023-01-12T01:37:06.975318 | 2020-12-01T17:13:11 | 2020-12-01T17:13:11 | 48,948,936 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,233 | py | # Copyright (C) 2007-2020, Raffaele Salmaso <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from .postgresql import Backend as Postgresql
__all__ = ["Backend"]
class Backend(Postgresql):
pass
| [
"[email protected]"
]
| |
53db8753d8c4e718450caf4aedd4c34c6bf8bbe6 | 24d8cf871b092b2d60fc85d5320e1bc761a7cbe2 | /BitPim/rev2895-2929/rev2895-2929/playlist.py | 80a59694380a0966cc13538dfbfcc9752a490f64 | []
| no_license | joliebig/featurehouse_fstmerge_examples | af1b963537839d13e834f829cf51f8ad5e6ffe76 | 1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad | refs/heads/master | 2016-09-05T10:24:50.974902 | 2013-03-28T16:28:47 | 2013-03-28T16:28:47 | 9,080,611 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 13,937 | py | """
Code to handle Playlist items.
The playlist data includes 2 components: the list of available songs, and
the playlist items.
The format of the Playlist items is standardized. It is a list of dict which
has the following standard fields:
name: string=the name of the play list
type: string=the type of this play list. Current supported types are mp3 and wma.
songs: [ 'song name', ... ]
To implement Playlist read/write for a phone module:
1. Add 2 entries into Profile._supportedsyncs:
...
('playlist', 'read', 'OVERWRITE'),
('playlist', 'write', 'OVERWRITE'),
2. Implement the following 2 methods in your Phone class:
def getplaylist(self, result)
def saveplaylist(self, result, merge)
The result dict should have:
results[playlist.masterlist_key]=['song name 1', 'song name 2', ...]
results[playlist.playlist_key]=[playlist.PlaylistEntry, playlist.PlaylistEntry, ...]
"""
import wx
import wx.gizmos as gizmos
import database
import helpids
playlist_key='playlist'
masterlist_key='masterlist'
playlists_list='playlists'
mp3_type='mp3'
wma_type='wma'
playlist_type=(mp3_type, wma_type)
class MasterListDataObject (database.basedataobject) :
_knownproperties=[]
_knownlistproperties=database.basedataobject._knownlistproperties.copy()
_knownlistproperties.update({ 'masterlist': ['name'] })
def __init__(self, data=None):
if data is None or not isinstance(data, (list, tuple)):
return
self.update({'masterlist': [{ 'name': x } for x in data] })
_knownlistproperties.update({ 'masterlist': ['name'] })
masterlistobjectfactory=database.dataobjectfactory(MasterListDataObject)
class PlaylistDataObject (database.basedataobject) :
_knownproperties=[]
_knownlistproperties=database.basedataobject._knownlistproperties.copy()
_knownlistproperties.update( { 'playlist': ['name'] })
def __init__(self, data=None):
if data is None or not isinstance(data, (list, tuple)):
return
self.update({'playlist': [{'name': x} for x in data]})
_knownlistproperties.update( { 'playlist': ['name'] })
playlistobjectfactory=database.dataobjectfactory(PlaylistDataObject)
class PlaylistEntryDataObject (database.basedataobject) :
_knownproperties=['type']
_knownlistproperties=database.basedataobject._knownlistproperties.copy()
_knownlistproperties.update({ 'songs': ['name']})
def __init__(self, data=None):
if data is None or not isinstance(data, PlaylistEntry):
return
self.update(data.get_db_dict())
_knownlistproperties.update({ 'songs': ['name']})
playlistentryobjectfactory=database.dataobjectfactory(PlaylistEntryDataObject)
class PlaylistEntry (object) :
def __init__(self):
self._data={ 'serials': [] }
def get(self):
return copy.deepcopy(self._data, {})
def set(self, d):
self._data={}
self._data.update(d)
def get_db_dict(self):
return { 'type': self.pl_type,
'songs': [{ 'name': x } for x in self.songs] }
def set_db_dict(self, d):
self.pl_type=d.get('type', None)
self.songs=[x['name'] for x in d.get('songs', [])]
def _set_or_del(self, key, v, v_list=[]):
if v is None or v in v_list:
if self._data.has_key(key):
del self._data[key]
else:
self._data[key]=v
def _get_name(self):
return self._data.get('name', '')
def _set_name(self, v):
self._set_or_del('name', v, [''])
name=property(fget=_get_name, fset=_set_name)
def _get_type(self):
return self._data.get('type', '')
def _set_type(self, v):
self._set_or_del('type', v, [''])
pl_type=property(fget=_get_type, fset=_set_type)
def _get_songs(self):
return self._data.get('songs', [])
def _set_songs(self, v):
self._set_or_del('songs', v, [[]])
songs=property(fget=_get_songs, fset=_set_songs)
class PlaylistWidget (wx.Panel) :
def __init__(self, mainwindow, parent):
super(PlaylistWidget, self).__init__(parent, -1)
self._mw=mainwindow
self._data=[]
self._master=[]
self.ignoredirty=False
self.dirty=False
vbs=wx.BoxSizer(wx.VERTICAL)
hbs=wx.BoxSizer(wx.HORIZONTAL)
self._item_list=gizmos.EditableListBox(self, -1, 'Play Lists:',
style=gizmos.EL_ALLOW_NEW|\
gizmos.EL_ALLOW_EDIT|\
gizmos.EL_ALLOW_DELETE)
self._item_list.GetUpButton().Show(False)
self._item_list.GetDownButton().Show(False)
self._item_list_w=self._item_list.GetListCtrl()
hbs.Add(self._item_list, 1, wx.EXPAND|wx.ALL, border=5)
hbs.Add(wx.StaticLine(self, -1, style=wx.LI_VERTICAL), 0,
wx.EXPAND|wx.ALL, 5)
hbs1=wx.BoxSizer(wx.HORIZONTAL)
self._pl_list=gizmos.EditableListBox(self, -1, "Play List Content:",
style=gizmos.EL_ALLOW_DELETE)
self._pl_list_w=self._pl_list.GetListCtrl()
hbs1.Add(self._pl_list, 1, wx.EXPAND|wx.ALL, 5)
_add_btn=wx.Button(self, -1, '<-Add')
hbs1.Add(_add_btn, 0, wx.ALL, 5)
self._master_list=gizmos.EditableListBox(self, -1, 'Available Songs:', style=0)
self._master_list_w=self._master_list.GetListCtrl()
self._master_list.GetUpButton().Show(False)
self._master_list.GetDownButton().Show(False)
hbs1.Add(self._master_list, 1, wx.EXPAND|wx.ALL, 5)
hbs.Add(hbs1, 3, wx.EXPAND|wx.ALL, 5)
hbs1=wx.BoxSizer(wx.HORIZONTAL)
self._save_btn=wx.Button(self, wx.NewId(), "Save")
self._revert_btn=wx.Button(self, wx.NewId(), "Revert")
help_btn=wx.Button(self, wx.ID_HELP, "Help")
hbs1.Add(self._save_btn, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
hbs1.Add(help_btn, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
hbs1.Add(self._revert_btn, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
vbs.Add(hbs, 1, wx.EXPAND|wx.ALL, 5)
vbs.Add(wx.StaticLine(self, -1), 0, wx.EXPAND|wx.TOP|wx.BOTTOM, 5)
vbs.Add(hbs1, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
self.SetSizer(vbs)
self.SetAutoLayout(True)
vbs.Fit(self)
wx.EVT_LIST_ITEM_SELECTED(self._item_list, self._item_list_w.GetId(),
self.OnPlaylistSelected)
wx.EVT_LIST_BEGIN_LABEL_EDIT(self._item_list, self._item_list_w.GetId(),
self.OnStartLabelChanged)
wx.EVT_LIST_END_LABEL_EDIT(self._item_list, self._item_list_w.GetId(),
self.OnLabelChanged)
wx.EVT_BUTTON(self, _add_btn.GetId(), self.OnAdd2Playlist)
wx.EVT_BUTTON(self, self._save_btn.GetId(), self.OnSave)
wx.EVT_BUTTON(self, self._revert_btn.GetId(), self.OnRevert)
wx.EVT_LIST_DELETE_ITEM(self._item_list, self._item_list_w.GetId(),
self.OnMakeDirty)
wx.EVT_LIST_DELETE_ITEM(self._pl_list, self._pl_list_w.GetId(),
self.OnMakeDirty)
wx.EVT_BUTTON(self, wx.ID_HELP,
lambda _: wx.GetApp().displayhelpid(helpids.ID_TAB_PLAYLIST))
self._populate()
self.setdirty(False)
def setdirty(self, val):
if self.ignoredirty:
return
self.dirty=val
self._item_list.Enable(not self.dirty)
self._save_btn.Enable(self.dirty)
self._revert_btn.Enable(self.dirty)
def _clear(self, clear_master=True):
self._item_list_w.DeleteAllItems()
self._pl_list_w.DeleteAllItems()
if clear_master:
self._master_list_w.DeleteAllItems()
def _populate_master(self):
self._master_list.SetStrings(self._master)
def _populate_pl_list(self):
self._item_list_w.DeleteAllItems()
if self._data:
self._item_list.SetStrings([e.name for e in self._data])
else:
self._item_list.SetStrings([])
def _name2idx(self, name):
for i,e in enumerate(self._data):
if e.name==name:
return i
def _populate_each(self, name):
self._pl_list_w.DeleteAllItems()
if name is None:
return
self.ignoredirty=True
_list_idx=self._name2idx(name)
if _list_idx is not None:
self._pl_list.SetStrings(self._data[_list_idx].songs)
self.ignoredirty=False
if not self.dirty:
self.setdirty(False)
def _populate(self):
self._populate_master()
self._populate_pl_list()
def populate(self, dict):
self._data=dict.get(playlist_key, [])
self._master=dict.get(masterlist_key, [])
self._clear()
self._populate()
def _save_to_db(self, dict):
db_rr={ masterlist_key: MasterListDataObject(dict.get(masterlist_key, [])) }
database.ensurerecordtype(db_rr, masterlistobjectfactory)
self._mw.database.savemajordict(masterlist_key, db_rr)
_pl_list=dict.get(playlist_key, [])
db_rr={ playlists_list: PlaylistDataObject([x.name for x in _pl_list]) }
database.ensurerecordtype(db_rr, playlistobjectfactory)
self._mw.database.savemajordict(playlists_list, db_rr)
db_rr={ }
for e in _pl_list:
db_rr[e.name]=PlaylistEntryDataObject(e)
database.ensurerecordtype(db_rr, playlistentryobjectfactory)
self._mw.database.savemajordict(playlist_key, db_rr)
def populatefs(self, dict):
self._save_to_db(dict)
return dict
def getfromfs(self, result):
_master_dict=self._mw.database.getmajordictvalues(masterlist_key,
masterlistobjectfactory)
_master_dict=_master_dict.get(masterlist_key, {})
result.update( { masterlist_key: \
[x['name'] for x in _master_dict.get(masterlist_key, [])] })
_pl_list_dict=self._mw.database.getmajordictvalues(playlists_list,
playlistobjectfactory)
_pl_list_dict=_pl_list_dict.get(playlists_list, {})
_pl_entries_dict=self._mw.database.getmajordictvalues(playlist_key,
playlistentryobjectfactory)
_pl_list=[]
for e in _pl_list_dict.get(playlist_key, []):
_pl_entry=_pl_entries_dict.get(e['name'], None)
if _pl_entry:
_entry=PlaylistEntry()
_entry.name=e['name']
                _entry.pl_type=_pl_entry['type']
_entry.songs=[x['name'] for x in _pl_entry['songs']]
_pl_list.append(_entry)
result.update({playlist_key: _pl_list })
return result
def OnMakeDirty(self, _=None):
"""A public function you can call that will set the dirty flag"""
if self.dirty or self.ignoredirty:
return
print 'OnMakeDirty'
self.setdirty(True)
def OnPlaylistSelected(self, evt):
self._populate_each(evt.GetLabel())
evt.Skip()
def OnDirty(self, _):
self.setdirty(True)
def _change_playlist_name(self, new_name):
for e in self._data:
if e.name==self._old_name:
e.name=new_name
def _add_playlist_name(self, new_name):
_entry=PlaylistEntry()
_entry.name=new_name
self._data.append(_entry)
def OnStartLabelChanged(self, evt):
self._old_name=evt.GetLabel()
def OnLabelChanged(self, evt):
_new_name=evt.GetLabel()
if _new_name:
self.setdirty(True)
if self._old_name:
self._change_playlist_name(_new_name)
else:
self._add_playlist_name(_new_name)
evt.Skip()
def OnAdd2Playlist(self, _):
_pl_idx=self._item_list_w.GetNextItem(-1, state=wx.LIST_STATE_SELECTED)
_master_idx=self._master_list_w.GetNextItem(-1, state=wx.LIST_STATE_SELECTED)
if _pl_idx==-1 or _master_idx==-1:
return
_entry_idx=self._name2idx(self._item_list_w.GetItemText(_pl_idx))
if _entry_idx is not None:
self.setdirty(True)
self._pl_list.SetStrings(self._pl_list.GetStrings()+\
[self._master_list_w.GetItemText(_master_idx)])
def _build_playlist(self):
_pl_list=[]
for _name in self._item_list.GetStrings():
if _name:
_idx=self._name2idx(_name)
if _idx is not None:
_pl_list.append(self._data[_idx])
return _pl_list
def OnSave(self, _):
_pl_idx=self._item_list_w.GetNextItem(-1, state=wx.LIST_STATE_SELECTED)
if _pl_idx!=-1:
_entry_idx=self._name2idx(self._item_list_w.GetItemText(_pl_idx))
if _entry_idx is not None:
self._data[_entry_idx].songs=self._pl_list.GetStrings()
self._save_to_db({ masterlist_key: self._master_list.GetStrings(),
playlist_key: self._build_playlist() })
self.setdirty(False)
def OnRevert(self, _):
_pl_idx=self._item_list_w.GetNextItem(-1, state=wx.LIST_STATE_SELECTED)
_res={}
self.getfromfs(_res)
self.populate(_res)
if _pl_idx!=-1:
self._item_list_w.SetItemState(_pl_idx, wx.LIST_STATE_SELECTED,
wx.LIST_MASK_STATE)
self.setdirty(False)
def getdata(self, dict):
dict[masterlist_key]=self._master_list.GetStrings()
dict[playlist_key]=self._build_playlist()
return dict
| [
"[email protected]"
]
| |
73bbab25409bb3a778ef3dd83a746c1a3afa4f41 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/dnsresolver/azure-mgmt-dnsresolver/generated_samples/forwarding_rule_patch.py | ec4f075536336909b5c46cae450b85e6328d0b0b | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
]
| permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,788 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.dnsresolver import DnsResolverManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-dnsresolver
# USAGE
python forwarding_rule_patch.py
Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = DnsResolverManagementClient(
credential=DefaultAzureCredential(),
subscription_id="abdd4249-9f34-4cc6-8e42-c2e32110603e",
)
response = client.forwarding_rules.update(
resource_group_name="sampleResourceGroup",
dns_forwarding_ruleset_name="sampleDnsForwardingRuleset",
forwarding_rule_name="sampleForwardingRule",
parameters={"properties": {"forwardingRuleState": "Disabled", "metadata": {"additionalProp2": "value2"}}},
)
print(response)
# x-ms-original-file: specification/dnsresolver/resource-manager/Microsoft.Network/stable/2022-07-01/examples/ForwardingRule_Patch.json
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
348ec2eec4d21d7506aea88f63e6a2a997a674b6 | 131caeecc070839555b95382fe9c6ea77a618dce | /.history/Classiles/scynced_lights_20210615180248.py | a220c037ce3499a5a4636818e84bdd60366e17aa | [
"Unlicense"
]
| permissive | minefarmer/Coding101-OOP | f128e34c95f5362b3d9a53bbac3d862c3f256263 | d5655977559e3bd1acf6a4f185a6121cc3b05ce4 | refs/heads/main | 2023-05-22T18:42:37.769345 | 2021-06-18T00:28:06 | 2021-06-18T00:28:06 | 376,620,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | """[Scynced Lights]
Class attributes are "shared"
"""
class Light:
pass
a = Light()
b = Light()
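# Illustrative continuation (not in the original snippet): attributes bound on the
# class object are shared, so both instances read the same value until one of them
# shadows the name with its own instance attribute.
Light.colour = "red"
assert a.colour == "red" and b.colour == "red"
Light.colour = "green"   # rebinding on the class is seen through every instance
assert a.colour == "green" and b.colour == "green"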
| [
"[email protected]"
]
| |
6b77d8e8260bf6dcb9f443b9a700a1dfa9e73bc2 | 4678c79ba53884b8a18383d3bf5a312d2408a20a | /adanet/core/estimator.py | b46b61c933221791b9569c7f56d2058a88c14e89 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
]
| permissive | mlzxy/adanet | af902854b8ed79accf3f48121970524bd3283a82 | 5f30fd61457fd6fafea6e4fa9eef178e3de6b9fa | refs/heads/master | 2021-10-10T06:59:04.818230 | 2019-01-07T20:33:24 | 2019-01-07T22:35:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51,691 | py | """An AdaNet estimator implementation in Tensorflow using a single graph.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import errno
import os
import time
from adanet.core.candidate import _CandidateBuilder
from adanet.core.ensemble import _EnsembleBuilder
from adanet.core.ensemble import MixtureWeightType
from adanet.core.iteration import _IterationBuilder
from adanet.core.report_accessor import _ReportAccessor
from adanet.core.summary import _ScopedSummary
from adanet.core.timer import _CountDownTimer
import numpy as np
import six
import tensorflow as tf
from tensorflow.python.ops import resources
class _StopAfterTrainingHook(tf.train.SessionRunHook):
"""Hook that requests stop once iteration is over."""
def __init__(self, iteration, after_fn):
"""Initializes a `_StopAfterTrainingHook`.
Args:
iteration: An `_Iteration` instance.
after_fn: A function to call after training stopped.
Returns:
A `_StopAfterTrainingHook` instance.
"""
self._iteration = iteration
self._after_fn = after_fn
def before_run(self, run_context):
"""See `SessionRunHook`."""
del run_context # Unused
return tf.train.SessionRunArgs(self._iteration.is_over_fn())
def after_run(self, run_context, run_values):
"""See `SessionRunHook`."""
is_over = run_values.results
if not is_over:
return
run_context.request_stop()
self._after_fn()
class _EvalMetricSaverHook(tf.train.SessionRunHook):
"""A hook for writing evaluation metrics as summaries to disk."""
def __init__(self, name, eval_metric_ops, output_dir):
"""Initializes a `_EvalMetricSaverHook` instance.
Args:
name: String name of candidate owner of these metrics.
eval_metric_ops: Dict of metric results keyed by name. The values of the
dict are the results of calling a metric function, namely a
`(metric_tensor, update_op)` tuple. `metric_tensor` should be evaluated
without any impact on state (typically is a pure computation based on
variables.). For example, it should not trigger the `update_op` or
require any input fetching.
output_dir: Directory for writing evaluation summaries.
Returns:
An `_EvalMetricSaverHook` instance.
"""
self._name = name
self._eval_metric_ops = eval_metric_ops
self._output_dir = output_dir
def before_run(self, run_context):
"""See `SessionRunHook`."""
del run_context # Unused
return tf.train.SessionRunArgs(self._eval_metric_ops)
def _dict_to_str(self, dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
return ", ".join("%s = %s" % (k, v) for k, v in sorted(dictionary.items()))
def end(self, session):
"""See `SessionRunHook`."""
# Forked from tensorflow/python/estimator/estimator.py function called
# _write_dict_to_summary.
eval_dict = {}
for key, metric in self._eval_metric_ops.items():
eval_dict[key] = metric[0]
current_global_step = tf.train.get_global_step()
eval_dict, current_global_step = session.run((eval_dict,
current_global_step))
tf.logging.info("Saving candidate '%s' dict for global step %d: %s",
self._name, current_global_step,
self._dict_to_str(eval_dict))
summary_writer = tf.summary.FileWriterCache.get(self._output_dir)
summary_proto = tf.summary.Summary()
for key in eval_dict:
value = eval_dict[key]
if isinstance(value, (np.float32, float)):
summary_proto.value.add(tag=key, simple_value=float(value))
elif isinstance(value, six.binary_type):
summ = tf.summary.Summary.FromString(value)
for i, _ in enumerate(summ.value):
summ.value[i].tag = "%s/%d" % (key, i)
summary_proto.value.extend(summ.value)
else:
tf.logging.warn(
"Skipping summary for %s, must be a float, np.float32, "
"or a serialized string of Summary.", key)
summary_writer.add_summary(summary_proto, current_global_step)
summary_writer.flush()
class Estimator(tf.estimator.Estimator):
# pyformat: disable
r"""The AdaNet algorithm implemented as a :class:`tf.estimator.Estimator`.
AdaNet is as defined in the paper: https://arxiv.org/abs/1607.01097.
The AdaNet algorithm uses a weak learning algorithm to iteratively generate a
set of candidate subnetworks that attempt to minimize the loss function
defined in Equation (4) as part of an ensemble. At the end of each iteration,
the best candidate is chosen based on its ensemble's complexity-regularized
train loss. New subnetworks are allowed to use any subnetwork weights within
the previous iteration's ensemble in order to improve upon them. If the
complexity-regularized loss of the new ensemble, as defined in Equation (4),
is less than that of the previous iteration's ensemble, the AdaNet algorithm
continues onto the next iteration.
AdaNet attempts to minimize the following loss function to learn the mixture
weights 'w' of each subnetwork 'h' in the ensemble with differentiable
convex non-increasing surrogate loss function Phi:
Equation (4):
.. math::
F(w) = \frac{1}{m} \sum_{i=1}^{m} \Phi \left(\sum_{j=1}^{N}w_jh_j(x_i),
y_i \right) + \sum_{j=1}^{N} \left(\lambda r(h_j) + \beta \right) |w_j|
with :math:`\lambda >= 0` and :math:`\beta >= 0`.
This implementation uses an :class:`adanet.subnetwork.Generator` as its weak
learning algorithm for generating candidate subnetworks. These are trained in
parallel using a single graph per iteration. At the end of each iteration, the
estimator saves the sub-graph of the best subnetwork ensemble and its weights
as a separate checkpoint. At the beginning of the next iteration, the
estimator imports the previous iteration's frozen graph and adds ops for the
next candidates as part of a new graph and session. This allows the estimator
to have the performance of TensorFlow's static graph constraint (minus the
performance hit of reconstructing a graph between iterations), while retaining
the flexibility of a dynamic graph.
NOTE: Subclassing :class:`tf.estimator.Estimator` is only necessary to work
with :meth:`tf.estimator.train_and_evaluate` which asserts that the estimator
argument is a :class:`tf.estimator.Estimator` subclass. However, all training
is delegated to a separate :class:`tf.estimator.Estimator` instance. It is
responsible for supporting both local and distributed training. As such, the
:class:`adanet.Estimator` is only responsible for bookkeeping across
iterations.
Args:
head: A :class:`tf.contrib.estimator.Head` instance for computing loss and
evaluation metrics for every candidate.
subnetwork_generator: The :class:`adanet.subnetwork.Generator` which defines
the candidate subnetworks to train and evaluate at every AdaNet iteration.
max_iteration_steps: Total number of steps for which to train candidates per
iteration. If :class:`OutOfRange` or :class:`StopIteration` occurs in the
middle, training stops before `max_iteration_steps` steps.
mixture_weight_type: The :class:`adanet.MixtureWeightType` defining which
mixture weight type to learn in the linear combination of subnetwork
outputs:
- :class:`SCALAR`: creates a rank 0 tensor mixture weight. It performs
an element-wise multiplication with its subnetwork's logits. This
mixture weight is the simplest to learn, the quickest to train, and
most likely to generalize well.
- :class:`VECTOR`: creates a tensor with shape [k] where k is the
ensemble's logits dimension as defined by `head`. It is similar to
`SCALAR` in that it performs an element-wise multiplication with its
subnetwork's logits, but is more flexible in learning a subnetworks's
preferences per class.
- :class:`MATRIX`: creates a tensor of shape [a, b] where a is the
number of outputs from the subnetwork's `last_layer` and b is the
number of outputs from the ensemble's `logits`. This weight
matrix-multiplies the subnetwork's `last_layer`. This mixture weight
offers the most flexibility and expressivity, allowing subnetworks to
have outputs of different dimensionalities. However, it also has the
most trainable parameters (a*b), and is therefore the most sensitive
to learning rates and regularization.
mixture_weight_initializer: The initializer for mixture_weights. When
`None`, the default is different according to `mixture_weight_type`:
- :class:`SCALAR`: initializes to 1/N where N is the number of
subnetworks in the ensemble giving a uniform average.
- :class:`VECTOR`: initializes each entry to 1/N where N is the number
of subnetworks in the ensemble giving a uniform average.
- :class:`MATRIX`: uses :meth:`tf.zeros_initializer`.
warm_start_mixture_weights: Whether, at the beginning of an iteration, to
initialize the mixture weights of the subnetworks from the previous
ensemble to their learned value at the previous iteration, as opposed to
retraining them from scratch. Takes precedence over the value for
`mixture_weight_initializer` for subnetworks from previous iterations.
adanet_lambda: Float multiplier 'lambda' for applying L1 regularization to
subnetworks' mixture weights 'w' in the ensemble proportional to their
complexity. See Equation (4) in the AdaNet paper.
adanet_beta: Float L1 regularization multiplier 'beta' to apply equally to
all subnetworks' weights 'w' in the ensemble regardless of their
complexity. See Equation (4) in the AdaNet paper.
evaluator: An :class:`adanet.Evaluator` for candidate selection after all
subnetworks are done training. When `None`, candidate selection uses a
moving average of their :class:`adanet.Ensemble` AdaNet loss during
training instead. In order to use the *AdaNet algorithm* as described in
[Cortes et al., '17], the given :class:`adanet.Evaluator` must be created
with the same dataset partition used during training. Otherwise, this
framework will perform *AdaNet.HoldOut* which uses a holdout set for
candidate selection, but does not benefit from learning guarantees.
report_materializer: An :class:`adanet.ReportMaterializer`. Its reports are
made available to the `subnetwork_generator` at the next iteration, so
that it can adapt its search space. When `None`, the
`subnetwork_generator` :meth:`generate_candidates` method will receive
empty Lists for their `previous_ensemble_reports` and `all_reports`
arguments.
use_bias: Whether to add a bias term to the ensemble's logits. Adding a bias
allows the ensemble to learn a shift in the data, often leading to more
stable training and better predictions.
metric_fn: A function for adding custom evaluation metrics, which should
obey the following signature:
- `Args`:
Can only have the following three arguments in any order:
- `predictions`: Predictions `Tensor` or dict of `Tensor` created by
given `head`.
- `features`: Input `dict` of `Tensor` objects created by `input_fn`
which is given to `estimator.evaluate` as an argument.
- `labels`: Labels `Tensor` or dict of `Tensor` (for multi-head)
created by `input_fn` which is given to `estimator.evaluate` as an
argument.
- `Returns`: Dict of metric results keyed by name. Final metrics are a
union of this and `head's` existing metrics. If there is a name
conflict between this and `head`s existing metrics, this will override
the existing one. The values of the dict are the results of calling a
metric function, namely a `(metric_tensor, update_op)` tuple.
force_grow: Boolean override that forces the ensemble to grow by one
subnetwork at the end of each iteration. Normally at the end of each
iteration, AdaNet selects the best candidate ensemble according to its
performance on the AdaNet objective. In some cases, the best ensemble is
the `previous_ensemble` as opposed to one that includes a newly trained
subnetwork. When `True`, the algorithm will not select the
`previous_ensemble` as the best candidate, and will ensure that after n
iterations the final ensemble is composed of n subnetworks.
replicate_ensemble_in_training: Whether to rebuild the frozen subnetworks of
the ensemble in training mode, which can change the outputs of the frozen
subnetworks in the ensemble. When `False` and during candidate training,
the frozen subnetworks in the ensemble are in prediction mode, so
training-only ops like dropout are not applied to them. When `True` and
training the candidates, the frozen subnetworks will be in training mode
as well, so they will apply training-only ops like dropout. This argument
is useful for regularizing learning mixture weights, or for making
training-only side inputs available in subsequent iterations. For most
use-cases, this should be `False`.
adanet_loss_decay: Float decay for the exponential-moving-average of the
AdaNet objective throughout training. This moving average is a data-driven
way of tracking the best candidate with only the training set.
worker_wait_timeout_secs: Float number of seconds for workers to wait for
chief to prepare the next iteration during distributed training. This is
needed to prevent workers waiting indefinitely for a chief that may have
crashed or been turned down. When the timeout is exceeded, the worker
exits the train loop. In situations where the chief job is much slower
than the worker jobs, this timeout should be increased.
model_dir: Directory to save model parameters, graph and etc. This can also
be used to load checkpoints from the directory into a estimator to
continue training a previously saved model.
report_dir: Directory where the `adanet.subnetwork.MaterializedReport`s
materialized by `report_materializer` would be saved. If
`report_materializer` is None, this will not save anything. If `None` or
empty string, defaults to "<model_dir>/report".
config: `RunConfig` object to configure the runtime settings.
**kwargs: Extra keyword args passed to the parent.
Returns:
An `Estimator` instance.
Raises:
ValueError: If `subnetwork_generator` is `None`.
ValueError: If `max_iteration_steps` is <= 0.
"""
# pyformat: enable
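# Illustrative usage sketch (not part of the original source): `MyGenerator`,
# `train_input_fn` and `eval_input_fn` are assumptions standing in for a real
# `adanet.subnetwork.Generator` and the usual tf.estimator input functions.
#
#   estimator = Estimator(
#       head=tf.contrib.estimator.binary_classification_head(),
#       subnetwork_generator=MyGenerator(),
#       max_iteration_steps=1000)
#   estimator.train(input_fn=train_input_fn, max_steps=5000)
#   metrics = estimator.evaluate(input_fn=eval_input_fn)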
class _Keys(object):
CURRENT_ITERATION = "current_iteration"
EVALUATE_ENSEMBLES = "evaluate_ensembles"
MATERIALIZE_REPORT = "materialize_report"
INCREMENT_ITERATION = "increment_iteration"
PREVIOUS_ENSEMBLE_ARCHITECTURE = "previous_ensemble_architecture"
SUBNETWORK_GENERATOR = "subnetwork_generator"
def __init__(self,
head,
subnetwork_generator,
max_iteration_steps,
mixture_weight_type=MixtureWeightType.SCALAR,
mixture_weight_initializer=None,
warm_start_mixture_weights=False,
adanet_lambda=0.,
adanet_beta=0.,
evaluator=None,
report_materializer=None,
use_bias=False,
metric_fn=None,
force_grow=False,
replicate_ensemble_in_training=False,
adanet_loss_decay=.9,
worker_wait_timeout_secs=7200,
model_dir=None,
report_dir=None,
config=None,
**kwargs):
# TODO: Add argument to specify how many frozen graph
# checkpoints to keep.
if subnetwork_generator is None:
raise ValueError("subnetwork_generator can't be None.")
if max_iteration_steps <= 0.:
raise ValueError("max_iteration_steps must be > 0.")
self._subnetwork_generator = subnetwork_generator
self._adanet_loss_decay = adanet_loss_decay
# Overwrite superclass's assert that members are not overwritten in order
# to overwrite public methods. Note that we are doing something that is not
# explicitly supported by the Estimator API and may break in the future.
tf.estimator.Estimator._assert_members_are_not_overridden = staticmethod( # pylint: disable=protected-access
lambda _: None)
self._evaluation_checkpoint_path = None
self._evaluator = evaluator
self._report_materializer = report_materializer
self._force_grow = force_grow
self._worker_wait_timeout_secs = worker_wait_timeout_secs
self._evaluation_name = None
self._inside_adanet_training_loop = False
# This `Estimator` is responsible for bookkeeping across iterations, and
# for training the subnetworks in both a local and distributed setting.
# Subclassing improves future-proofing against new private methods being
# added to `tf.estimator.Estimator` that are expected to be callable by
# external functions, such as in b/110435640.
super(Estimator, self).__init__(
model_fn=self._adanet_model_fn,
params={},
config=config,
model_dir=model_dir,
**kwargs)
# These are defined after base Estimator's init so that they can
# use the same temporary model_dir as the underlying Estimator even if
# model_dir is not provided.
self._ensemble_builder = _EnsembleBuilder(
head=head,
mixture_weight_type=mixture_weight_type,
mixture_weight_initializer=mixture_weight_initializer,
warm_start_mixture_weights=warm_start_mixture_weights,
checkpoint_dir=self._model_dir,
adanet_lambda=adanet_lambda,
adanet_beta=adanet_beta,
use_bias=use_bias,
metric_fn=metric_fn)
candidate_builder = _CandidateBuilder(
max_steps=max_iteration_steps,
adanet_loss_decay=self._adanet_loss_decay)
self._iteration_builder = _IterationBuilder(candidate_builder,
self._ensemble_builder,
replicate_ensemble_in_training)
report_dir = report_dir or os.path.join(self._model_dir, "report")
self._report_accessor = _ReportAccessor(report_dir)
def _latest_checkpoint_iteration_number(self):
"""Returns the iteration number from the latest checkpoint."""
latest_checkpoint = tf.train.latest_checkpoint(self.model_dir)
if latest_checkpoint is None:
return 0
return tf.contrib.framework.load_variable(latest_checkpoint,
self._Keys.CURRENT_ITERATION)
def _latest_checkpoint_architecture(self):
"""Returns the iteration number from the latest checkpoint."""
latest_checkpoint = tf.train.latest_checkpoint(self.model_dir)
if latest_checkpoint is None:
return ""
return tf.contrib.framework.load_variable(
latest_checkpoint, self._Keys.PREVIOUS_ENSEMBLE_ARCHITECTURE)
def _latest_checkpoint_global_step(self):
"""Returns the global step from the latest checkpoint."""
latest_checkpoint = tf.train.latest_checkpoint(self.model_dir)
if latest_checkpoint is None:
return 0
return tf.contrib.framework.load_variable(latest_checkpoint,
tf.GraphKeys.GLOBAL_STEP)
@contextlib.contextmanager
def _train_loop_context(self):
"""Tracks where the context is within the AdaNet train loop."""
self._inside_adanet_training_loop = True
yield
self._inside_adanet_training_loop = False
def train(self,
input_fn,
hooks=None,
steps=None,
max_steps=None,
saving_listeners=None):
if (steps is not None) and (max_steps is not None):
raise ValueError("Can not provide both steps and max_steps.")
if steps is not None and steps <= 0:
raise ValueError("Must specify steps > 0, given: {}".format(steps))
if steps is not None:
max_steps = self._latest_checkpoint_global_step() + steps
# Each iteration of this AdaNet loop represents an `_Iteration`. The
# current iteration number is stored as a variable in the checkpoint so
# that training can be stopped and started at anytime.
with self._train_loop_context():
while True:
current_iteration = self._latest_checkpoint_iteration_number()
tf.logging.info("Beginning training AdaNet iteration %s",
current_iteration)
self._iteration_ended = False
result = super(Estimator, self).train(
input_fn=input_fn,
hooks=hooks,
max_steps=max_steps,
saving_listeners=saving_listeners)
tf.logging.info("Finished training Adanet iteration %s",
current_iteration)
# If training ended because the maximum number of training steps
# occurred, exit training.
if self._latest_checkpoint_global_step() >= max_steps:
return result
# If training ended for any reason other than the iteration ending,
# exit training.
if not self._iteration_ended:
return result
tf.logging.info("Beginning bookkeeping phase for iteration %s",
current_iteration)
# The chief prepares the next AdaNet iteration, and increments the
# iteration number by 1.
if self.config.is_chief:
# As the chief, store the train hooks and make a placeholder input_fn
# in order to use them when preparing the next iteration.
self._train_hooks = hooks or ()
self._prepare_next_iteration(input_fn)
# This inner loop serves mainly for synchronizing the workers with the
# chief during distributed training. Workers that finish training early
# wait for the chief to prepare the next iteration and increment the
# iteration number. Workers that are slow to finish training quickly
# move onto the next iteration. And workers that go offline and return
# online after training ended terminate gracefully.
wait_for_chief = not self.config.is_chief
timer = _CountDownTimer(self._worker_wait_timeout_secs)
while wait_for_chief:
# If the chief hits max_steps, it will stop training itself and not
# increment the iteration number, so this is how the worker knows to
# exit if it wakes up and the chief is gone.
# TODO: Support steps parameter.
if self._latest_checkpoint_global_step() >= max_steps:
return result
# In distributed training, a worker may end training before the chief
# overwrites the checkpoint with the incremented iteration number. If
# that is the case, it should wait for the chief to do so. Otherwise
# the worker will get stuck waiting for its weights to be initialized.
next_iteration = self._latest_checkpoint_iteration_number()
if next_iteration > current_iteration:
break
# Check timeout when waiting for potentially downed chief.
if timer.secs_remaining() == 0:
tf.logging.error(
"Chief job did not prepare next iteration after %s secs. It "
"may have been preempted, been turned down, or crashed. This "
"worker is now exiting training.",
self._worker_wait_timeout_secs)
return result
tf.logging.info("Waiting for chief to finish")
time.sleep(5)
# Stagger starting workers to prevent training instability.
if not self.config.is_chief:
task_id = self.config.task_id or 0
# Wait 5 secs more for each new worker up to 60 secs.
delay_secs = min(60, task_id * 5)
tf.logging.info("Waiting %d secs before starting training.",
delay_secs)
time.sleep(delay_secs)
tf.logging.info("Finished bookkeeping phase for iteration %s",
current_iteration)
def evaluate(self,
input_fn,
steps=None,
hooks=None,
checkpoint_path=None,
name=None):
if not checkpoint_path:
checkpoint_path = tf.train.latest_checkpoint(self.model_dir)
# Ensure that the read to get the iteration number and read to restore
# variable values come from the same checkpoint during evaluation.
self._evaluation_checkpoint_path = checkpoint_path
self._evaluation_name = name
result = super(Estimator, self).evaluate(
input_fn,
steps=steps,
hooks=hooks,
checkpoint_path=checkpoint_path,
name=name)
self._evaluation_checkpoint_path = None
return result
def _call_adanet_model_fn(self, input_fn, mode, params):
"""Calls model_fn with the given mode and parameters."""
with tf.Graph().as_default():
tf.set_random_seed(self.config.tf_random_seed)
# Create global step before calling model_fn as does superclass.
tf.train.get_or_create_global_step()
features, labels = input_fn()
self._adanet_model_fn(features, labels, mode, params)
def _prepare_next_iteration(self, train_input_fn):
"""Prepares the next iteration.
This method calls model_fn up to three times:
1. To evaluate all candidate ensembles to find the best one.
2. To materialize reports and store them to disk (if report_materializer
exists).
3. To overwrite the model directory's checkpoint with the next iteration's
ops.
Args:
train_input_fn: The input_fn used during training.
"""
# First, evaluate and choose the best ensemble for this iteration.
params = self.params.copy()
params[self._Keys.EVALUATE_ENSEMBLES] = True
if self._evaluator:
evaluator_input_fn = self._evaluator.input_fn
else:
evaluator_input_fn = train_input_fn
self._call_adanet_model_fn(evaluator_input_fn, tf.estimator.ModeKeys.EVAL,
params)
# Then materialize and store the subnetwork reports.
if self._report_materializer:
params = self.params.copy()
params[self._Keys.MATERIALIZE_REPORT] = True
self._call_adanet_model_fn(self._report_materializer.input_fn,
tf.estimator.ModeKeys.EVAL, params)
self._best_ensemble_index = None
# Finally, create the graph for the next iteration and overwrite the model
# directory checkpoint with the expanded graph.
params = self.params.copy()
params[self._Keys.INCREMENT_ITERATION] = True
self._call_adanet_model_fn(train_input_fn, tf.estimator.ModeKeys.TRAIN,
params)
def _architecture_filename(self, iteration_number):
"""Returns the filename of the given iteration's frozen graph."""
frozen_checkpoint = os.path.join(self.model_dir, "architecture")
return "{}-{}.txt".format(frozen_checkpoint, iteration_number)
def _overwrite_checkpoint(self, current_iteration, iteration_number_tensor):
"""Overwrites the latest checkpoint with the current graph.
This is necessary for two reasons:
1. To add variables to the checkpoint that were newly created for the
next iteration. Otherwise Estimator will raise an exception for having a
checkpoint missing variables.
2. To increment the current iteration number so that workers know when to
begin training the next iteration.
Args:
current_iteration: Current `_Iteration` object.
iteration_number_tensor: Int variable `Tensor` storing the current
iteration number.
"""
checkpoint_state = tf.train.get_checkpoint_state(self.model_dir)
latest_checkpoint = checkpoint_state.model_checkpoint_path
if not latest_checkpoint:
return
# Run train hook 'begin' methods which can add ops to the graph, so that
# they are still present in the overwritten checkpoint.
train_hooks = tuple(self._train_hooks) or ()
for candidate in current_iteration.candidates:
if not candidate.ensemble_spec.subnetwork_train_op:
assert not candidate.ensemble_spec.ensemble_train_op
continue
train_hooks += candidate.ensemble_spec.subnetwork_train_op.chief_hooks
train_hooks += candidate.ensemble_spec.subnetwork_train_op.hooks
train_hooks += candidate.ensemble_spec.ensemble_train_op.chief_hooks
train_hooks += candidate.ensemble_spec.ensemble_train_op.hooks
for hook in train_hooks:
hook.begin()
global_step_tensor = tf.train.get_global_step()
global_step = tf.contrib.framework.load_variable(latest_checkpoint,
tf.GraphKeys.GLOBAL_STEP)
checkpoint_path = os.path.join(self.model_dir, "increment.ckpt")
with tf.Session(target=self.config.master) as sess:
init = tf.group(
tf.global_variables_initializer(), tf.local_variables_initializer(),
tf.tables_initializer(),
resources.initialize_resources(resources.shared_resources()))
sess.run(init)
coord = tf.train.Coordinator()
tf.train.start_queue_runners(sess=sess, coord=coord)
control_deps = [
tf.assign(global_step_tensor, global_step),
tf.assign(iteration_number_tensor, current_iteration.number),
]
with tf.control_dependencies(control_deps):
saver = tf.train.Saver(
sharded=True, max_to_keep=self.config.keep_checkpoint_max)
saver.recover_last_checkpoints(
checkpoint_state.all_model_checkpoint_paths)
saver.save(sess, checkpoint_path, global_step=current_iteration.number)
for hook in train_hooks:
hook.end(sess)
def _get_best_ensemble_index(self, current_iteration):
"""Returns the best candidate ensemble's index in this iteration.
Evaluates the ensembles using an `Evaluator` when provided. Otherwise,
it returns the index of the best candidate as defined by the `_Iteration`.
Args:
current_iteration: Current `_Iteration`.
Returns:
Index of the best ensemble in the iteration's list of `_Candidates`.
"""
# Skip the evaluation phase when there is only one candidate subnetwork.
if len(current_iteration.candidates) == 1:
tf.logging.info(
"As the only candidate, '%s' is moving onto the next iteration.",
current_iteration.candidates[0].ensemble_spec.name)
return 0
# The zero-th index candidate at iteration t>0 is always the
# previous_ensemble.
if current_iteration.number > 0 and self._force_grow and (len(
current_iteration.candidates) == 2):
tf.logging.info(
"As the only candidate with `force_grow` enabled, '%s' is moving"
"onto the next iteration.",
current_iteration.candidates[1].ensemble_spec.name)
return 1
latest_checkpoint = tf.train.latest_checkpoint(self.model_dir)
tf.logging.info("Starting ensemble evaluation for iteration %s",
current_iteration.number)
with tf.Session() as sess:
init = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer(), tf.tables_initializer())
sess.run(init)
saver = tf.train.Saver(sharded=True)
saver.restore(sess, latest_checkpoint)
coord = tf.train.Coordinator()
tf.train.start_queue_runners(sess=sess, coord=coord)
if self._evaluator:
adanet_losses = [
c.ensemble_spec.adanet_loss for c in current_iteration.candidates
]
adanet_losses = self._evaluator.evaluate_adanet_losses(
sess, adanet_losses)
else:
adanet_losses = sess.run(
[c.adanet_loss for c in current_iteration.candidates])
values = []
for i in range(len(current_iteration.candidates)):
metric_name = "adanet_loss"
ensemble_name = current_iteration.candidates[i].ensemble_spec.name
values.append("{}/{} = {:.6f}".format(metric_name, ensemble_name,
adanet_losses[i]))
tf.logging.info("Computed ensemble metrics: %s", ", ".join(values))
if self._force_grow and current_iteration.number > 0:
tf.logging.info(
"The `force_grow` override is enabled, so the "
"the performance of the previous ensemble will be ignored.")
# NOTE: The zero-th index candidate at iteration t>0 is always the
# previous_ensemble.
adanet_losses = adanet_losses[1:]
index = np.argmin(adanet_losses) + 1
else:
index = np.argmin(adanet_losses)
tf.logging.info("Finished ensemble evaluation for iteration %s",
current_iteration.number)
tf.logging.info("'%s' at index %s is moving onto the next iteration",
current_iteration.candidates[index].ensemble_spec.name,
index)
return index
def _materialize_report(self, current_iteration):
"""Generates reports as defined by `Builder`s.
Materializes the Tensors and metrics defined in the `Builder`s'
`build_subnetwork_report` method using `ReportMaterializer`, and stores
them to disk using `_ReportAccessor`.
Args:
current_iteration: Current `_Iteration`.
"""
latest_checkpoint = tf.train.latest_checkpoint(self.model_dir)
tf.logging.info("Starting metric logging for iteration %s",
current_iteration.number)
assert self._best_ensemble_index is not None
best_candidate = current_iteration.candidates[self._best_ensemble_index]
best_ensemble = best_candidate.ensemble_spec.ensemble
best_name = best_ensemble.weighted_subnetworks[-1].name
included_subnetwork_names = [best_name]
with tf.Session() as sess:
init = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer(), tf.tables_initializer())
sess.run(init)
saver = tf.train.Saver(sharded=True)
saver.restore(sess, latest_checkpoint)
coord = tf.train.Coordinator()
tf.train.start_queue_runners(sess=sess, coord=coord)
materialized_reports = (
self._report_materializer.materialize_subnetwork_reports(
sess, current_iteration.number,
current_iteration.subnetwork_reports, included_subnetwork_names))
self._report_accessor.write_iteration_report(current_iteration.number,
materialized_reports)
tf.logging.info("Finished saving subnetwork reports for iteration %s",
current_iteration.number)
def _training_hooks(self, current_iteration, training):
"""Returns training hooks for this iteration.
Args:
current_iteration: Current `_Iteration`.
training: Whether in training mode.
Returns:
A list of `tf.train.SessionRunHook` instances.
"""
if not training:
return []
def after_fn():
self._iteration_ended = True
training_hooks = list(current_iteration.estimator_spec.training_hooks) + [
_StopAfterTrainingHook(current_iteration, after_fn=after_fn)
]
for summary in current_iteration.summaries:
output_dir = self.model_dir
if summary.scope:
output_dir = os.path.join(output_dir, "candidate", summary.scope)
summary_saver_hook = tf.train.SummarySaverHook(
save_steps=self.config.save_summary_steps,
output_dir=output_dir,
summary_op=summary.merge_all())
training_hooks.append(summary_saver_hook)
return training_hooks
def _evaluation_hooks(self, current_iteration, training):
"""Returns evaluation hooks for this iteration.
Args:
current_iteration: Current `_Iteration`.
training: Whether in training mode.
Returns:
A list of `tf.train.SessionRunHook` instances.
"""
if training:
return []
evaluation_hooks = []
for candidate in current_iteration.candidates:
eval_subdir = "eval"
if self._evaluation_name:
eval_subdir = "eval_{}".format(self._evaluation_name)
eval_metric_hook = _EvalMetricSaverHook(
name=candidate.ensemble_spec.name,
eval_metric_ops=candidate.ensemble_spec.eval_metric_ops,
output_dir=os.path.join(self.model_dir, "candidate",
candidate.ensemble_spec.name, eval_subdir))
evaluation_hooks.append(eval_metric_hook)
return evaluation_hooks
def _save_architecture(self, filename, ensemble):
"""Persists the ensemble's architecture in a serialized format.
Writes to a text file with one subnetwork's iteration number and name
per line.
Args:
filename: String filename to persist the ensemble architecture.
ensemble: Target `adanet.Ensemble` instance.
"""
architecture = [
"{}:{}".format(w.iteration_number, w.name)
for w in ensemble.weighted_subnetworks
]
# Make directories since model_dir may not have been created yet.
tf.gfile.MakeDirs(os.path.dirname(filename))
with tf.gfile.GFile(filename, "w") as record_file:
record_file.write(os.linesep.join(architecture))
def _read_architecture(self, filename):
"""Reads an ensemble architecture from disk.
Assumes the file was written with `_save_architecture`.
Args:
filename: String filename where the architecture was recorded.
Returns:
A list of <iteration_number>:<subnetwork name> strings.
Raises:
OSError: When file not found at `filename`.
"""
if not tf.gfile.Exists(filename):
raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), filename)
architecture = []
with tf.gfile.GFile(filename, "r") as record_file:
for line in record_file:
feature_name = line.rstrip()
architecture.append(feature_name)
return architecture
# TODO: Refactor architecture building logic to its own module.
def _architecture_ensemble_spec(self, architecture, features, mode, labels):
"""Returns an `_EnsembleSpec` with the given architecture.
Creates the ensemble architecture by calling `generate_subnetworks` on
`self._subnetwork_generator` and only calling `build_subnetwork` on
`Builders` included in the architecture. Once their ops are created, their
variables are restored from the checkpoint.
Args:
architecture: A list of <iteration_number>:<subnetwork name> strings.
features: Dictionary of `Tensor` objects keyed by feature name.
mode: Defines whether this is training, evaluation or prediction. See
`ModeKeys`.
labels: Labels `Tensor` or a dictionary of string label name to `Tensor`
(for multi-head). Can be `None`.
Returns:
An `EnsembleSpec` instance for the given architecture.
Raises:
ValueError: If a subnetwork from `architecture` is not found in the
generated candidate `Builders` of the specified iteration.
"""
previous_ensemble_spec = None
previous_ensemble = None
for serialized_subnetwork in architecture:
serialized_iteration_number, name = serialized_subnetwork.split(":")
rebuild_iteration_number = int(serialized_iteration_number)
previous_ensemble_reports, all_reports = [], []
if self._report_materializer:
previous_ensemble_reports, all_reports = (
self._collate_subnetwork_reports(rebuild_iteration_number))
generated_subnetwork_builders = (
self._subnetwork_generator.generate_candidates(
previous_ensemble=previous_ensemble,
iteration_number=rebuild_iteration_number,
previous_ensemble_reports=previous_ensemble_reports,
all_reports=all_reports))
rebuild_subnetwork_builder = None
for builder in generated_subnetwork_builders:
if builder.name == name:
rebuild_subnetwork_builder = builder
break
if rebuild_subnetwork_builder is None:
raise ValueError("Required subnetwork name is missing from "
"generated candidates: {}".format(name))
previous_ensemble_summary = None
if previous_ensemble_spec:
# Always skip summaries when rebuilding previous architecture,
# since they are not useful.
previous_ensemble_summary = _ScopedSummary(
previous_ensemble_spec.name, skip_summary=True)
current_iteration = self._iteration_builder.build_iteration(
iteration_number=rebuild_iteration_number,
subnetwork_builders=[rebuild_subnetwork_builder],
features=features,
labels=labels,
mode=mode,
previous_ensemble_summary=previous_ensemble_summary,
previous_ensemble_spec=previous_ensemble_spec,
rebuilding=True)
previous_ensemble_spec = current_iteration.candidates[-1].ensemble_spec
previous_ensemble = previous_ensemble_spec.ensemble
return previous_ensemble_spec
def _collate_subnetwork_reports(self, iteration_number):
"""Prepares subnetwork.Reports to be passed to Generator.
Reads subnetwork.MaterializedReports from past iterations,
collates those that were included in previous_ensemble into
previous_ensemble_reports as a List of subnetwork.MaterializedReports,
and collates all reports from previous iterations into all_reports as
another List of subnetwork.MaterializedReports.
Args:
iteration_number: Python integer AdaNet iteration number, starting from 0.
Returns:
(previous_ensemble_reports: List<subnetwork.MaterializedReport>,
materialized_reports: List<MaterializedReport>)
"""
materialized_reports_all = (self._report_accessor.read_iteration_reports())
previous_ensemble_reports = []
all_reports = []
# Since the number of iteration reports changes after the
# MATERIALIZE_REPORT phase, we need to make sure that we always pass the
# same reports to the Generator in the same iteration,
# otherwise the graph that is built in the FREEZE_ENSEMBLE phase would be
# different from the graph built in the training phase.
# Iteration 0 should have 0 iteration reports passed to the
# Generator, since there are no previous iterations.
# Iteration 1 should have 1 list of reports for Builders
# generated in iteration 0.
# Iteration 2 should have 2 lists of reports -- one for iteration 0,
# one for iteration 1. Note that the list of reports for iteration >= 1
# should contain "previous_ensemble", in addition to the
# Builders at the start of that iteration.
# Iteration t should have t lists of reports.
for i, iteration_reports in enumerate(materialized_reports_all):
# This ensures that the FREEZE_ENSEMBLE phase does not pass the reports
# generated in the previous phase of the same iteration to the
# Generator when building the graph.
if i >= iteration_number:
break
# Assumes that only one subnetwork is added to the ensemble in
# each iteration.
chosen_subnetwork_in_this_iteration = [
subnetwork_report for subnetwork_report in iteration_reports
if subnetwork_report.included_in_final_ensemble
][0]
previous_ensemble_reports.append(chosen_subnetwork_in_this_iteration)
all_reports.extend(iteration_reports)
return previous_ensemble_reports, all_reports
def _adanet_model_fn(self, features, labels, mode, params):
"""AdaNet model_fn.
This model_fn is called at least three times per iteration:
1. The first call generates, builds, and trains the candidate subnetworks
to ensemble in this iteration.
2. Once training is over, bookkeeping begins. The next call is to evaluate
the best candidate ensembles according to the AdaNet objective.
2.b. Optionally, when a report materializer is provided, another call
creates the graph for producing subnetwork reports for the next iteration
and other AdaNet runs.
3. The final call is responsible for rebuilding the ensemble architecture
from t-1 by regenerating the best builders and warm-starting their weights,
adding ops and initializing the weights for the next candidate subnetworks,
and overwriting the latest checkpoint with its graph and variables, so that
first call of the next iteration has the right variables in the checkpoint.
Args:
features: Dictionary of `Tensor` objects keyed by feature name.
labels: Labels `Tensor` or a dictionary of string label name to `Tensor`
(for multi-head). Can be `None`.
mode: Defines whether this is training, evaluation or prediction. See
`ModeKeys`.
params: A dict of parameters.
Returns:
A `EstimatorSpec` instance.
Raises:
UserWarning: When calling model_fn directly in TRAIN mode.
"""
training = mode == tf.estimator.ModeKeys.TRAIN
if training and not self._inside_adanet_training_loop:
raise UserWarning(
"The adanet.Estimator's model_fn should not be called directly in "
"TRAIN mode, because its behavior is undefined outside the context "
"of its `train` method. If you are trying to add custom metrics "
"with `tf.contrib.estimator.add_metrics`, pass the `metric_fn` to "
"this `Estimator's` constructor instead.")
iteration_number = self._latest_checkpoint_iteration_number()
# Use the evaluation checkpoint path to get both the iteration number and
# variable values to avoid any race conditions between the first and second
# checkpoint reads.
if mode == tf.estimator.ModeKeys.EVAL and self._evaluation_checkpoint_path:
iteration_number = tf.contrib.framework.load_variable(
self._evaluation_checkpoint_path, self._Keys.CURRENT_ITERATION)
if self._Keys.INCREMENT_ITERATION in params:
iteration_number += 1
architecture_filename = self._architecture_filename(iteration_number - 1)
architecture = []
if tf.gfile.Exists(architecture_filename):
architecture = self._read_architecture(architecture_filename)
tf.logging.info(
"Importing architecture from %s: [%s].", architecture_filename,
", ".join(sorted(["'{}'".format(f) for f in architecture])))
skip_summaries = mode == tf.estimator.ModeKeys.PREDICT
with tf.variable_scope("adanet"):
previous_ensemble_spec = None
previous_ensemble = None
previous_ensemble_summary = None
if architecture:
previous_ensemble_spec = self._architecture_ensemble_spec(
architecture, features, mode, labels)
previous_ensemble = previous_ensemble_spec.ensemble
previous_ensemble_summary = _ScopedSummary(
previous_ensemble_spec.name, skip_summary=skip_summaries)
if self._Keys.INCREMENT_ITERATION in params:
latest_checkpoint = tf.train.latest_checkpoint(self.model_dir)
tf.train.warm_start(latest_checkpoint, vars_to_warm_start=[".*"])
previous_ensemble_reports, all_reports = [], []
if self._report_materializer:
previous_ensemble_reports, all_reports = (
self._collate_subnetwork_reports(iteration_number))
subnetwork_builders = self._subnetwork_generator.generate_candidates(
previous_ensemble=previous_ensemble,
iteration_number=iteration_number,
previous_ensemble_reports=previous_ensemble_reports,
all_reports=all_reports)
current_iteration = self._iteration_builder.build_iteration(
iteration_number=iteration_number,
subnetwork_builders=subnetwork_builders,
features=features,
labels=labels,
mode=mode,
previous_ensemble_summary=previous_ensemble_summary,
previous_ensemble_spec=previous_ensemble_spec)
# Variable which allows us to read the current iteration from a checkpoint.
iteration_number_tensor = tf.get_variable(
self._Keys.CURRENT_ITERATION,
shape=[],
dtype=tf.int64,
initializer=tf.zeros_initializer(),
trainable=False)
adanet_summary = _ScopedSummary("global", skip_summaries)
adanet_summary.scalar("iteration/adanet/iteration", iteration_number_tensor)
adanet_summary.scalar("iteration_step/adanet/iteration_step",
current_iteration.step)
if current_iteration.estimator_spec.loss is not None:
adanet_summary.scalar("loss", current_iteration.estimator_spec.loss)
adanet_summary.scalar("loss/adanet/adanet_weighted_ensemble",
current_iteration.estimator_spec.loss)
iteration_estimator_spec = current_iteration.estimator_spec
estimator_spec = tf.estimator.EstimatorSpec(
mode=mode,
predictions=iteration_estimator_spec.predictions,
loss=iteration_estimator_spec.loss,
train_op=iteration_estimator_spec.train_op,
eval_metric_ops=iteration_estimator_spec.eval_metric_ops,
training_chief_hooks=iteration_estimator_spec.training_chief_hooks,
training_hooks=self._training_hooks(current_iteration, training),
evaluation_hooks=self._evaluation_hooks(current_iteration, training),
scaffold=tf.train.Scaffold(summary_op=adanet_summary.merge_all()),
export_outputs=iteration_estimator_spec.export_outputs)
if self._Keys.EVALUATE_ENSEMBLES in params:
assert self.config.is_chief
self._best_ensemble_index = self._get_best_ensemble_index(
current_iteration)
ensemble = current_iteration.candidates[
self._best_ensemble_index].ensemble_spec.ensemble
new_architecture_filename = self._architecture_filename(iteration_number)
self._save_architecture(new_architecture_filename, ensemble)
elif self._Keys.MATERIALIZE_REPORT in params:
assert self.config.is_chief
assert self._best_ensemble_index is not None
self._materialize_report(current_iteration)
elif self._Keys.INCREMENT_ITERATION in params:
assert self.config.is_chief
latest_checkpoint = tf.train.latest_checkpoint(self.model_dir)
tf.logging.info(
"Overwriting checkpoint with new graph for iteration %s to %s",
iteration_number, latest_checkpoint)
self._overwrite_checkpoint(current_iteration, iteration_number_tensor)
return estimator_spec
| [
"[email protected]"
]
| |
706c4a133f112d01c765c80eac0083d6d5e90652 | 1c6283303ceb883add8de4ee07c5ffcfc2e93fab | /Jinja2/lib/python3.7/site-packages/uhd_restpy/testplatform/sessions/ixnetwork/topology/rxsakpool_22340fe5cb5d81664cab595d3e6d08ef.py | 8aea7fbb4b72c3d049aa51d15c50a9fa0db81919 | []
| no_license | pdobrinskiy/devcore | 0f5b3dfc2f3bf1e44abd716f008a01c443e14f18 | 580c7df6f5db8c118990cf01bc2b986285b9718b | refs/heads/main | 2023-07-29T20:28:49.035475 | 2021-09-14T10:02:16 | 2021-09-14T10:02:16 | 405,919,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,134 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
from typing import List, Any, Union
class RxSakPool(Base):
"""Rx Channels configuration.
The RxSakPool class encapsulates a required rxSakPool resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'rxSakPool'
_SDM_ATT_MAP = {
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'Name': 'name',
'RxSak128': 'rxSak128',
'RxSak256': 'rxSak256',
'RxSalt': 'rxSalt',
'RxSsci': 'rxSsci',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(RxSakPool, self).__init__(parent, list_op)
@property
def Count(self):
# type: () -> int
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
# type: () -> str
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def RxSak128(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): 128 bit value of Secure Association Key with which DUT is expected to encrypt MACsec packets.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RxSak128']))
@property
def RxSak256(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): 256 bit value of Secure Association Key with which DUT is expected to encrypt MACsec packets.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RxSak256']))
@property
def RxSalt(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): 12 bytes Salt value for XPN cipher suites.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RxSalt']))
@property
def RxSsci(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): 4 bytes Short SCI for XPN cipher suites.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RxSsci']))
def update(self, Name=None):
# type: (str) -> RxSakPool
"""Updates rxSakPool resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def get_device_ids(self, PortNames=None, RxSak128=None, RxSak256=None, RxSalt=None, RxSsci=None):
"""Base class infrastructure that gets a list of rxSakPool device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- RxSak128 (str): optional regex of rxSak128
- RxSak256 (str): optional regex of rxSak256
- RxSalt (str): optional regex of rxSalt
- RxSsci (str): optional regex of rxSsci
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
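# Illustrative usage sketch (not part of the generated file): `parent` stands in
# for whichever IxNetwork node exposes this resource as a property; the resource is
# re-read from the server on each access, so update() pushes changes back and
# get_device_ids() filters with the documented regex parameters.
#
#   pool = parent.RxSakPool
#   pool.update(Name='rxSakPool-1')
#   device_ids = pool.get_device_ids(RxSsci='.*')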
| [
"[email protected]"
]
|