blob_id (stringlengths 40 to 40) | directory_id (stringlengths 40 to 40) | path (stringlengths 3 to 616) | content_id (stringlengths 40 to 40) | detected_licenses (listlengths 0 to 112) | license_type (stringclasses 2 values) | repo_name (stringlengths 5 to 115) | snapshot_id (stringlengths 40 to 40) | revision_id (stringlengths 40 to 40) | branch_name (stringclasses 777 values) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (stringclasses 22 values) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (stringclasses 149 values) | src_encoding (stringclasses 26 values) | language (stringclasses 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 3 to 10.2M) | extension (stringclasses 188 values) | content (stringlengths 3 to 10.2M) | authors (listlengths 1 to 1) | author_id (stringlengths 1 to 132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b55b8315eaf1069c0f704f305fdcdc0cfafaf87d
|
57ea6657b4deb620c4e29b606a5ec259d22fadcd
|
/Chatbot_Web/impl/weixin/WXBizMsgCrypt.py
|
77d319c0cda95ff7b3c39c660dfa35098682ca0d
|
[
"Apache-2.0"
] |
permissive
|
orchestor/Chatbot_CN
|
021d05849257d66e8e2a65d4ead5a777e09d7d3d
|
43922d7f73946d00faad3f27d86188ec18022965
|
refs/heads/master
| 2020-05-09T12:48:48.124981 | 2019-04-09T13:54:24 | 2019-04-09T13:54:24 | 181,124,145 | 1 | 0 |
Apache-2.0
| 2019-04-13T05:11:09 | 2019-04-13T05:11:06 | null |
UTF-8
|
Python
| false | false | 9,372 |
py
|
#!/usr/bin/env python
#-*- encoding:utf-8 -*-
""" Sample code for encrypting and decrypting messages exchanged between the
WeChat Official Account Platform and an official account.
@copyright: Copyright (c) 1998-2014 Tencent Inc.
"""
# ------------------------------------------------------------------------
import base64
import string
import random
import hashlib
import time
import struct
from Crypto.Cipher import AES
import xml.etree.cElementTree as ET
import sys
import socket
import ierror
"""
If `from Crypto.Cipher import AES` fails with ImportError: No module named 'Crypto',
download pycrypto from the official site https://www.dlitz.net/software/pycrypto/
and install it following the "Installation" section of its README.
"""
class FormatException(Exception):
pass
def throw_exception(message, exception_class=FormatException):
"""my define raise exception function"""
raise exception_class(message)
class SHA1:
    """Interface for computing message signatures for the WeChat platform"""
    def getSHA1(self, token, timestamp, nonce, encrypt):
        """Generate a secure signature with the SHA1 algorithm
        @param token: ticket (Token)
        @param timestamp: timestamp
        @param encrypt: ciphertext
        @param nonce: random string
        @return: secure signature
        """
try:
sortlist = [token, timestamp, nonce, encrypt]
sortlist.sort()
sha = hashlib.sha1()
sha.update("".join(sortlist))
return ierror.WXBizMsgCrypt_OK, sha.hexdigest()
except Exception as e:
#print e
return ierror.WXBizMsgCrypt_ComputeSignature_Error, None
class XMLParse:
    """Interface for extracting the ciphertext from a message and for generating reply messages"""
    # xml message template
AES_TEXT_RESPONSE_TEMPLATE = """<xml>
<Encrypt><![CDATA[%(msg_encrypt)s]]></Encrypt>
<MsgSignature><![CDATA[%(msg_signaturet)s]]></MsgSignature>
<TimeStamp>%(timestamp)s</TimeStamp>
<Nonce><![CDATA[%(nonce)s]]></Nonce>
</xml>"""
    def extract(self, xmltext):
        """Extract the encrypted message from an xml packet
        @param xmltext: the xml string to parse
        @return: the extracted encrypted-message string
        """
try:
xml_tree = ET.fromstring(xmltext)
encrypt = xml_tree.find("Encrypt")
touser_name = xml_tree.find("ToUserName")
return ierror.WXBizMsgCrypt_OK, encrypt.text, touser_name.text
except Exception as e:
#print e
return ierror.WXBizMsgCrypt_ParseXml_Error,None,None
    def generate(self, encrypt, signature, timestamp, nonce):
        """Generate the reply xml message
        @param encrypt: the encrypted message ciphertext
        @param signature: secure signature
        @param timestamp: timestamp
        @param nonce: random string
        @return: the generated xml string
        """
resp_dict = {
'msg_encrypt' : encrypt,
'msg_signaturet': signature,
'timestamp' : timestamp,
'nonce' : nonce,
}
resp_xml = self.AES_TEXT_RESPONSE_TEMPLATE % resp_dict
return resp_xml
class PKCS7Encoder():
    """Interface providing PKCS7 padding and unpadding"""
    block_size = 32
    def encode(self, text):
        """ Pad the plaintext that is about to be encrypted
        @param text: the plaintext that needs padding
        @return: the padded plaintext string
        """
        text_length = len(text)
        # Compute the number of padding bytes required
        amount_to_pad = self.block_size - (text_length % self.block_size)
        if amount_to_pad == 0:
            amount_to_pad = self.block_size
        # Character used for the padding
        pad = chr(amount_to_pad)
        return text + pad * amount_to_pad
    def decode(self, decrypted):
        """Strip the padding characters from the decrypted plaintext
        @param decrypted: the decrypted plaintext
        @return: the plaintext with padding removed
        """
pad = ord(decrypted[-1])
if pad<1 or pad >32:
pad = 0
return decrypted[:-pad]
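    # Worked example of the scheme above (illustrative note, not from the
    # original file): with block_size = 32, a 30-byte plaintext needs 2 bytes
    # of padding, so encode() appends chr(2) * 2; decode() then reads the last
    # byte (2) and strips that many bytes from the end.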
class Prpcrypt(object):
    """Interface for encrypting and decrypting messages received from and pushed to the WeChat platform"""
    def __init__(self,key):
        #self.key = base64.b64decode(key+"=")
        self.key = key
        # Use AES in CBC mode for encryption and decryption
self.mode = AES.MODE_CBC
    def encrypt(self,text,appid):
        """Encrypt the plaintext
        @param text: the plaintext to encrypt
        @return: the encrypted string
        """
        # Prepend a 16-byte random string to the plaintext
        text = self.get_random_str() + struct.pack("I",socket.htonl(len(text))) + text + appid
        # Pad the plaintext using the custom PKCS7 scheme
        pkcs7 = PKCS7Encoder()
        text = pkcs7.encode(text)
        # Encrypt
        cryptor = AES.new(self.key,self.mode,self.key[:16])
        try:
            ciphertext = cryptor.encrypt(text)
            # Base64-encode the encrypted string
return ierror.WXBizMsgCrypt_OK, base64.b64encode(ciphertext)
except Exception as e:
#print e
return ierror.WXBizMsgCrypt_EncryptAES_Error,None
    def decrypt(self,text,appid):
        """Decrypt the ciphertext and strip the padding
        @param text: ciphertext
        @return: plaintext with the padding removed
        """
        try:
            cryptor = AES.new(self.key,self.mode,self.key[:16])
            # Base64-decode the ciphertext, then decrypt with AES-CBC
            plain_text = cryptor.decrypt(base64.b64decode(text))
        except Exception as e:
            #print e
            return ierror.WXBizMsgCrypt_DecryptAES_Error,None
        try:
            pad = ord(plain_text[-1])
            # Strip the padding characters
            #pkcs7 = PKCS7Encoder()
            #plain_text = pkcs7.encode(plain_text)
            # Drop the 16-byte random prefix
content = plain_text[16:-pad]
xml_len = socket.ntohl(struct.unpack("I",content[ : 4])[0])
xml_content = content[4 : xml_len+4]
from_appid = content[xml_len+4:]
except Exception as e:
#print e
return ierror.WXBizMsgCrypt_IllegalBuffer,None
if from_appid != appid:
return ierror.WXBizMsgCrypt_ValidateAppid_Error,None
return 0,xml_content
    def get_random_str(self):
        """ Generate a random 16-character string
        @return: a 16-character string
        """
        rule = string.ascii_letters + string.digits
        chars = random.sample(rule, 16)
        return "".join(chars)
class WXBizMsgCrypt(object):
    # Constructor
    # @param sToken: the Token configured by the developer on the official account platform
    # @param sEncodingAESKey: the EncodingAESKey configured by the developer on the platform
    # @param sAppId: the AppId of the account
def __init__(self,sToken,sEncodingAESKey,sAppId):
try:
self.key = base64.b64decode(sEncodingAESKey+"=")
assert len(self.key) == 32
except:
            throw_exception("[error]: EncodingAESKey invalid!", FormatException)
#return ierror.WXBizMsgCrypt_IllegalAesKey)
self.token = sToken
self.appid = sAppId
def EncryptMsg(self, sReplyMsg, sNonce, timestamp = None):
        # Encrypt and pack the message that the official account replies to the user
        # @param sReplyMsg: the message to send back to the user, an xml-format string
        # @param sTimeStamp: timestamp; may be generated by the caller or taken from the URL parameter timestamp; if None, the current time is used
        # @param sNonce: random string; may be generated by the caller or taken from the URL parameter nonce
        # sEncryptMsg: the encrypted ciphertext, ready to return to the user, as an xml-format string containing msg_signature, timestamp, nonce and encrypt
        # return: 0 and sEncryptMsg on success; the corresponding error code and None on failure
pc = Prpcrypt(self.key)
ret,encrypt = pc.encrypt(sReplyMsg, self.appid)
if ret != 0:
return ret,None
if timestamp is None:
timestamp = str(int(time.time()))
        # Generate the secure signature
sha1 = SHA1()
ret,signature = sha1.getSHA1(self.token, timestamp, sNonce, encrypt)
if ret != 0:
return ret,None
xmlParse = XMLParse()
return ret,xmlParse.generate(encrypt, signature, timestamp, sNonce)
def DecryptMsg(self, sPostData, sMsgSignature, sTimeStamp, sNonce):
        # Verify the authenticity of a message and obtain the decrypted plaintext
        # @param sMsgSignature: signature string, the URL parameter msg_signature
        # @param sTimeStamp: timestamp, the URL parameter timestamp
        # @param sNonce: random string, the URL parameter nonce
        # @param sPostData: ciphertext, the body of the POST request
        # xml_content: the decrypted plaintext, valid when the return code is 0
        # @return: 0 on success; the corresponding error code on failure
        # Verify the secure signature
xmlParse = XMLParse()
ret,encrypt,touser_name = xmlParse.extract(sPostData)
if ret != 0:
return ret, None
sha1 = SHA1()
ret,signature = sha1.getSHA1(self.token, sTimeStamp, sNonce, encrypt)
if ret != 0:
return ret, None
if not signature == sMsgSignature:
return ierror.WXBizMsgCrypt_ValidateSignature_Error, None
pc = Prpcrypt(self.key)
ret,xml_content = pc.decrypt(encrypt,self.appid)
return ret,xml_content
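# Usage sketch (not part of the original file; the token, key and appid below
# are placeholder values assumed only for illustration):
#
#   crypt = WXBizMsgCrypt("my_token", "k" * 43, "wx1234567890")
#   ret, encrypted_xml = crypt.EncryptMsg("<xml>...</xml>", "13579")
#   # On receiving a message, verify and decrypt with the URL parameters:
#   ret, plain_xml = crypt.DecryptMsg(sPostData, msg_signature, timestamp, nonce)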
|
[
"[email protected]"
] | |
d09e00e653b574f9a8970cd1e584175a9a92737e
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2811/60651/233596.py
|
adf251acd00fc9d70e91606146ff79bfffcadcaa
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 190 |
py
|
inlist = list(map(int, input().split()))
modlist = []
# Scan the inlist[1] values that follow the first two entries and report the
# first one whose remainder modulo inlist[0] repeats.
for i in inlist[2:2 + inlist[1]]:
    moi = i % inlist[0]
    if moi not in modlist:
        modlist.append(moi)
    else:
        print(i)
        break
|
[
"[email protected]"
] | |
724c22209cc7ae9cdd3b6fab93b7e2622f3ee760
|
5e9576c368e98927e2965bd2fb23bd35d9993d69
|
/featuretools/primitives/standard/aggregation/percent_true.py
|
32ce39a1834e8fab60536294b99d282ccf61eb9d
|
[
"BSD-3-Clause"
] |
permissive
|
alteryx/featuretools
|
c6e319e063e8e84e7684bf232376f95dc5272160
|
c284c2d27a95b81e0bae913ac90df2b02c8f3b37
|
refs/heads/main
| 2023-08-25T12:21:33.945418 | 2023-08-23T16:30:25 | 2023-08-23T16:30:25 | 102,908,804 | 1,783 | 201 |
BSD-3-Clause
| 2023-09-07T18:53:19 | 2017-09-08T22:15:17 |
Python
|
UTF-8
|
Python
| false | false | 2,090 |
py
|
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean, BooleanNullable, Double
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
from featuretools.utils.gen_utils import Library, import_or_none
dd = import_or_none("dask.dataframe")
class PercentTrue(AggregationPrimitive):
"""Determines the percent of `True` values.
Description:
Given a list of booleans, return the percent
of values which are `True` as a decimal.
`NaN` values are treated as `False`,
adding to the denominator.
Examples:
>>> percent_true = PercentTrue()
>>> percent_true([True, False, True, True, None])
0.6
"""
name = "percent_true"
input_types = [
[ColumnSchema(logical_type=BooleanNullable)],
[ColumnSchema(logical_type=Boolean)],
]
return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
stack_on = []
stack_on_exclude = []
default_value = 0
compatibility = [Library.PANDAS, Library.DASK]
description_template = "the percentage of true values in {}"
def get_function(self, agg_type=Library.PANDAS):
if agg_type == Library.DASK:
def chunk(s):
def format_chunk(x):
return x[:].fillna(False)
chunk_sum = s.agg(lambda x: format_chunk(x).sum())
chunk_len = s.agg(lambda x: len(format_chunk(x)))
if chunk_sum.dtype == "bool":
chunk_sum = chunk_sum.astype("int64")
if chunk_len.dtype == "bool":
chunk_len = chunk_len.astype("int64")
return (chunk_sum, chunk_len)
def agg(val, length):
return (val.sum(), length.sum())
def finalize(total, length):
return total / length
return dd.Aggregation(self.name, chunk=chunk, agg=agg, finalize=finalize)
def percent_true(s):
return s.fillna(False).mean()
return percent_true
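# A minimal sketch of calling the primitive directly (assumes only pandas is
# available; the Series values are illustrative):
#
#   import pandas as pd
#   fn = PercentTrue().get_function()          # default pandas aggregation
#   fn(pd.Series([True, False, None, True]))   # NaN counts as False -> 0.5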
|
[
"[email protected]"
] | |
2e1f37842c48c239ce71d64acfa606a8846c5601
|
7be67ecaee241769a69f3f5dae1bb6f99feb5e84
|
/venv/bin/xhtml2pdf
|
9de81088105b51d156f564e3a715ca49edad96a4
|
[] |
no_license
|
asadlive84/Billing-Cable-TV-Providers-
|
6e5e6412a84045749869253b49a2a53564b52c96
|
7f1927030e9cb57573e1cfe5a5110614ef251d02
|
refs/heads/dev
| 2022-03-17T21:42:41.443783 | 2019-11-22T14:48:50 | 2019-11-22T14:48:50 | 213,041,047 | 1 | 0 | null | 2019-12-05T00:29:16 | 2019-10-05T17:23:09 |
Python
|
UTF-8
|
Python
| false | false | 435 |
#!/home/asad/a/Billing-Cable-TV-Providers-/venv/bin/python3.7
# EASY-INSTALL-ENTRY-SCRIPT: 'xhtml2pdf==0.2.3','console_scripts','xhtml2pdf'
__requires__ = 'xhtml2pdf==0.2.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('xhtml2pdf==0.2.3', 'console_scripts', 'xhtml2pdf')()
)
|
[
"[email protected]"
] | ||
e034515fd347be397beff6bc24ce3093eecb2309
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03733/s782233089.py
|
46581aa9c8c3b8e38789b9a77690606af0e916cd
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 242 |
py
|
N, T = map(int, input().split())
t = list(map(int, input().split()))
cnt = T
time = T
for i in range(1, N):
if(t[i] <= time):
cnt += (t[i]-time)+T
time = t[i]+T
else:
time = t[i]+T
cnt += T
print(cnt)
|
[
"[email protected]"
] | |
5dbf7f4aad00734caf369d4faf39c9b48ef74a6d
|
a3bb8b060d992ca37ab6d97f0af8e61d43dd7b2f
|
/ufora/cumulus/test/DistributedDataTasks_test.py
|
9e86951ba212fbf34ed71153dd89beb133eff63d
|
[
"dtoa",
"MIT",
"BSD-3-Clause",
"BSL-1.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] |
permissive
|
vishnur/ufora
|
c21bd3367c2786fcb29be4cde50001b98407a0bf
|
a4265ea5d0286586de2c2b22a15e04c24171ba08
|
refs/heads/master
| 2020-12-24T17:17:29.507601 | 2016-02-04T19:17:24 | 2016-02-04T19:22:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,649 |
py
|
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import logging
import ufora.cumulus.test.InMemoryCumulusSimulation as InMemoryCumulusSimulation
import ufora.distributed.S3.InMemoryS3Interface as InMemoryS3Interface
import ufora.native.CallbackScheduler as CallbackScheduler
import ufora.test.PerformanceTestReporter as PerformanceTestReporter
callbackScheduler = CallbackScheduler.singletonForTesting()
TIMEOUT=120
class DistributedDataTasksTests(unittest.TestCase):
def basicTaskPathwayTest(self, sz, machines=1, memory=1000):
s3 = InMemoryS3Interface.InMemoryS3InterfaceFactory()
text = """
let N = __size__;
//let values = Vector.range(N,fun(x) { ((x * 503) % N, x) }).paged;
let values = Vector.range(N).paged;
let s1 = cached`(#ExternalIoTask(#DistributedDataOperation(#Sort(values))))
let s2 = sorting.sort(values)
if (size(s1) != size(s2))
return 'wrong size: %s != %s'.format(size(s1), size(s2))
for ix in sequence(size(s1))
if (s1[ix] != s2[ix])
return 'not equal: index=%s. %s != %s'.format(ix, s1[ix], s2[ix])
return true
""".replace("__size__", str(sz))
result = InMemoryCumulusSimulation.computeUsingSeveralWorkers(
text,
s3,
machines,
timeout=TIMEOUT,
memoryLimitMb=memory
)
self.assertTrue(result is not None)
self.assertTrue(result.isResult(), result)
self.assertTrue(result.asResult.result.pyval == True, result)
def test_basicTaskPathwaySmall(self):
self.basicTaskPathwayTest(100)
def test_basicTaskPathwayBig(self):
self.basicTaskPathwayTest(10000000)
def test_basicTaskPathwayMultibox(self):
self.basicTaskPathwayTest(10000000, 4, 250)
def weirdStringSort(self, sz, machines=1, memory=1000):
s3 = InMemoryS3Interface.InMemoryS3InterfaceFactory()
text = """
let N = __size__;
let values = Vector.range(N, fun(ix) { " " * ix }).paged;
let s1 = cached`(#ExternalIoTask(#DistributedDataOperation(#Sort(values))))
let s2 = sorting.sort(values)
if (size(s1) != size(s2))
return 'wrong size: %s != %s'.format(size(s1), size(s2))
for ix in sequence(size(s1))
if (s1[ix] != s2[ix])
return 'not equal: index=%s. %s != %s'.format(ix, s1[ix], s2[ix])
return true
""".replace("__size__", str(sz))
result = InMemoryCumulusSimulation.computeUsingSeveralWorkers(
text,
s3,
machines,
timeout=TIMEOUT,
memoryLimitMb=memory
)
self.assertTrue(result is not None)
self.assertTrue(result.isResult(), result)
self.assertTrue(result.asResult.result.pyval == True, result)
def test_weirdStringSort_1(self):
self.weirdStringSort(10000, 1, 1000)
def test_weirdStringSort_2(self):
self.weirdStringSort(10000, 4, 250)
    class DISABLED:  # wrapping the tests below in a plain class keeps the unittest runner from collecting them
def test_takeLookupSemantics(self):
s3 = InMemoryS3Interface.InMemoryS3InterfaceFactory()
takeText = """
let directTake = fun(v, i) {
i ~~ fun
((filters.IsInteger(...) ix1,filters.IsInteger(...) ix2)) {
try { [v][ix1][ix2] } catch(...) { nothing }
}
(ix) { try { v[ix] } catch (...) { nothing } }
};
let takeFrom = [1,2,3,4].paged;
let indices = __indices__.paged;
let result = cached`(#ExternalIoTask(#DistributedDataOperation(#Take(indices, takeFrom))));
let targetResult = directTake(takeFrom, indices)
assertions.assertEqual(size(result), size(targetResult))
for ix in sequence(size(result))
if (result[ix] is not targetResult[ix])
return "Expected %s to yield %s, but got %s".format(
indices[ix],
targetResult[ix],
result[ix]
);
return true;
"""
def takeTest(indexExpr):
result = InMemoryCumulusSimulation.computeUsingSeveralWorkers(
takeText.replace("__indices__", indexExpr),
s3,
1,
timeout=TIMEOUT,
memoryLimitMb=1000
)
self.assertTrue(result is not None)
self.assertTrue(result.isResult(), result)
self.assertTrue(result.asResult.result.pyval == True, result)
takeTest("[0,1,2,3]")
takeTest("[0,-1,2,3]")
takeTest("[0,1,2,30]")
takeTest("[(0,0),(0,1),(0,2),(0,3)]")
takeTest("[(0,0),(0,1),(0,2),(0,30)]")
takeTest("[(0,0),(0,1),(0,2),(3,0)]")
takeTest("[(0u8,0u16),(0u32,1u64),(0s32,2s8),(0s16,3s64)]")
takeTest("[0,-1,(), (0,0), (0,0.0), nothing, (1,0), (0u8,6u16), (-1,2)]")
@PerformanceTestReporter.PerfTest("python.datatasks.distributed_sort_2_boxes")
def test_multiboxDataTasksSort_1(self):
self.multiboxDataTasksSort(1000)
@PerformanceTestReporter.PerfTest("python.datatasks.distributed_sort_2_boxes_big")
def test_multiboxDataTasksSort_2(self):
self.multiboxDataTasksSort(10000000, memoryLimit=250)
@PerformanceTestReporter.PerfTest("python.datatasks.distributed_sort_8_boxes_big")
def test_multiboxDataTasksSort_3(self):
self.multiboxDataTasksSort(20000000, workers=8, memoryLimit=250)
@PerformanceTestReporter.PerfTest("python.datatasks.distributed_sort_1_boxes_big")
def test_multiboxDataTasksSort_4(self):
self.multiboxDataTasksSort(20000000, workers=1, memoryLimit=2000)
def multiboxDataTasksSort(self, ct, workers=2, memoryLimit=100, pageSizeOverrideMB=1):
s3 = InMemoryS3Interface.InMemoryS3InterfaceFactory()
text = """
let N = __ct__;
let aPrime = 503
let toSort = Vector.range(N, { ((_ * _) % aPrime, _) }).paged;
let result = cached`(#ExternalIoTask(#DistributedDataOperation(#Sort(toSort))))
sorting.isSorted(result)
""".replace("__ct__", str(ct))
result = InMemoryCumulusSimulation.computeUsingSeveralWorkers(
text,
s3,
workers,
timeout=TIMEOUT,
memoryLimitMb=memoryLimit,
pageSizeOverride=pageSizeOverrideMB*1024*1024
)
self.assertTrue(result is not None)
self.assertTrue(result.isResult(), result)
self.assertTrue(result.asResult.result.pyval == True, result)
@PerformanceTestReporter.PerfTest("python.datatasks.distributed_take_2_boxes")
def test_multiboxDataTasksTake_1(self):
s3 = InMemoryS3Interface.InMemoryS3InterfaceFactory()
text = """
let N = 10000000;
let isPrime = fun(p) {
let x = 2
while (x*x <= p) {
if (p%x == 0)
return 0
x = x + 1
}
return x
}
let takeFrom = Vector.range(N, isPrime).paged;
let indices = Vector.range(N,fun(x) { (0, (x * 503) % N ) }).paged;
cached`(#ExternalIoTask(#DistributedDataOperation(#Take(indices, takeFrom)))) ==
indices ~~ { takeFrom[_[1]] }
"""
result = InMemoryCumulusSimulation.computeUsingSeveralWorkers(
text,
s3,
2,
timeout=TIMEOUT,
memoryLimitMb=1000
)
self.assertTrue(result is not None)
self.assertTrue(result.isResult(), result)
self.assertTrue(result.asResult.result.pyval == True, result)
@PerformanceTestReporter.PerfTest("python.datatasks.distributed_take_8_boxes")
def test_multiboxDataTasksTake_2(self):
s3 = InMemoryS3Interface.InMemoryS3InterfaceFactory()
text = """
let N = 10 * 1000000;
let takeFrom = Vector.range(N)
let indices = Vector.range(N,fun(x) { (0, (x * 503) % N ) });
cached`(#ExternalIoTask(#DistributedDataOperation(#Take(indices, takeFrom))))[0]
"""
result, simulation = InMemoryCumulusSimulation.computeUsingSeveralWorkers(
text,
s3,
8,
timeout=TIMEOUT,
memoryLimitMb=200,
returnSimulation = True
)
logging.info("Simulation completed")
maxHighWatermark = 0
try:
for ix in range(8):
vdm = simulation.getWorkerVdm(ix)
vdmm = vdm.getMemoryManager()
logging.info("Total bytes: %s", vdmm.getTotalBytesMmappedHighWaterMark())
maxHighWatermark = max(maxHighWatermark, vdmm.getTotalBytesMmappedHighWaterMark())
vdm = None
vdmm = None
self.assertTrue(result is not None)
self.assertTrue(result.isResult(), result)
self.assertTrue(isinstance(result.asResult.result.pyval,int), result)
finally:
simulation.teardown()
self.assertTrue(maxHighWatermark < 265 * 1024 * 1024)
def test_takeFromLargeObjects(self):
s3 = InMemoryS3Interface.InMemoryS3InterfaceFactory()
text = """
let N = 100;
//each string is 1 MB
let takeFrom = [" " * 100 * 100 * 10 * 10 + " " * ix for ix in sequence(N)].paged;
let indices = Vector.range(N,fun(x) { x }).paged;
cached`(#ExternalIoTask(#DistributedDataOperation(#Take(indices, takeFrom))))
"""
try:
result, simulation = InMemoryCumulusSimulation.computeUsingSeveralWorkers(
text,
s3,
1,
timeout=TIMEOUT,
memoryLimitMb=1000,
returnSimulation = True,
pageSizeOverride = 1024 * 1024
)
self.assertTrue(result is not None)
self.assertTrue(result.isResult(), result)
for page in result.asResult.result.getVectorPageIds(simulation.getWorkerVdm(0)):
self.assertLess(page.bytecount / 1024.0 / 1024.0, 2.0)
finally:
simulation.teardown()
def test_takeFromLargeObjectsAsymmetric(self):
s3 = InMemoryS3Interface.InMemoryS3InterfaceFactory()
text = """
let N = 20;
//every thousandth string is 1 MB. Just take those.
let takeFrom = [
if (ix % 1000 == 0)
(" " * 100 * 100 * 10 * 10 + " " * (ix / 1000))
else
""
for ix in sequence(N * 1000)].paged;
let indices = Vector.range(N,fun(x) { x * 1000 }).paged;
let result = cached`(#ExternalIoTask(#DistributedDataOperation(#Take(indices, takeFrom))))
let targetResult = indices ~~ {takeFrom[_]};
assertions.assertEqual(size(result), size(targetResult))
assertions.assertEqual(result, targetResult)
result
"""
try:
result, simulation = InMemoryCumulusSimulation.computeUsingSeveralWorkers(
text,
s3,
1,
timeout=TIMEOUT,
memoryLimitMb=1000,
returnSimulation = True,
pageSizeOverride = 1024 * 1024
)
self.assertTrue(result is not None)
self.assertTrue(result.isResult(), result)
for page in result.asResult.result.getVectorPageIds(simulation.getWorkerVdm(0)):
self.assertLess(page.bytecount / 1024.0 / 1024.0, 5.0)
finally:
simulation.teardown()
|
[
"[email protected]"
] | |
8fee26e0cffef8bdca1787ef08b772d94ae98f5d
|
d05a59feee839a4af352b7ed2fd6cf10a288a3cb
|
/xlsxwriter/test/comparison/test_simple08.py
|
6162995e3717146a1053b12a45aae23f8f91ccc5
|
[
"BSD-2-Clause-Views"
] |
permissive
|
elessarelfstone/XlsxWriter
|
0d958afd593643f990373bd4d8a32bafc0966534
|
bb7b7881c7a93c89d6eaac25f12dda08d58d3046
|
refs/heads/master
| 2020-09-24T06:17:20.840848 | 2019-11-24T23:43:01 | 2019-11-24T23:43:01 | 225,685,272 | 1 | 0 |
NOASSERTION
| 2019-12-03T18:09:06 | 2019-12-03T18:09:05 | null |
UTF-8
|
Python
| false | false | 800 |
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('simple08.xlsx')
def test_create_file(self):
"""Test '0' number format. GH103."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
format1 = workbook.add_format({'num_format': 1})
worksheet.write(0, 0, 1.23, format1)
workbook.close()
self.assertExcelEqual()
|
[
"[email protected]"
] | |
732fd0b2c4f87cd1ee44405eca6fe74734235e07
|
718a104a65581faa87980583bb321e093db341d3
|
/data.py
|
334b5bbc65297410a64db75e3971f1c7e9e5c1a7
|
[
"Apache-2.0"
] |
permissive
|
JHWen/Load-Forecast
|
14e3909ec48b5f1a578ee4c727dd234c498f3eb3
|
f65c623f33b4e19eb1035860c1df33926c747599
|
refs/heads/master
| 2020-04-12T08:36:43.176495 | 2018-12-20T07:22:03 | 2018-12-20T07:22:03 | 162,390,053 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,020 |
py
|
import pandas as pd
import numpy as np
import logging
import random
def load_data(path):
data = pd.read_csv(path, delimiter=',')
"""
Year,Month,Day,Hour,Value,Value1,Value2,Value3,dayOfWeek,isWorkday,isHoliday,Season,
Tem,RH,Precipitation,File,value_oneweek_before,value_oneday_before,value_onedayavg_before
"""
# names = ['Month', 'Day', 'Hour', 'dayOfWeek', 'isWorkday', 'isHoliday', 'Season', 'Tem', 'RH',
# 'value_oneweek_before', 'value_oneday_before', 'value_onedayavg_before', 'Value']
# names = ['dayOfWeek', 'isWorkday', 'isHoliday', 'Season', 'Tem', 'RH',
# 'value_oneweek_before', 'value_oneday_before', 'value_onedayavg_before', 'Value']
#
# data = df[names].values
index_zero_value = []
for i in range(data.shape[0]):
if data['Value'][i] == 0:
index_zero_value.append(i)
df = data.loc[:]
for i in index_zero_value:
df.loc[i, 'Value'] = None
df = df.dropna()
# end
max_value = np.max(df['Value'])
min_value = np.min(df['Value'])
dfy = pd.DataFrame({'Value': (df['Value'] - min_value) / (max_value - min_value)})
dfX = pd.DataFrame({'dayOfWeek': df['dayOfWeek'],
'isWorkday': df['isWorkday'], 'isHoliday': df['isHoliday'],
'Season': df['Season'],
'Tem': (df['Tem'] - np.mean(df['Tem'])) / (np.max(df['Tem']) - np.min(df['Tem'])),
'RH': (df['RH'] - np.mean(df['RH'])) / (np.max(df['RH']) - np.min(df['RH']))})
df_X = np.array(dfX)
df_y = np.array(dfy)
data_ = np.concatenate((df_X, df_y), axis=1)
return data_, max_value, min_value
def get_train_data(data, shuffle=False, input_size=9, batch_size=60, time_step=15, train_begin=0, train_end=2000):
train_data = data[train_begin:train_end]
if shuffle:
random.shuffle(data)
    # Standardization
    mean = np.mean(train_data, axis=0)
    std = np.std(train_data, axis=0)
    # normalized_train_data = (train_data - mean) / std
    normalized_train_data = train_data
    train_x, train_y = [], []  # training set
for i in range(len(normalized_train_data) - time_step):
if len(train_x) == batch_size:
yield train_x, train_y
train_x, train_y = [], []
x = normalized_train_data[i:i + time_step, :input_size]
y = normalized_train_data[i:i + time_step, input_size, np.newaxis]
train_x.append(x.tolist())
train_y.append(y.tolist())
def get_test_data(data, input_size=6, time_step=15, test_begin=2000, test_end=2500):
test_data = data[test_begin:test_end]
mean = np.mean(test_data, axis=0)
std = np.std(test_data, axis=0)
# normalized_test_data = (test_data - mean) / std
normalized_test_data = test_data
    size = (len(normalized_test_data) + time_step - 1) // time_step  # number of samples
test_x, test_y = [], []
for i in range(size - 1):
x = normalized_test_data[i * time_step:(i + 1) * time_step, :input_size]
y = normalized_test_data[i * time_step:(i + 1) * time_step, input_size]
test_x.append(x.tolist())
test_y.extend(y)
return test_x, test_y
def get_logger(filename):
logger = logging.getLogger('logger')
logger.setLevel(logging.DEBUG)
logging.basicConfig(format='%(message)s', level=logging.DEBUG)
handler = logging.FileHandler(filename)
handler.setLevel(logging.DEBUG)
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
if not logger.handlers:
logger.addHandler(handler)
return logger
if __name__ == '__main__':
data, max_value, min_value = load_data('./data_path/HourLoadSet.csv')
# batches = get_train_data(data)
test_x, test_y = get_test_data(data=data, test_begin=15000, test_end=17000)
test_y = np.array(test_y)
test_y_ = test_y * (max_value - min_value) + min_value
print(max_value, min_value)
for i, j in zip(test_y_, test_y):
print(i, j)
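# Sketch of consuming the batch generator above (illustrative; input_size=6
# matches the six feature columns that load_data builds):
#
#   for train_x, train_y in get_train_data(data, input_size=6, batch_size=60,
#                                          time_step=15, train_end=15000):
#       pass  # train_x: [batch][time_step][input_size]; train_y: [batch][time_step][1]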
|
[
"[email protected]"
] | |
12a20fa1740ade148a3cbe7e0244b55abdca0b40
|
ebd6f68d47e192da7f81c528312358cfe8052c8d
|
/swig/Examples/test-suite/python/arrays_global_runme.py
|
fa3b9f2ec2c153899bedb2354e0ab4637717a0b9
|
[
"LicenseRef-scancode-swig",
"GPL-3.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"GPL-3.0-only",
"Apache-2.0"
] |
permissive
|
inishchith/DeepSpeech
|
965ad34d69eb4d150ddf996d30d02a1b29c97d25
|
dcb7c716bc794d7690d96ed40179ed1996968a41
|
refs/heads/master
| 2021-01-16T16:16:05.282278 | 2020-05-19T08:00:33 | 2020-05-19T08:00:33 | 243,180,319 | 1 | 0 |
Apache-2.0
| 2020-02-26T05:54:51 | 2020-02-26T05:54:50 | null |
UTF-8
|
Python
| false | false | 419 |
py
|
import arrays_global
arrays_global.cvar.array_i = arrays_global.cvar.array_const_i
from arrays_global import *
BeginString_FIX44a
cvar.BeginString_FIX44b
BeginString_FIX44c
cvar.BeginString_FIX44d
cvar.BeginString_FIX44d
cvar.BeginString_FIX44b = "12"'\0'"45"
cvar.BeginString_FIX44b
cvar.BeginString_FIX44d
cvar.BeginString_FIX44e
BeginString_FIX44f
test_a("hello", "hi", "chello", "chi")
test_b("1234567", "hi")
|
[
"[email protected]"
] | |
02b0318d190b9e1fde135410ecc2cb5fcac416c1
|
46fda2ea47f311ee7fefc6f6210811c7f4bd74ad
|
/science/py-geometer/files/patch-setup.py
|
264db73014f73c40718fa55442e380c6cf4faf73
|
[
"BSD-2-Clause"
] |
permissive
|
truenas/ports
|
ad560a8adde884dc0cfc4b292bbbcad91903b287
|
da4ed13ad08a6af5c54361f45964fa1177367c68
|
refs/heads/truenas/13.0-stable
| 2023-09-02T03:00:28.652837 | 2023-08-16T16:05:00 | 2023-08-16T16:05:00 | 8,656,293 | 18 | 9 |
NOASSERTION
| 2023-09-12T15:15:34 | 2013-03-08T17:35:37 | null |
UTF-8
|
Python
| false | false | 292 |
py
|
--- setup.py.orig 2020-07-08 15:51:34 UTC
+++ setup.py
@@ -19,7 +19,7 @@ VERSION = None
# What packages are required for this module to be executed?
REQUIRED = [
- 'numpy>=1.15,<1.20', 'sympy>=1.3,<=1.7'
+ 'numpy>=1.15,<1.21', 'sympy>=1.3,<=1.7'
]
# What packages are optional?
|
[
"[email protected]"
] | |
eb257264160ee57ec88f4c264424ccfdd9d82b1f
|
6245db4e53782ab380de287f945bc026e3c6b281
|
/python_workbook/str_formatting.py
|
4a6974a3408e59c5880a9538906397759ee6403f
|
[] |
no_license
|
rashmierande/python_exercise
|
e510174820efb793cfe435ad53c6baa34c829d86
|
9703b67a1f5614594244b4d8d2c42ebfb6a2aaec
|
refs/heads/master
| 2021-01-23T00:53:17.235391 | 2018-03-15T05:46:53 | 2018-03-15T05:46:53 | 85,846,226 | 0 | 1 | null | 2018-01-21T18:11:11 | 2017-03-22T15:42:09 |
Python
|
UTF-8
|
Python
| false | false | 722 |
py
|
'''
Question: The code is supposed to ask the user to enter their name and surname
and then it prints out those user submitted values. Instead, the code throws a TypeError.
Please fix it so that the expected output is printed out.
Expected output:
Your first name is John and your second name is Smith
'''
firstname = input("Enter first name: ")
secondname = input("Enter second name: ")
print("Your first name is %s and your second name is %s" % (firstname, secondname))
# Each %s placeholder expects one value after the % operator, and when there is
# more than one placeholder those values must be passed inside a tuple.
# Putting the variables firstname and secondname inside a tuple fixes the code:
# Python expects a tuple after %.
|
[
"[email protected]"
] | |
e99d95e4437b2da14fa498b7f5928e952b46ebb1
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02265/s085080213.py
|
540cdcdb6eb525c25d43e70a920abb364407eaae
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 240 |
py
|
import collections
d = collections.deque()
for _ in range(int(input())):
    e = input()
    if 'i' == e[0]:  # "insert x": push x onto the front
        d.appendleft(e.split()[1])
    else:
        if ' ' == e[6]:  # "delete x": remove the first occurrence of x
            m = e.split()[1]
            if m in d:
                d.remove(m)
        elif len(e) % 2:  # "deleteFirst" (odd command length)
            d.popleft()
        else:  # "deleteLast" (even command length)
            d.pop()
print(*d)
|
[
"[email protected]"
] | |
89e378268e626c1a3c788218339b4d1c759b6ea6
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/29/usersdata/109/9631/submittedfiles/atividade.py
|
bc96354444dfb5afcff956647e27a7783ced551a
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 193 |
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import math
cont=0
n=int(input('Digite o valor de n:'))  # prompt: "Enter the value of n:"
while True:
s=(n//10)
cont=cont+1
n=s
if s<1:
break
print(cont)
|
[
"[email protected]"
] | |
3ff68cf51fa51f689c89f1ac91c1fa528c276bb3
|
a947525caa6940262099b24ebafa61900691ef22
|
/trainer/agents.py
|
28f7f1ea781b5e3a95ba15393f59060638c78c98
|
[] |
no_license
|
metal-tile/dqn-tensorflow
|
527e9b8fa89eea4a4e6375d4244b9f2b10847473
|
9552307dfb4dacde69bdb268350d8ca0b4a02693
|
refs/heads/master
| 2020-05-19T05:48:47.323835 | 2019-05-04T06:15:31 | 2019-05-04T06:15:31 | 184,858,118 | 0 | 0 | null | 2019-05-04T05:59:24 | 2019-05-04T05:59:24 | null |
UTF-8
|
Python
| false | false | 7,666 |
py
|
import numpy as np
import tensorflow as tf
from . import repmem
class DQN:
def __init__(
self,
input_shape,
n_actions,
q_fn,
learning_rate,
discount_factor=0.99
):
"""
Parameters
----------
input_shape: the shape of input stat
- type: list of int
- example: [84, 84, 4] for Atari game in the original DQN paper
n_actions: the number of actions the agent can choose
- type: int
q_fn: a function building the computation graph for q-network
- type: callable
- input of q_fn: Tensor of shape [None, input_shape[0], input_shape[1], ...] and n_actions
- output of q_fn: Tensor of shape [None, n_actions]
learning_rate: the step size of the optimization method
- type: float
"""
self.learning_rate = learning_rate
self.n_actions = n_actions
self.gamma = discount_factor
self.input_shape = input_shape
self.q_fn = q_fn
# References to graph nodes are assigned after running `build_graph` method
self.x_ph, self.y_ph, self.a_ph = None, None, None
self.q, self.loss, self.train_ops = None, None, None
self.target_x_ph, self.target_q = None, None
self.assign_ops = None
def build_graph(self):
# Create placeholders
self.x_ph = tf.placeholder(tf.float32, shape=[None]+list(self.input_shape), name="x_ph")
self.y_ph = tf.placeholder(tf.float32, shape=[None], name="y_ph")
self.a_ph = tf.placeholder(tf.int64, shape=[None], name="a_ph")
# Build q network
with tf.variable_scope("qnet"):
self.q = self.q_fn(self.x_ph, self.n_actions)
self.loss = self._build_loss(self.y_ph, self.q, self.a_ph)
# Build target q network
self.target_x_ph = tf.placeholder(tf.float32, shape=[None] + list(self.input_shape), name="target_x_ph")
with tf.variable_scope("target_qnet"):
self.target_q = self.q_fn(self.target_x_ph, self.n_actions)
# Build update target q-network ops
q_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="qnet")
target_q_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="target_qnet")
self.train_ops = self._build_optimizer(self.loss, self.learning_rate)
self.assign_ops = [tf.assign(target_q_vars[i], q_vars[i]) for i in range(len(q_vars))]
@staticmethod
def _build_loss(y_t_ph, q_t, a_ph):
with tf.name_scope("loss"):
a_t_one_hot = tf.one_hot(a_ph, q_t.get_shape()[1].value)
q_t_acted = tf.reduce_sum(q_t * a_t_one_hot, reduction_indices=1)
loss = tf.losses.mean_squared_error(labels=y_t_ph, predictions=q_t_acted)
return loss
@staticmethod
def _build_optimizer(loss, learning_rate):
global_step = tf.train.get_or_create_global_step()
optim = tf.train.RMSPropOptimizer(learning_rate=learning_rate, momentum=0.95, epsilon=1e-2)
train_op = optim.minimize(loss, global_step=global_step)
return train_op
def update(self, sess, x_t, a_t, r_t, x_t_plus_1, terminal):
# Compute target score
fd = {self.target_x_ph: x_t_plus_1}
q_t_plus_1 = np.max(sess.run(self.target_q, feed_dict=fd), axis=1)
y_t = r_t + q_t_plus_1 * (1-terminal) * self.gamma
# Run optimization operation
fd = {self.x_ph: x_t, self.y_ph: y_t, self.a_ph: a_t}
_, train_loss = sess.run([self.train_ops, self.loss], feed_dict=fd)
return train_loss
def act(self, sess, x_t):
return sess.run(self.q, feed_dict={self.x_ph: x_t})
def update_target_q_network(self, sess):
sess.run(self.assign_ops)
def train_and_play_game(
agent,
env,
random_action_decay,
max_episodes,
replay_memory_size,
batch_size,
update_frequency,
target_sync_frequency,
final_exploration_frame,
log_frequency=5,
action_repeat=4,
max_no_op=30,
checkpoint_dir=None,
):
replay_memory = repmem.ReplayMemory(memory_size=replay_memory_size)
total_reward_list = []
with tf.Graph().as_default() as g:
agent.build_graph()
episode_count = step_count = action_count = frame_count = 0
with tf.train.MonitoredTrainingSession(
save_summaries_steps=100,
checkpoint_dir=checkpoint_dir,
) as mon_sess:
# Training loop
while episode_count < max_episodes:
# random_action_prob = max(random_action_decay**episode_count, 0.05)
random_action_prob = max(1 - float(frame_count)/final_exploration_frame*0.95, 0.05)
# Play a new game
previous_observation = env.reset()
done = False
total_reward = 0
# Initial action
action = np.random.randint(agent.n_actions)
while not done:
# Act at random in first some frames
# for _ in range(np.random.randint(1, max_no_op)):
# previous_observation, _, _, _ = env.step(env.action_space.sample())
# print(episode_count, step_count, action_count, frame_count)
if frame_count % target_sync_frequency == 0:
agent.update_target_q_network(mon_sess)
# Frame skip
if frame_count % action_repeat == 0:
# Act at random with a fixed probability
if np.random.rand() <= random_action_prob:
action = np.random.randint(agent.n_actions)
# Act following the policy on the other games
else:
q = agent.act(mon_sess, np.array([previous_observation]))
action = q.argmax()
# print(q)
action_count += 1
# Receive the results from the game simulator
observation, reward, done, info = env.step(action)
total_reward += reward
# Store the experience
if frame_count % action_repeat == 0:
replay_memory.store(previous_observation, action, reward, observation, done)
previous_observation = observation
# Update q network every update_interval
if action_count % update_frequency == 0:
mini_batch = replay_memory.sample(size=batch_size)
train_loss = agent.update(
mon_sess, mini_batch[0], mini_batch[1], mini_batch[2], mini_batch[3], mini_batch[4]
)
step_count += 1
frame_count += 1
episode_count += 1
total_reward_list.append(total_reward)
# Show log every log_interval
if episode_count % log_frequency == 0:
print("Episode: {} Frame: {} Test: {}".format(episode_count, frame_count, len(total_reward_list)))
print(
"Average Reward: {} Training Loss: {} Epsilon: {}".format(
np.mean(total_reward_list[-50:]),
np.mean(train_loss),
random_action_prob
)
)
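# Sketch of wiring the pieces above together (the q_fn below is a hypothetical
# two-layer network, assumed only for illustration; TF1-style API):
#
#   def q_fn(x, n_actions):
#       h = tf.layers.dense(tf.layers.flatten(x), 64, activation=tf.nn.relu)
#       return tf.layers.dense(h, n_actions)
#
#   agent = DQN(input_shape=[84, 84, 4], n_actions=4, q_fn=q_fn,
#               learning_rate=2.5e-4)
#   # train_and_play_game(agent, env, ...) then drives the training loop.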
|
[
"[email protected]"
] | |
7bb62eea1b4ae6548c56888022465b75c9c17c5a
|
1ecb394b10e9622a5a5d8845b44e4585f464d42e
|
/nncp-api/biz/dlt.py
|
6b61757ea3ab90e5869038b48a07359ec2fefec2
|
[] |
no_license
|
dragonflylxp/lottory
|
7ec28d196f58692d9d417aa5d6963c182afe260a
|
b04f115df325a58148dc19d7cdfc21b28892a6a1
|
refs/heads/master
| 2020-04-28T08:53:09.007092 | 2020-04-17T10:50:41 | 2020-04-17T10:50:41 | 175,145,951 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,107 |
py
|
# coding: utf-8
import traceback
import ews
import define
from decimal import Decimal
from hook import Hook
from util.configer import *
from cbrpc import get_rpc_conn, RpcException
from commonEntity.Dlt import DltBean
from commonEntity.User import UserBean
import session
from util.tools import Log
logger = Log().getLog()
@ews.route_sync_func('/dlt/issue')
def dlt_issue(handler, *args, **kwargs):
ret = DltBean().get_dlt_expect_list()
return handler.ok(ret)
@ews.route_sync_func('/dlt/trade', kwargs={'ck': (UserWarning,),
'lotid': (UserWarning, 'unsigned int'),
'wtype': (UserWarning, 'unsigned int'),
'beishu': (UserWarning, 'unsigned int'),
'zhushu': (UserWarning, 'unsigned int'),
'allmoney': (UserWarning, 'unsigned int'),
'couponid': (UserWarning, 'int'),
'expect': (UserWarning,),
'selecttype': (UserWarning,),
'fileorcode': (UserWarning,)})
@Hook.pre_hook('check_lotto')
def trade(handler, *args, **kwargs):
ck = handler.json_args.get("ck", "")
uid = session.get_by_ck(ck).get('uid')
params = handler.json_args
params.update({"uid": uid})
pid = None
try:
        # Account check
        paymoney = UserBean().check_account(params)
        # Place the order
with get_rpc_conn("trade") as proxy:
try:
resp = proxy.call("place_order", params)
except RpcException as ex:
raise ews.EwsError(ews.STATUS_RPC_TRADE_ERROR, ex.message)
except:
logger.error(traceback.format_exc())
raise
account = UserBean().user_account({"uid": uid})
ret = {"pid": resp.get("pid"), "balance": Decimal(account.get("balance"))-Decimal(paymoney), "balance_draw": account.get("balance_draw")}
return handler.ok(ret)
|
[
"[email protected]"
] | |
8afaab8c0cea56e8f41ab9c4f1c2441543615eef
|
fdd41bb26f1e7a17f5a424fe082a46bcc355abed
|
/setup.py
|
a14bf381aa58d1657f67532225f0ea61bddeb340
|
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
SamuelMarks/ml-params-jax
|
4978d5d4c422825be748138f87422cb0d4604ec1
|
fd9ac6efe5f2c7ec6d4d41ccc9e032219992a219
|
refs/heads/master
| 2022-11-15T20:03:32.678882 | 2020-07-05T10:40:22 | 2020-07-05T10:40:22 | 276,909,573 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,339 |
py
|
# -*- coding: utf-8 -*-
from ast import parse
from distutils.sysconfig import get_python_lib
from functools import partial
from os import path, listdir
from platform import python_version_tuple
from setuptools import setup, find_packages
if python_version_tuple()[0] == '3':
imap = map
ifilter = filter
else:
from itertools import imap, ifilter
if __name__ == '__main__':
package_name = 'ml_params_jax'
with open(path.join(package_name, '__init__.py')) as f:
__author__, __version__ = imap(
lambda buf: next(imap(lambda e: e.value.s, parse(buf).body)),
ifilter(lambda line: line.startswith('__version__') or line.startswith('__author__'), f)
)
to_funcs = lambda *paths: (partial(path.join, path.dirname(__file__), package_name, *paths),
partial(path.join, get_python_lib(prefix=''), package_name, *paths))
_data_join, _data_install_dir = to_funcs('_data')
setup(
name=package_name,
author=__author__,
version=__version__,
install_requires=['pyyaml'],
test_suite=package_name + '.tests',
packages=find_packages(),
package_dir={package_name: package_name},
data_files=[
(_data_install_dir(), list(imap(_data_join, listdir(_data_join()))))
]
)
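# Sketch of what the ast-based extraction above does on a one-line sample
# buffer (illustrative; relies on the legacy `.s` attribute of string nodes,
# exactly as the code above does):
#
#   from ast import parse
#   buf = "__version__ = '0.1.0'"
#   next(map(lambda e: e.value.s, parse(buf).body))   # -> '0.1.0'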
|
[
"[email protected]"
] | |
6c742dc924f39f16f10aa3ddcde7be364acf3c92
|
20fc010bcc1b23b8df29c969eee725f3083ac117
|
/mayan/apps/folders/tests/test_models.py
|
a5beb8df62ea4fc10f50dca53b49bab5128b6c5f
|
[
"Apache-2.0"
] |
permissive
|
fire-studio/mayan-edms
|
c5c943e16ea0c780a4c6c61d3bc702d00590eb61
|
6dc45a1c7f5f19219fc748e1578f200301a18f5b
|
refs/heads/master
| 2021-01-01T17:09:08.733373 | 2017-07-22T06:42:50 | 2017-07-22T06:45:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,780 |
py
|
from __future__ import unicode_literals
from django.test import override_settings
from common.tests import BaseTestCase
from documents.models import DocumentType
from documents.tests import TEST_DOCUMENT_PATH, TEST_DOCUMENT_TYPE
from ..models import Folder
from .literals import TEST_FOLDER_LABEL
@override_settings(OCR_AUTO_OCR=False)
class FolderTestCase(BaseTestCase):
def setUp(self):
super(FolderTestCase, self).setUp()
self.document_type = DocumentType.objects.create(
label=TEST_DOCUMENT_TYPE
)
with open(TEST_DOCUMENT_PATH) as file_object:
self.document = self.document_type.new_document(
file_object=file_object
)
def tearDown(self):
self.document_type.delete()
super(FolderTestCase, self).tearDown()
def test_folder_creation(self):
folder = Folder.objects.create(label=TEST_FOLDER_LABEL)
self.assertEqual(Folder.objects.all().count(), 1)
self.assertEqual(list(Folder.objects.all()), [folder])
def test_addition_of_documents(self):
folder = Folder.objects.create(label=TEST_FOLDER_LABEL)
folder.documents.add(self.document)
self.assertEqual(folder.documents.count(), 1)
self.assertEqual(list(folder.documents.all()), [self.document])
def test_addition_and_deletion_of_documents(self):
folder = Folder.objects.create(label=TEST_FOLDER_LABEL)
folder.documents.add(self.document)
self.assertEqual(folder.documents.count(), 1)
self.assertEqual(list(folder.documents.all()), [self.document])
folder.documents.remove(self.document)
self.assertEqual(folder.documents.count(), 0)
self.assertEqual(list(folder.documents.all()), [])
|
[
"[email protected]"
] | |
0571fbc9119a29f41920828bd13da3bfcf7922b5
|
de9172a39ff6a9c5bae44a7590d38ad312162713
|
/manage.py
|
aa359a1b51d9b70b05fd8b602414fff376e3065a
|
[
"Apache-2.0"
] |
permissive
|
CiganOliviu/CustomerRelationshipManagement
|
aa33447ddad8b59f59da4f947734aea634f7e027
|
dc29f05692b5dbcd761adea6c3f0e8f293e3bf16
|
refs/heads/main
| 2023-03-01T13:35:33.140128 | 2021-02-06T21:33:36 | 2021-02-06T21:33:36 | 334,481,597 | 0 | 0 |
Apache-2.0
| 2021-02-06T21:33:37 | 2021-01-30T18:19:00 | null |
UTF-8
|
Python
| false | false | 649 |
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ClientsRelationshipManagement.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
6d15dc1ae9a416953dddc829e437664a04cf34cc
|
24f664aa2344d4f5d5e7b048ac4e85231715c4c8
|
/deeplearning/deeptune/opencl/heterogeneous_mapping/models/base.py
|
7e8490aaae299610f8b1c95cea719f8d82a1ac1c
|
[] |
no_license
|
speycode/clfuzz
|
79320655e879d1e0a06a481e8ec2e293c7c10db7
|
f2a96cf84a7971f70cb982c07b84207db407b3eb
|
refs/heads/master
| 2020-12-05T13:44:55.486419 | 2020-01-03T14:14:03 | 2020-01-03T14:15:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,330 |
py
|
# Copyright (c) 2017, 2018, 2019 Chris Cummins.
#
# DeepTune is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DeepTune is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DeepTune. If not, see <https://www.gnu.org/licenses/>.
"""Base class for OpenCL heterogeneous device mapping models."""
import pathlib
import typing
import pandas as pd
from deeplearning.clgen.corpuses import atomizers
from labm8.py import app
FLAGS = app.FLAGS
class HeterogeneousMappingModel(object):
"""A model for predicting OpenCL heterogeneous device mappings.
Attributes:
__name__ (str): Model name.
__basename__ (str): Shortened name, used for files
"""
__name__ = None
__basename__ = None
def init(self, seed: int, atomizer: atomizers.AtomizerBase) -> None:
"""Initialize the model.
    Do whatever is required to set up a new heterogeneous model here.
This method is called prior to training and predicting.
This method may be omitted if no initial setup is required.
Args:
      seed (int): The seed value used to make results reproducible. May be 'None',
indicating that no seed is to be used.
atomizer: The atomizer used to tokenize training examples.
"""
pass
# TODO(cec): Switch to exclusively pathlib.Path for argument.
def save(self, outpath: typing.Union[str, pathlib.Path]) -> None:
"""Save model state.
This must capture all of the relevant state of the model. It is up
to implementing classes to determine how best to save the model.
Args:
outpath (str): The path to save the model state to.
"""
raise NotImplementedError
# TODO(cec): Switch to exclusively pathlib.Path for argument.
def restore(self, inpath: typing.Union[str, pathlib.Path]) -> None:
"""Load a trained model from file.
This is called in place of init() if a saved model file exists. It
must restore all of the required model state.
Args:
inpath (str): The path to load the model from. This is the same path as
was passed to save() to create the file.
"""
raise NotImplementedError
def train(
self, df: pd.DataFrame, platform_name: str, verbose: bool = False
) -> None:
"""Train a model.
Args:
df: The dataframe of training data.
platform_name: The name of the gpu being trained for
verbose: Whether to print verbose status messages during training.
"""
raise NotImplementedError
def predict(
self, df: pd.DataFrame, platform_name: str, verbose: bool = False
) -> typing.Iterable[int]:
"""Make predictions for programs.
Args:
df: The dataframe of training data.
platform_name: The name of the gpu being trained for
verbose: Whether to print verbose status messages during training.
Returns:
A sequence of predicted 'y' values (optimal device mappings).
"""
raise NotImplementedError
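# A minimal sketch of a concrete subclass (a hypothetical static baseline that
# always predicts the first device, shown only to illustrate the interface
# contract above):
#
#   class StaticMapper(HeterogeneousMappingModel):
#       __name__ = "Static mapper"
#       __basename__ = "static"
#
#       def save(self, outpath):
#           pass  # nothing to persist
#
#       def restore(self, inpath):
#           pass
#
#       def train(self, df, platform_name, verbose=False):
#           pass  # no parameters to fit
#
#       def predict(self, df, platform_name, verbose=False):
#           return [0] * len(df)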
|
[
"[email protected]"
] | |
aaef15c38545b71f401e174ffe09c7f011928a7f
|
e38e87ed5e500290ba0c2f774227920625ee5c54
|
/examples/computing_embeddings.py
|
7e84b597291985f1773e4aeaa58989017cf20739
|
[
"Python-2.0",
"Apache-2.0"
] |
permissive
|
ishine/text2vec
|
39d363b94ddbc9e664939041ae63ad4e352b894b
|
71842f67f7be9f0d4fee5b6e0e2562a1c553818e
|
refs/heads/master
| 2023-08-25T19:49:54.215412 | 2021-11-05T06:37:18 | 2021-11-05T06:37:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,274 |
py
|
# -*- coding: utf-8 -*-
"""
@author:XuMing([email protected])
@description:
This basic example loads a pre-trained model from the web and uses it to
generate sentence embeddings for a given list of sentences.
"""
import sys
sys.path.append('..')
from text2vec import SBert
from text2vec import Word2Vec
def compute_emb(model):
# Embed a list of sentences
    sentences = ['卡',  # "card"
                 '银行卡',  # "bank card"
                 '如何更换花呗绑定银行卡',  # "how to change the bank card bound to Huabei"
                 '花呗更改绑定银行卡',  # "Huabei: change the bound bank card"
'This framework generates embeddings for each input sentence',
'Sentences are passed as a list of string.',
'The quick brown fox jumps over the lazy dog.']
sentence_embeddings = model.encode(sentences)
print(type(sentence_embeddings), sentence_embeddings.shape)
# The result is a list of sentence embeddings as numpy arrays
for sentence, embedding in zip(sentences, sentence_embeddings):
print("Sentence:", sentence)
print("Embedding:", embedding)
print("")
if __name__ == '__main__':
sbert_model = SBert('paraphrase-multilingual-MiniLM-L12-v2')
compute_emb(sbert_model)
w2v_model = Word2Vec('w2v-light-tencent-chinese')
compute_emb(w2v_model)
|
[
"[email protected]"
] | |
64a01ea72a90e118f418a461a192b0bed7834f65
|
41de4210af23a8a8a3ca7dd090bb51faecf4a0c8
|
/lib/python3.5/site-packages/statsmodels/tsa/statespace/_pykalman_smoother.py
|
fb35b84e6db940e426521726b7be524b233230a5
|
[
"Python-2.0"
] |
permissive
|
randybrown-github/ziplineMacOS
|
42a0c2bfca2a54baa03d2803dc41317647811285
|
eb5872c0903d653e19f259f0800fb7aecee0ee5c
|
refs/heads/master
| 2022-11-07T15:51:39.808092 | 2020-06-18T20:06:42 | 2020-06-18T20:06:42 | 272,631,387 | 0 | 1 | null | 2022-11-02T03:21:45 | 2020-06-16T06:48:53 |
Python
|
UTF-8
|
Python
| false | false | 11,383 |
py
|
"""
Kalman Smoother
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import warnings
SMOOTHER_STATE = 0x01 # Durbin and Koopman (2012), Chapter 4.4.2
SMOOTHER_STATE_COV = 0x02 # ibid., Chapter 4.4.3
SMOOTHER_DISTURBANCE = 0x04 # ibid., Chapter 4.5
SMOOTHER_DISTURBANCE_COV = 0x08 # ibid., Chapter 4.5
SMOOTHER_ALL = (
SMOOTHER_STATE | SMOOTHER_STATE_COV | SMOOTHER_DISTURBANCE |
SMOOTHER_DISTURBANCE_COV
)
class _KalmanSmoother(object):
def __init__(self, model, kfilter, smoother_output):
# Save values
self.model = model
self.kfilter = kfilter
self._kfilter = model._kalman_filter
self.smoother_output = smoother_output
# Create storage
self.scaled_smoothed_estimator = None
self.scaled_smoothed_estimator_cov = None
self.smoothing_error = None
self.smoothed_state = None
self.smoothed_state_cov = None
self.smoothed_state_disturbance = None
self.smoothed_state_disturbance_cov = None
self.smoothed_measurement_disturbance = None
self.smoothed_measurement_disturbance_cov = None
# Intermediate values
self.tmp_L = np.zeros((model.k_states, model.k_states, model.nobs),
dtype=kfilter.dtype)
if smoother_output & (SMOOTHER_STATE | SMOOTHER_DISTURBANCE):
self.scaled_smoothed_estimator = (
np.zeros((model.k_states, model.nobs+1), dtype=kfilter.dtype))
self.smoothing_error = (
np.zeros((model.k_endog, model.nobs), dtype=kfilter.dtype))
if smoother_output & (SMOOTHER_STATE_COV | SMOOTHER_DISTURBANCE_COV):
self.scaled_smoothed_estimator_cov = (
np.zeros((model.k_states, model.k_states, model.nobs + 1),
dtype=kfilter.dtype))
# State smoothing
if smoother_output & SMOOTHER_STATE:
self.smoothed_state = np.zeros((model.k_states, model.nobs),
dtype=kfilter.dtype)
if smoother_output & SMOOTHER_STATE_COV:
self.smoothed_state_cov = (
np.zeros((model.k_states, model.k_states, model.nobs),
dtype=kfilter.dtype))
# Disturbance smoothing
if smoother_output & SMOOTHER_DISTURBANCE:
self.smoothed_state_disturbance = (
np.zeros((model.k_posdef, model.nobs), dtype=kfilter.dtype))
self.smoothed_measurement_disturbance = (
np.zeros((model.k_endog, model.nobs), dtype=kfilter.dtype))
if smoother_output & SMOOTHER_DISTURBANCE_COV:
self.smoothed_state_disturbance_cov = (
np.zeros((model.k_posdef, model.k_posdef, model.nobs),
dtype=kfilter.dtype))
self.smoothed_measurement_disturbance_cov = (
np.zeros((model.k_endog, model.k_endog, model.nobs),
dtype=kfilter.dtype))
def seek(self, t):
if t >= self.model.nobs:
raise IndexError("Observation index out of range")
self.t = t
def __iter__(self):
return self
def __call__(self):
self.seek(self.model.nobs-1)
# Perform backwards smoothing iterations
for i in range(self.model.nobs-1, -1, -1):
next(self)
def next(self):
# next() is required for compatibility with Python2.7.
return self.__next__()
def __next__(self):
# Check for valid iteration
if not self.t >= 0:
raise StopIteration
# Get local copies of variables
t = self.t
kfilter = self.kfilter
_kfilter = self._kfilter
model = self.model
smoother_output = self.smoother_output
scaled_smoothed_estimator = self.scaled_smoothed_estimator
scaled_smoothed_estimator_cov = self.scaled_smoothed_estimator_cov
smoothing_error = self.smoothing_error
smoothed_state = self.smoothed_state
smoothed_state_cov = self.smoothed_state_cov
smoothed_state_disturbance = self.smoothed_state_disturbance
smoothed_state_disturbance_cov = self.smoothed_state_disturbance_cov
smoothed_measurement_disturbance = (
self.smoothed_measurement_disturbance)
smoothed_measurement_disturbance_cov = (
self.smoothed_measurement_disturbance_cov)
tmp_L = self.tmp_L
# Seek the Cython Kalman filter to the right place, setup matrices
_kfilter.seek(t, False)
_kfilter.initialize_statespace_object_pointers()
_kfilter.initialize_filter_object_pointers()
_kfilter.select_missing()
missing_entire_obs = (
_kfilter.model.nmissing[t] == _kfilter.model.k_endog)
missing_partial_obs = (
not missing_entire_obs and _kfilter.model.nmissing[t] > 0)
# Get the appropriate (possibly time-varying) indices
design_t = 0 if kfilter.design.shape[2] == 1 else t
obs_cov_t = 0 if kfilter.obs_cov.shape[2] == 1 else t
transition_t = 0 if kfilter.transition.shape[2] == 1 else t
selection_t = 0 if kfilter.selection.shape[2] == 1 else t
state_cov_t = 0 if kfilter.state_cov.shape[2] == 1 else t
        # Get endog dimension (can vary if there is missing data)
k_endog = _kfilter.k_endog
# Get references to representation matrices and Kalman filter output
transition = model.transition[:, :, transition_t]
selection = model.selection[:, :, selection_t]
state_cov = model.state_cov[:, :, state_cov_t]
predicted_state = kfilter.predicted_state[:, t]
predicted_state_cov = kfilter.predicted_state_cov[:, :, t]
mask = ~kfilter.missing[:, t].astype(bool)
if missing_partial_obs:
design = np.array(
_kfilter.selected_design[:k_endog*model.k_states], copy=True
).reshape(k_endog, model.k_states, order='F')
obs_cov = np.array(
_kfilter.selected_obs_cov[:k_endog**2], copy=True
).reshape(k_endog, k_endog)
kalman_gain = kfilter.kalman_gain[:, mask, t]
forecasts_error_cov = np.array(
_kfilter.forecast_error_cov[:, :, t], copy=True
).ravel(order='F')[:k_endog**2].reshape(k_endog, k_endog)
forecasts_error = np.array(
_kfilter.forecast_error[:k_endog, t], copy=True)
F_inv = np.linalg.inv(forecasts_error_cov)
else:
if missing_entire_obs:
design = np.zeros(model.design.shape[:-1])
else:
design = model.design[:, :, design_t]
obs_cov = model.obs_cov[:, :, obs_cov_t]
kalman_gain = kfilter.kalman_gain[:, :, t]
forecasts_error_cov = kfilter.forecasts_error_cov[:, :, t]
forecasts_error = kfilter.forecasts_error[:, t]
F_inv = np.linalg.inv(forecasts_error_cov)
# Create a temporary matrix
tmp_L[:, :, t] = transition - kalman_gain.dot(design)
L = tmp_L[:, :, t]
# Perform the recursion
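        # For reference, these are the standard fixed-interval smoothing
        # recursions (Durbin and Koopman, "Time Series Analysis by State
        # Space Methods"), which the code below implements:
        #   u_t = F_t^{-1} v_t - K_t' r_t
        #   r_{t-1} = Z_t' u_t + T_t' r_t
        #   N_{t-1} = Z_t' F_t^{-1} Z_t + L_t' N_t L_t,  with L_t = T_t - K_t Z_t
        #   alphahat_t = a_t + P_t r_{t-1},  V_t = P_t - P_t N_{t-1} P_t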
# Intermediate values
if smoother_output & (SMOOTHER_STATE | SMOOTHER_DISTURBANCE):
if missing_entire_obs:
# smoothing_error is undefined here, keep it as zeros
scaled_smoothed_estimator[:, t - 1] = (
transition.transpose().dot(scaled_smoothed_estimator[:, t])
)
else:
smoothing_error[:k_endog, t] = (
F_inv.dot(forecasts_error) -
kalman_gain.transpose().dot(
scaled_smoothed_estimator[:, t])
)
scaled_smoothed_estimator[:, t - 1] = (
design.transpose().dot(smoothing_error[:k_endog, t]) +
transition.transpose().dot(scaled_smoothed_estimator[:, t])
)
if smoother_output & (SMOOTHER_STATE_COV | SMOOTHER_DISTURBANCE_COV):
if missing_entire_obs:
scaled_smoothed_estimator_cov[:, :, t - 1] = (
L.transpose().dot(
scaled_smoothed_estimator_cov[:, :, t]
).dot(L)
)
else:
scaled_smoothed_estimator_cov[:, :, t - 1] = (
design.transpose().dot(F_inv).dot(design) +
L.transpose().dot(
scaled_smoothed_estimator_cov[:, :, t]
).dot(L)
)
# State smoothing
if smoother_output & SMOOTHER_STATE:
smoothed_state[:, t] = (
predicted_state +
predicted_state_cov.dot(scaled_smoothed_estimator[:, t - 1])
)
if smoother_output & SMOOTHER_STATE_COV:
smoothed_state_cov[:, :, t] = (
predicted_state_cov -
predicted_state_cov.dot(
scaled_smoothed_estimator_cov[:, :, t - 1]
).dot(predicted_state_cov)
)
# Disturbance smoothing
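        # For reference, the smoothed disturbances and their covariances are
        #   etahat_t = Q_t R_t' r_t,  Var(eta_t | Y_n) = Q_t - Q_t R_t' N_t R_t Q_t
        #   epshat_t = H_t u_t,       Var(eps_t | Y_n) = H_t - H_t (F_t^{-1} + K_t' N_t K_t) H_t
        # falling back to the unconditional moments when the whole observation
        # vector is missing.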
if smoother_output & (SMOOTHER_DISTURBANCE | SMOOTHER_DISTURBANCE_COV):
QR = state_cov.dot(selection.transpose())
if smoother_output & SMOOTHER_DISTURBANCE:
smoothed_state_disturbance[:, t] = (
QR.dot(scaled_smoothed_estimator[:, t])
)
# measurement disturbance is set to zero when all missing
# (unconditional distribution)
if not missing_entire_obs:
smoothed_measurement_disturbance[mask, t] = (
obs_cov.dot(smoothing_error[:k_endog, t])
)
if smoother_output & SMOOTHER_DISTURBANCE_COV:
smoothed_state_disturbance_cov[:, :, t] = (
state_cov -
QR.dot(
scaled_smoothed_estimator_cov[:, :, t]
).dot(QR.transpose())
)
if missing_entire_obs:
smoothed_measurement_disturbance_cov[:, :, t] = obs_cov
else:
# For non-missing portion, calculate as usual
ix = np.ix_(mask, mask, [t])
smoothed_measurement_disturbance_cov[ix] = (
obs_cov - obs_cov.dot(
F_inv + kalman_gain.transpose().dot(
scaled_smoothed_estimator_cov[:, :, t]
).dot(kalman_gain)
).dot(obs_cov)
)[:, :, np.newaxis]
# For missing portion, use unconditional distribution
ix = np.ix_(~mask, ~mask, [t])
mod_ix = np.ix_(~mask, ~mask, [0])
smoothed_measurement_disturbance_cov[ix] = np.copy(
model.obs_cov[:, :, obs_cov_t:obs_cov_t+1])[mod_ix]
# Advance the smoother
self.t -= 1
|
[
"[email protected]"
] | |
adf0f62316b8fa8d727bcb11f2ea9d19dc9f0b06
|
7c8c7fe5a7aea0a023624b31433f281a642bd488
|
/tslearn/tests/test_variablelength.py
|
ba22bb86e4e2cd321192159d489d56ebcc001bcb
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
wzpy/tslearn
|
918b969a69fd1a003037c94224ff31959f8b271f
|
2d7f3ea710b8d7f21ab24d212e930046b9c142ad
|
refs/heads/master
| 2020-07-15T04:38:48.217622 | 2019-08-30T22:04:05 | 2019-08-30T22:04:05 | 205,481,146 | 1 | 0 |
BSD-2-Clause
| 2019-08-31T01:53:11 | 2019-08-31T01:53:11 | null |
UTF-8
|
Python
| false | false | 3,258 |
py
|
import numpy as np
from numpy.testing import assert_allclose, assert_array_less
from sklearn.model_selection import cross_val_score, KFold
from tslearn.neighbors import KNeighborsTimeSeriesClassifier
from tslearn.svm import TimeSeriesSVC, TimeSeriesSVR
from tslearn.clustering import GlobalAlignmentKernelKMeans, TimeSeriesKMeans
from tslearn.utils import to_time_series_dataset
__author__ = 'Romain Tavenard romain.tavenard[at]univ-rennes2.fr'
def test_variable_length_knn():
X = to_time_series_dataset([[1, 2, 3, 4],
[1, 2, 3],
[2, 5, 6, 7, 8, 9],
[3, 5, 6, 7, 8]])
y = [0, 0, 1, 1]
clf = KNeighborsTimeSeriesClassifier(metric="dtw", n_neighbors=1)
clf.fit(X, y)
assert_allclose(clf.predict(X), [0, 0, 1, 1])
clf = KNeighborsTimeSeriesClassifier(metric="softdtw", n_neighbors=1)
clf.fit(X, y)
assert_allclose(clf.predict(X), [0, 0, 1, 1])
def test_variable_length_svm():
X = to_time_series_dataset([[1, 2, 3, 4],
[1, 2, 3],
[2, 5, 6, 7, 8, 9],
[3, 5, 6, 7, 8]])
y = [0, 0, 1, 1]
rng = np.random.RandomState(0)
clf = TimeSeriesSVC(kernel="gak", random_state=rng)
clf.fit(X, y)
assert_allclose(clf.predict(X), [0, 0, 1, 1])
y_reg = [-1., -1.3, 3.2, 4.1]
clf = TimeSeriesSVR(kernel="gak")
clf.fit(X, y_reg)
assert_array_less(clf.predict(X[:2]), 0.)
assert_array_less(-clf.predict(X[2:]), 0.)
def test_variable_length_clustering():
# TODO: here we just check that they can accept variable-length TS, not
# that they do clever things
X = to_time_series_dataset([[1, 2, 3, 4],
[1, 2, 3],
[2, 5, 6, 7, 8, 9],
[3, 5, 6, 7, 8]])
rng = np.random.RandomState(0)
clf = GlobalAlignmentKernelKMeans(n_clusters=2, random_state=rng)
clf.fit(X)
clf = TimeSeriesKMeans(n_clusters=2, metric="dtw", random_state=rng)
clf.fit(X)
clf = TimeSeriesKMeans(n_clusters=2, metric="softdtw", random_state=rng)
clf.fit(X)
def test_variable_cross_val():
# TODO: here we just check that they can accept variable-length TS, not
# that they do clever things
X = to_time_series_dataset([[1, 2, 3, 4],
[1, 2, 3],
[1, 2, 3, 4],
[1, 2, 3],
[2, 5, 6, 7, 8, 9],
[3, 5, 6, 7, 8],
[2, 5, 6, 7, 8, 9],
[3, 5, 6, 7, 8]])
y = [0, 0, 0, 0, 1, 1, 1, 1]
rng = np.random.RandomState(0)
cv = KFold(n_splits=2, shuffle=True)
for estimator in [
TimeSeriesSVC(kernel="gak", random_state=rng),
TimeSeriesSVR(kernel="gak"),
KNeighborsTimeSeriesClassifier(metric="dtw", n_neighbors=1),
KNeighborsTimeSeriesClassifier(metric="softdtw", n_neighbors=1)
]:
# TODO: cannot test for clustering methods since they don't have a
# score method yet
cross_val_score(estimator, X=X, y=y, cv=cv)
|
[
"[email protected]"
] | |
069516998750956b7549ff532bbaf794a91c42e7
|
e638e9fda0e672fa9a414515d0c05a24ab55ad38
|
/SparseMatrixMultiplication.py
|
73e01038302f2d5dfc6d050502ff25dfad32a2a7
|
[] |
no_license
|
zjuzpz/Algorithms
|
8d1c7d50429aa5540eb817dc5495a20fc3f11125
|
2df1a58aa9474f2ecec2ee7c45ebf12466181391
|
refs/heads/master
| 2021-01-21T05:55:48.768728 | 2020-08-04T22:44:08 | 2020-08-04T22:44:08 | 44,586,024 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,068 |
py
|
"""
311. Sparse Matrix Multiplication
Given two sparse matrices A and B, return the result of AB.
You may assume that A's column number is equal to B's row number.
Example:
A = [
[ 1, 0, 0],
[-1, 0, 3]
]
B = [
[ 7, 0, 0 ],
[ 0, 0, 0 ],
[ 0, 0, 1 ]
]
| 1 0 0 | | 7 0 0 | | 7 0 0 |
AB = | -1 0 3 | x | 0 0 0 | = | -7 0 3 |
| 0 0 1 |
"""
# Time: O(l * m * n) in the worst case; zero entries of A skip the inner loop
# Space: O(l * n) for the result matrix
class Solution(object):
def multiply(self, A, B):
"""
:type A: List[List[int]]
:type B: List[List[int]]
:rtype: List[List[int]]
"""
if not A or not B:
return []
res = [[0 for j in range(len(B[0]))] for i in range(len(A))]
for i in range(len(A)):
for j in range(len(A[0])):
if A[i][j] != 0:
for k in range(len(res[0])):
res[i][k] += A[i][j] * B[j][k]
return res
if __name__ == "__main__":
A = [[1, 0, 0], [-1, 0, 3]]
B = [[7, 0, 0], [0, 0, 0], [0, 0, 1]]
print(Solution().multiply(A, B))
|
[
"[email protected]"
] | |
e4ae6e2272d209c56491720c42c47c6bb8eb751e
|
675989e2669b8b281b39de85dab2fe781cdaca6a
|
/macro/beam_profile/plots_beam.py
|
acd3481bec53ea81dba6acf6c9242a38deb0afa8
|
[] |
no_license
|
adamjaro/lmon
|
c439615c2aae861102e32440f823b05b7b054715
|
fd80ae93620ff12f8043f04d19538355fbdff81a
|
refs/heads/master
| 2023-06-01T06:29:09.749097 | 2023-05-08T17:55:55 | 2023-05-08T17:55:55 | 223,854,949 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,691 |
py
|
#!/usr/bin/python3
from pandas import read_csv, DataFrame
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from scipy.stats import norm
from scipy.optimize import curve_fit
import numpy as np
#_____________________________________________________________________________
def main():
iplot = 2
funclist = []
funclist.append( plot_x ) # 0
funclist.append( plot_y ) # 1
funclist.append( plot_z ) # 2
funclist[iplot]()
#main
#_____________________________________________________________________________
def plot_x():
#x of primary vertex
infile = "data/vtx_18x275_3p3_r2.csv"
#infile = "data/vtx_18x275_3p4.csv"
inp = read_csv(infile)
#print(inp)
nbins = 60
#plt.style.use("dark_background")
#col = "lime"
col = "black"
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
set_axes_color(ax, col)
set_grid(plt, col)
hx = plt.hist(inp["x"], bins=nbins, color="blue", density=True, histtype="step", lw=2)
#Gaussian fit, bin centers and values
centers = (0.5*(hx[1][1:]+hx[1][:-1]))
fit_data = DataFrame({"E": centers, "density": hx[0]})
pars, cov = curve_fit(lambda x, mu, sig : norm.pdf(x, loc=mu, scale=sig), fit_data["E"], fit_data["density"])
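    # curve_fit returns the best-fit (mu, sigma) and their covariance matrix;
    # fitting norm.pdf directly is valid here because the histogram is drawn
    # with density=True, so its bin values integrate to one like the pdf.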
#fit function
x = np.linspace(plt.xlim()[0], plt.xlim()[1], 300)
y = norm.pdf(x, pars[0], pars[1])
plt.plot(x, y, "-", label="norm", color="red")
ax.set_xlabel("$x$ (mm)")
ax.set_ylabel("Normalized counts")
leg = legend()
leg.add_entry(leg_lin("red"), "Gaussian fit:")
leg.add_entry(leg_txt(), "$\mu$ (mm): {0:.4f} $\pm$ {1:.4f}".format( pars[0], np.sqrt(cov[0,0]) ))
leg.add_entry(leg_txt(), "$\sigma$ (mm): {0:.4f} $\pm$ {1:.4f}".format( pars[1], np.sqrt(cov[1,1]) ))
leg.draw(plt, col)
fig.savefig("01fig.pdf", bbox_inches = "tight")
plt.close()
#plot_x
#_____________________________________________________________________________
def plot_y():
#y of primary vertex
infile = "data/vtx_18x275_3p3_r2.csv"
#infile = "data/vtx_18x275_3p4.csv"
inp = read_csv(infile)
nbins = 60
#plt.style.use("dark_background")
#col = "lime"
col = "black"
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
set_axes_color(ax, col)
set_grid(plt, col)
hx = plt.hist(inp["y"], bins=nbins, color="blue", density=True, histtype="step", lw=2)
#Gaussian fit, bin centers and values
centers = (0.5*(hx[1][1:]+hx[1][:-1]))
fit_data = DataFrame({"E": centers, "density": hx[0]})
pars, cov = curve_fit(lambda x, mu, sig : norm.pdf(x, loc=mu, scale=sig), fit_data["E"], fit_data["density"])
#fit function
x = np.linspace(plt.xlim()[0], plt.xlim()[1], 300)
y = norm.pdf(x, pars[0], pars[1])
plt.plot(x, y, "-", label="norm", color="red")
plt.rc("text", usetex = True)
plt.rc("text.latex", preamble=r"\usepackage{siunitx}")
ax.set_xlabel("$y$ (mm)")
ax.set_ylabel("Normalized counts")
leg = legend()
leg.add_entry(leg_lin("red"), "Gaussian fit:")
leg.add_entry(leg_txt(), "$\mu$ (\si{\micro\meter}): "+"{0:.4f} $\pm$ {1:.4f}".format( pars[0]*1e3, np.sqrt(cov[0,0]*1e3) ))
leg.add_entry(leg_txt(), "$\sigma$ (\si{\micro\meter}): "+"{0:.4f} $\pm$ {1:.4f}".format( pars[1]*1e3, np.sqrt(cov[1,1]*1e3) ))
leg.draw(plt, col)
fig.savefig("01fig.pdf", bbox_inches = "tight")
plt.close()
#plot_y
#_____________________________________________________________________________
def plot_z():
#z of primary vertex
infile = "data/vtx_18x275_3p3_r2.csv"
#infile = "data/vtx_18x275_3p4.csv"
inp = read_csv(infile)
nbins = 50
plt.style.use("dark_background")
col = "lime"
#col = "black"
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
set_axes_color(ax, col)
set_grid(plt, col)
hx = plt.hist(inp["z"], bins=nbins, color="blue", density=True, histtype="step", lw=2)
#Gaussian fit, bin centers and values
centers = (0.5*(hx[1][1:]+hx[1][:-1]))
fit_data = DataFrame({"E": centers, "density": hx[0]})
pars, cov = curve_fit(lambda x, mu, sig : norm.pdf(x, loc=mu, scale=sig), fit_data["E"], fit_data["density"])
#fit function
x = np.linspace(plt.xlim()[0], plt.xlim()[1], 300)
y = norm.pdf(x, pars[0], pars[1])
plt.plot(x, y, "-", label="norm", color="red")
ax.set_xlabel("$z$ (mm)")
ax.set_ylabel("Normalized counts")
leg = legend()
leg.add_entry(leg_lin("red"), "Gaussian fit:")
leg.add_entry(leg_txt(), "$\mu$ (mm): {0:.3f} $\pm$ {1:.3f}".format( pars[0], np.sqrt(cov[0,0]) ))
leg.add_entry(leg_txt(), "$\sigma$ (mm): {0:.3f} $\pm$ {1:.3f}".format( pars[1], np.sqrt(cov[1,1]) ))
leg.draw(plt, col)
fig.savefig("01fig.pdf", bbox_inches = "tight")
plt.close()
#plot_z
#_____________________________________________________________________________
def set_axes_color(ax, col):
#[t.set_color('red') for t in ax.xaxis.get_ticklines()]
#[t.set_color('red') for t in ax.xaxis.get_ticklabels()]
ax.xaxis.label.set_color(col)
ax.yaxis.label.set_color(col)
ax.tick_params(which = "both", colors = col)
ax.spines["bottom"].set_color(col)
ax.spines["left"].set_color(col)
ax.spines["top"].set_color(col)
ax.spines["right"].set_color(col)
#set_axes_color
#_____________________________________________________________________________
def set_grid(px, col="lime"):
px.grid(True, color = col, linewidth = 0.5, linestyle = "--")
#set_grid
#_____________________________________________________________________________
class legend:
def __init__(self):
self.items = []
self.data = []
def add_entry(self, i, d):
self.items.append(i)
self.data.append(d)
def draw(self, px, col=None, **kw):
leg = px.legend(self.items, self.data, **kw)
if col is not None:
px.setp(leg.get_texts(), color=col)
if col != "black":
leg.get_frame().set_edgecolor("orange")
return leg
#_____________________________________________________________________________
def leg_lin(col, sty="-"):
return Line2D([0], [0], lw=2, ls=sty, color=col)
#_____________________________________________________________________________
def leg_txt():
return Line2D([0], [0], lw=0)
#_____________________________________________________________________________
def leg_dot(fig, col, siz=8):
return Line2D([0], [0], marker="o", color=fig.get_facecolor(), markerfacecolor=col, markersize=siz)
#_____________________________________________________________________________
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
95841f3405e5128f4dfbcdc76b3bc8bc7578f644
|
eafd99d910af8cdcf1ec1b96d03c342e5306af49
|
/tf-idf.py
|
803e8ec98d0ee3310165b47a2f88d53407dc526f
|
[] |
no_license
|
paulohq/cosine
|
0323db87138a1f7b34f680b20f93b0a41a2c72b4
|
f22c3c7b6af9eb149046d70654dd82a7bf69c738
|
refs/heads/master
| 2020-05-09T19:36:50.164238 | 2020-04-13T23:41:31 | 2020-04-13T23:41:31 | 181,383,427 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,179 |
py
|
# test of TF-IDF generation for the corpus
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.corpus import stopwords
import math
def computeTF(wordDict, bagOfWords):
tfDict = {}
bagOfWordsCount = len(bagOfWords)
for word, count in wordDict.items():
tfDict[word] = count / float(bagOfWordsCount)
return tfDict
def computeIDF(documents):
    N = len(documents)
    idfDict = dict.fromkeys(documents[0].keys(), 0)
idfDict = dict.fromkeys(documents[0].keys(), 0)
for document in documents:
for word, val in document.items():
if val > 0:
idfDict[word] += 1
for word, val in idfDict.items():
idfDict[word] = math.log(N / float(val))
return idfDict
def computeTFIDF(tfBagOfWords, idfs):
tfidf = {}
for word, val in tfBagOfWords.items():
tfidf[word] = val * idfs[word]
return tfidf
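# For reference: tf(t, d) = count(t, d) / len(d), idf(t) = ln(N / df(t)) and
# tfidf(t, d) = tf(t, d) * idf(t). Note that sklearn's TfidfVectorizer used
# below applies a smoothed idf, ln((1 + N) / (1 + df(t))) + 1, and then
# l2-normalizes each row, so its DataFrame will differ from the manual one and
# the per-document "l2 norm" printed at the end should come out as 1.0.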
print(stopwords.words('english'))
documentA = "the man keeps walking" #'the man went out for a walk'
documentB = "the children study" #'the children sat around the fire'
documentC = "the woman teach the lesson"
documentD = "the woman teach the children" #"the idiot speak shit"
bagOfWordsA = documentA.split(' ')
bagOfWordsB = documentB.split(' ')
bagOfWordsC = documentC.split(' ')
bagOfWordsD = documentD.split(' ')
uniqueWords = set(bagOfWordsA).union(set(bagOfWordsB)).union(set(bagOfWordsC)).union(set(bagOfWordsD))
numOfWordsA = dict.fromkeys(uniqueWords, 0)
for word in bagOfWordsA:
numOfWordsA[word] += 1
numOfWordsB = dict.fromkeys(uniqueWords, 0)
for word in bagOfWordsB:
numOfWordsB[word] += 1
numOfWordsC = dict.fromkeys(uniqueWords, 0)
for word in bagOfWordsC:
numOfWordsC[word] += 1
numOfWordsD = dict.fromkeys(uniqueWords, 0)
for word in bagOfWordsD:
numOfWordsD[word] += 1
print("Num Palavras A:",numOfWordsA)
print("Num Palavras B:",numOfWordsB)
print("Num Palavras C:",numOfWordsC)
print("Num Palavras D:",numOfWordsD)
tfA = computeTF(numOfWordsA, bagOfWordsA)
print("tfA", tfA)
tfB = computeTF(numOfWordsB, bagOfWordsB)
print("tfB", tfB)
tfC = computeTF(numOfWordsC, bagOfWordsC)
print("tfC", tfC)
tfD = computeTF(numOfWordsD, bagOfWordsD)
print("tfD", tfD)
idfs = computeIDF([numOfWordsA, numOfWordsB, numOfWordsC, numOfWordsD])
print("IDF:", idfs)
tfidfA = computeTFIDF(tfA, idfs)
tfidfB = computeTFIDF(tfB, idfs)
tfidfC = computeTFIDF(tfC, idfs)
tfidfD = computeTFIDF(tfD, idfs)
print("TFIDF A: ", tfidfA)
print("TFIDF B: ", tfidfB)
print("TFIDF C: ", tfidfC)
print("TFIDF D: ", tfidfD)
df = pd.DataFrame([tfidfA, tfidfB, tfidfC, tfidfD])
print("df")
print(df)
vectorizer = TfidfVectorizer()
vectors = vectorizer.fit_transform([documentA, documentB, documentC, documentD])
feature_names = vectorizer.get_feature_names()
print("feature names:", feature_names)
dense = vectors.todense()
denselist = dense.tolist()
dfsk = pd.DataFrame(denselist, columns=feature_names)
for doc in denselist:
print(doc)
    n = 0.0
for i in range(len(doc)):
n = n + doc[i] * doc[i]
n = math.sqrt(n)
print("l2 norm = ", n)
# for c in doc:
# print(c, ",")
# print("\n")
print("df sklearn:")
print(dfsk)
|
[
"="
] |
=
|
5a51db951f50d7a40e7dd7898836d24536d129dc
|
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
|
/python/python_26911.py
|
835706fd5386fe2807524e81564a1ceb637ffd94
|
[] |
no_license
|
AK-1121/code_extraction
|
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
|
5297a4a3aab3bb37efa24a89636935da04a1f8b6
|
refs/heads/master
| 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 97 |
py
|
# Cutting image after a certain point
# (extracted snippet: `hat`, `faceandhair` and `mask` are assumed to be PIL
# images loaded elsewhere)
from PIL import Image
Image.composite(hat, faceandhair, mask).save("RESULT.png")
|
[
"[email protected]"
] | |
3c69f7778504bf4fa0dba20be9624a0e8fa8483a
|
0ffdf8ab6c5a875bfd8c3e06456131a0f3abad62
|
/contrib/devtools/update-translations.py
|
9266596bcb8562a640361a6811c64b7a51f9e5df
|
[
"MIT"
] |
permissive
|
FYNCOIN-Foundation/FYNCOIN
|
d06be9163090155a540b369512b9f6ec7f2410f3
|
835ce3be2fb20632fb9443293d86caad620a1f7e
|
refs/heads/master
| 2020-03-26T11:55:35.065862 | 2018-09-02T10:26:22 | 2018-09-02T10:26:22 | 144,866,105 | 0 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,148 |
py
|
#!/usr/bin/env python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'fyn_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
try:
specifiers.append(s[percent+1])
        except IndexError:  # '%' was the final character of the string
            print('Failed to get specifier in %r' % s, file=sys.stderr)
pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# If both numeric format specifiers and "others" are used, assume we're dealing
# with a Qt-formatted message. In the case of Qt formatting (see https://doc.qt.io/qt-5/qstring.html#arg)
# only numeric formats are replaced at all. This means "(percentage: %1%)" is valid, without needing
# any kind of escaping that would be necessary for strprintf. Without this, this function
# would wrongly detect '%)' as a printf format specifier.
if numeric:
other = []
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
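# Example: find_format_specifiers('%1 of %2') yields ['1', '2'], which
# split_format_specifiers classifies as Qt-style (numeric), while '%s: %d'
# yields ['s', 'd'], classified as strprintf-style (other).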
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", ''')
text = text.replace('"', '"')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood only 'utf-8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
fetch_all_translations()
postprocess_translations()
|
[
"[email protected]"
] | |
cbd89cb2e8587a7d490be95bfed1e308b413fcbe
|
aa8fe9e165df16bd17aa5720b8043c533adde9bb
|
/init/00_lockfile.py
|
4843f9c2f344b55760f6b74481b55a42eabab1c1
|
[] |
no_license
|
teamdiamond/qtlab
|
31d2ccaee2ada84a027f2160553f54757e6f6cdf
|
67d5bbd58c5f4d4ac3914774b56071d51f121010
|
refs/heads/master
| 2022-12-14T12:07:35.223055 | 2019-06-25T06:53:57 | 2019-06-25T06:53:57 | 15,255,712 | 0 | 4 | null | 2022-12-07T23:37:45 | 2013-12-17T13:54:53 |
Python
|
UTF-8
|
Python
| false | false | 322 |
py
|
from lib import config, lockfile
import os
_lockname = os.path.join(config.get_execdir(), 'qtlab.lock')
lockfile.set_filename(_lockname)
del _lockname
msg = "QTlab already running, start with '-f' to force start.\n"
msg += "Press s<enter> to start anyway or just <enter> to quit."
lockfile.check_lockfile(msg)
|
[
"[email protected]"
] | |
4d5b0222e92f9df4bd437156b9910a3f4474331e
|
13cf11440998376d3b52a49f1e4fb8936c360ac4
|
/tests/saliency_tests/visualizer_tests/test_visualizer_utils.py
|
db7ce7e9b4dd0f1fb233f8a3f1d4c2e5d8a52f95
|
[
"MIT"
] |
permissive
|
k-ishiguro/chainer-chemistry
|
87e3db724de0e99042d9585cd4bd5fff38169339
|
aec33496def16e76bdfbefa508ba01ab9f79a592
|
refs/heads/master
| 2021-07-06T22:58:20.127907 | 2019-02-04T02:51:34 | 2019-02-04T02:51:34 | 169,345,375 | 1 | 1 |
MIT
| 2020-07-30T06:04:13 | 2019-02-06T02:27:39 |
Python
|
UTF-8
|
Python
| false | false | 1,802 |
py
|
import numpy
import pytest
from chainer_chemistry.saliency.visualizer.visualizer_utils import abs_max_scaler # NOQA
from chainer_chemistry.saliency.visualizer.visualizer_utils import min_max_scaler # NOQA
from chainer_chemistry.saliency.visualizer.visualizer_utils import normalize_scaler # NOQA
from chainer_chemistry.saliency.visualizer.visualizer_utils import red_blue_cmap # NOQA
def test_abs_max_scaler():
saliency = numpy.array([1., 2., 3.])
result = abs_max_scaler(saliency)
expected = numpy.array([1. / 3, 2. / 3., 1.])
assert numpy.allclose(result, expected)
# test with 0 arrays
saliency = numpy.array([0, 0, 0])
result = abs_max_scaler(saliency)
expected = numpy.array([0, 0, 0])
assert numpy.allclose(result, expected)
def test_min_max_scaler():
saliency = numpy.array([1., -3., 3.])
result = min_max_scaler(saliency)
expected = numpy.array([4. / 6, 0., 1.])
assert numpy.allclose(result, expected)
# test with 0 arrays
saliency = numpy.array([0, 0, 0])
result = min_max_scaler(saliency)
expected = numpy.array([0, 0, 0])
assert numpy.allclose(result, expected)
def test_normalize_scaler():
saliency = numpy.array([1., 2., 3.])
result = normalize_scaler(saliency)
expected = numpy.array([1./6., 2./6, 3./6.])
assert numpy.allclose(result, expected)
# test with 0 arrays
saliency = numpy.array([0, 0, 0])
result = normalize_scaler(saliency)
expected = numpy.array([0, 0, 0])
assert numpy.allclose(result, expected)
def test_red_blue_cmap():
assert red_blue_cmap(1) == (1., 0., 0.) # Red
assert red_blue_cmap(0) == (1., 1., 1.) # White
assert red_blue_cmap(-1) == (0., 0., 1.) # Blue
if __name__ == '__main__':
pytest.main([__file__, '-v', '-s'])
|
[
"[email protected]"
] | |
a3a4b899eeb29945f03056946232a708ce516fbb
|
36cfda71d39c79ba671b8f86d473bc8b802ae348
|
/C++问答Code/delete.py
|
0fcdd4b60511ec7c85134158caccace5d5275a59
|
[] |
no_license
|
lichangke/QuestionAndAnswer
|
b05e9b0f2ea12c61a7a27f59c81bcf7ebd903c83
|
dd89c2d786050c6b69c4ee93a9eef8d4a22fbfa6
|
refs/heads/master
| 2023-05-02T21:44:53.225373 | 2021-04-27T08:40:37 | 2021-04-27T08:40:37 | 357,549,085 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 603 |
py
|
import os
import shutil
DeleteDirList = [".idea", "cmake-build-debug"]
def funcDeleteDir(path):
    for parent, dirnames, filenames in os.walk(path):  # walk every directory below path
        for dirname in list(dirnames):
            strfilepath = os.path.join(parent, dirname)
            if os.path.isdir(strfilepath) and dirname in DeleteDirList:
                shutil.rmtree(strfilepath)  # delete this directory tree
                dirnames.remove(dirname)  # do not descend into the tree that was just removed
if __name__ == '__main__':
    path = str(".")
    funcDeleteDir(path)
|
[
"[email protected]"
] | |
d7eb16377f4e485d6f9ced4d428c49500c312ff0
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-gsn-edf/gsn-edf_ut=3.0_rd=1_rw=0.06_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=81/params.py
|
9c610d772bb2c0361c637b278eaa66262e55aa96
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 252 |
py
|
{'cpus': 4,
'duration': 30,
'final_util': '3.107095',
'max_util': '3.0',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '1',
'res_nmb': '4',
'res_weight': '0.06',
'scheduler': 'GSN-EDF',
'trial': 81,
'utils': 'uni-medium-3'}
|
[
"[email protected]"
] | |
07e4a5831a138c258c31661783f57549e8e2aa79
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-mgmt-compute/azure/mgmt/compute/v2018_10_01/models/win_rm_configuration_py3.py
|
7ee36d3a256e666ff72aa6de041509fd0721681d
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 |
MIT
| 2020-10-02T01:17:02 | 2019-05-22T07:33:46 |
Python
|
UTF-8
|
Python
| false | false | 1,036 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class WinRMConfiguration(Model):
"""Describes Windows Remote Management configuration of the VM.
:param listeners: The list of Windows Remote Management listeners
:type listeners:
list[~azure.mgmt.compute.v2018_10_01.models.WinRMListener]
"""
_attribute_map = {
'listeners': {'key': 'listeners', 'type': '[WinRMListener]'},
}
def __init__(self, *, listeners=None, **kwargs) -> None:
super(WinRMConfiguration, self).__init__(**kwargs)
self.listeners = listeners
|
[
"[email protected]"
] | |
2f6a82d63491db26eebcd1c31b0fac8b1e2040a2
|
d5e787f85b37f966ccdf0cd5f7a7061eae1c70a8
|
/src/core/celery.py
|
0def2c6a5fba8808a1f787d4065df7b5ec14710d
|
[
"MIT"
] |
permissive
|
iBuilder-Tech/phase
|
5ee6cd1fb410de0d067e7b5b8adfea3c4411b62c
|
cc8f9b9f3e2c31b139d5cce433667c8d5ba2c6f2
|
refs/heads/main
| 2023-07-10T18:24:20.291136 | 2021-08-31T13:22:17 | 2021-08-31T13:22:17 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 417 |
py
|
import os
from celery import Celery
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core.settings.production')
from django.conf import settings # noqa
app = Celery('phase')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def celery_debug_task(self):
print('This is a debug task to verify that Celery works')
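# Usage sketch (assuming a broker is configured through the CELERY_* settings):
# start a worker, e.g. `celery -A core worker`, then queue the task from
# Django code with `celery_debug_task.delay()`.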
|
[
"[email protected]"
] | |
b8dfd6bf8be08eccce64129d19717fa8d4ac4eed
|
997c82f5d9684945fb2f5d5481dc4d251a93755f
|
/famapy/metamodels/bdd_metamodel_withObjects/models/bdd_model.py
|
af0274543db70bddfc413d11b3eaabfab872f8f1
|
[] |
no_license
|
jmhorcas/famapy-aafms
|
a6e45b5fff2c820037daf95151df5bc6895b1611
|
bcc80f7061bed4d6bfd536f9d53cf195bffa01e6
|
refs/heads/main
| 2023-08-24T05:51:47.337325 | 2021-10-15T10:18:20 | 2021-10-15T10:18:20 | 389,559,981 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,424 |
py
|
from collections import defaultdict
from dd.autoref import BDD, Function
from famapy.metamodels.cnf_metamodel.models.cnf_model import CNFNotation, CNFLogicConnective
class BDDModel:
"""A Binary Decision Diagram (BDD) representation of the feature model given as a CNF formula.
It relies on the dd module: https://pypi.org/project/dd/
"""
CNF_NOTATION = CNFNotation.JAVA_SHORT
NOT = CNF_NOTATION.value[CNFLogicConnective.NOT]
AND = CNF_NOTATION.value[CNFLogicConnective.AND]
OR = CNF_NOTATION.value[CNFLogicConnective.OR]
def __init__(self):
self.bdd = BDD() # BDD manager
self.cnf_formula = None
self.root = None
self.variables = []
def from_cnf(self, cnf_formula: str, variables: list[str]):
self.cnf_formula = cnf_formula
self.variables = variables
# Declare variables
for v in self.variables:
self.bdd.declare(v)
# Build the BDD
self.root = self.bdd.add_expr(self.cnf_formula)
# Reorder variables
# variable_order = self.bdd.vars
# var = self.bdd.var_at_level(0)
# level = self.root.level
# variable_order[self.root.var] = 0
# variable_order[var] = level
# self.bdd.reorder(variable_order)
# self.root = self.bdd.var(self.bdd.var_at_level(0))
def index(self, n: Function) -> int:
"""Position of the variable that labels the node `n` in the ordering (i.e., the level).
        Example: node `n4` is labeled `B`, and `B` is in the second position of the ordering `[A,B,C]`,
        thus index(n4) = 2.
"""
if n.node == -1 or n.node == 1: # index(n0) = index(n1) = s + 1, being s the number of variables.
return len(self.bdd.vars) + 1
else:
return n.level + 1
def get_high_node(self, node: Function) -> Function:
return ~node.high if node.negated and not self.is_terminal_node(node.high) else node.high
def get_low_node(self, node: Function) -> Function:
return ~node.low if node.negated and not self.is_terminal_node(node.low) else node.low
def is_terminal_node(self, node: Function) -> bool:
return node.var is None
# def traverse(self):
# root = self.root
# self.mark = defaultdict(bool)
# self._traverse(root)
# def _traverse(self, n):
# print('-----')
# print(f'n: {n} (var={n.var}), (level={n.level}), (id={n.node}), (negated={n.negated})')
# self.mark[n.node] = not self.mark[n.node]
# if not self.is_terminal_node(n):
# #level, low, high = self.bdd.succ(n)
# level = n.level
# low = n.low #self.get_low_node(n)
# high = n.high #self.get_high_node(n)
# print(f'|--level: {level}')
# print(f'|--low: {low} (var={low.var}), (level={low.level}), (id={low.node}), (negated={low.negated})')
# print(f'|--high: {high} (var={high.var}), (level={high.level}), (id={high.node}), (negated={high.negated})')
# if self.is_terminal_node(low) and low.negated:
# print(f'negated: {~low}')
# print('-----')
# if self.mark[n.node] != self.mark[low.node]:
# self._traverse(low)
# if self.mark[n.node] != self.mark[high.node]:
# self._traverse(high)
|
[
"[email protected]"
] | |
380d2de8a0216873416cca9c2d5e636526a7dd16
|
e73003ad3417daf4eb4b4e9909b42225833aedea
|
/0x07-python-test_driven_development/0-add_integer.py
|
0a8fd23207973a5fd688889a15bd00d764bc346b
|
[] |
no_license
|
Alphamj/holbertonschool-higher_level_programming
|
34f10da3407969928a333af8a6ef52d2817d838a
|
379ed7fc70f8ba7e4c41e07b4ae804c21d540725
|
refs/heads/master
| 2023-03-15T13:11:04.545118 | 2020-09-25T01:38:33 | 2020-09-25T01:38:33 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 533 |
py
|
#!/usr/bin/python3
"""
My add module
add_integer: function that add two numbers
Return: the add of two intigers
"""
def add_integer(a, b=98):
""" Return the add of intigers
a and b are intigers
"""
if not isinstance(a, (int, float)) or isinstance(a, bool):
raise TypeError("a must be an integer")
elif not isinstance(b, (int, float)) or isinstance(b, bool):
raise TypeError("b must be an integer")
else:
a = int(round(a))
b = int(round(b))
return (a + b)
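# Examples: add_integer(100.3, -2) returns 98 (100.3 is rounded to 100), and
# add_integer(4, "School") raises TypeError("b must be an integer").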
|
[
"[email protected]"
] | |
da35d8a5edab37d07c1c2a4664be8b6030f98f66
|
35cf6fc79b8d6c335add8e55e0f4dca6f2816d1d
|
/Python_Study/Selenium自动化/Day02/Email/send_email.py
|
f4b9ce3225290a4c822ebb3ee2db697acd756345
|
[] |
no_license
|
KongChan1988/51CTO-Treasure
|
08b4ca412ad8a09d67c1ea79c7149f8573309ca4
|
edb2e4bd11d39ac24cd240f3e815a88361867621
|
refs/heads/master
| 2021-07-04T15:57:56.164446 | 2019-07-24T15:28:36 | 2019-07-24T15:28:36 | 97,453,749 | 5 | 8 | null | 2019-10-30T22:05:12 | 2017-07-17T08:34:59 |
Python
|
UTF-8
|
Python
| false | false | 2,369 |
py
|
# -*- coding:utf-8 -*-
# Author:D.Gray
import unittest
import smtplib
import time
import os
from email.mime.text import MIMEText
from email.header import Header
from HTMLTestRunner import HTMLTestRunner
print("开始发送邮件".center(50,"-"))
def sendReport(file_new):
with open(file_new,'rb') as f:
email_body = f.read()
msg = MIMEText
# print("发送邮件".center(50,"-"))
# def sendReport(file_new):
# '''
#
# :param file_new:
# :return:
# '''
# with open(file_new,"rb") as f:
# new_body = f.read()
# msg = MIMEText(new_body,"html","utf-8") #构造MIMEText对象,作为邮件内容的形式进行附加
# msg["Subject"] = Header("自动化测试报告","utf-8")
# msg["From"] = "[email protected]" #发送地址
# msg["to"] = "[email protected]" #收件地址
#
# smtp = smtplib.SMTP("smtp.mxhichina.com") #邮件服务器地址
# smtp.login("[email protected]","sdchendijayD1988") #邮箱账号和密码
# smtp.sendmail(msg["From"],msg["to"].split(";"),msg.as_string()) #多个收件人用 ;号分割
# smtp.quit()
# print("The HTML Send Out".center(50,"-") )
#
# def newReport(testReport):
# lists = os.listdir(testReport) #操作本地目录 列出本地目录文件
# lists2 = sorted(lists) #获得排序后的测试报告列表
# file_name = os.path.join(testReport,lists2[-1]) #获得最新一条HTML报告
# # print(file_name)
# return file_name
#
# print("开始运行".center(50,"-"))
# if __name__ == '__main__':
# test_dir = "E:\\python_work\\51CTO_Python\Selenium自动化\Day02\Email" #测试用例路径
# test_report = "E:\\python_work\\51CTO_Python\Selenium自动化\Day02\Email\TestReport" #测试报告路径
#
# discover = unittest.defaultTestLoader.discover(test_dir,"baidu.py") #加载测试函数
# now = time.strftime("%Y-%m-%d %H%M%S") #当前时间
# file_path = os.path.join(test_report,"%sresult.html"%now) #拼接出测试报告名称
# with open(file_path,"wb") as fe:
# runner = HTMLTestRunner(stream=fe,title="测试结果",description="测试执行结果")
# runner.run(discover)
# new_report = newReport(test_report) #获取最新测试报告
# print(new_report)
# sendReport(new_report) #发送最新测试报告
|
[
"[email protected]"
] | |
73297c5c9fd66b418c12dcf9b3d27bcd9b384d06
|
ac45b55915e634815922329195c203b1e810458c
|
/panstars630_18.py
|
d98a96508dabb530f35f2383490b9e488ac6b495
|
[] |
no_license
|
mj1e16lsst/iridisPeriodicNew
|
96a8bfef0d09f13e18adb81b89e25ae885e30bd9
|
dc0214b1e702b454e0cca67d4208b2113e1fbcea
|
refs/heads/master
| 2020-03-23T15:01:23.583944 | 2018-07-23T18:58:59 | 2018-07-23T18:58:59 | 141,715,292 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,060 |
py
|
from operator import add
#from astropy import units as u
#from astropy.coordinates import SkyCoord
#from astropy.stats import LombScargle
#from gatspy.periodic import LombScargleFast
from functools import partial
#from gatspy import periodic
#import matplotlib.pyplot as plt
#from matplotlib.font_manager import FontProperties
import lomb_scargle_multiband as periodic
from multiprocessing import Pool
import numpy as np
import os
#from sqlite3 import *
import random
from random import shuffle
from random import randint
import Observations
import Magnitudes
# In[13]:
#conn = connect('minion_1016_sqlite.db')
#conn = connect('astro_lsst_01_1004_sqlite.db')
#conn = connect('minion_1020_sqlite.db')
# In[14]:
# LSST zero points u,g,r,i,z,y
zeroPoints = [0,26.5,28.3,28.13,27.79,27.4,26.58]
FWHMeff = [0.8,0.92,0.87,0.83,0.80,0.78,0.76] # arcmins?
pixelScale = 0.2
readOut = 12.7
sigSys = 0.005
flareperiod = 4096
flarecycles = 10
dayinsec=86400
background = 40
# sat mag u,g,r,i,z,y=14.7,15.7,15.8,15.8,15.3 and 13.9
# start date 59580.033829 end date + 10 years
#maglist=[20]*7
lim = [0, 23.5, 24.8, 24.4, 23.9, 23.3, 22.1] # limiting magnitude u,g,r,i,z,y
sat = [0, 14.7, 15.7, 15.8, 15.8, 15.3, 13.9] # sat mag as above
# In[15]:
looooops = 10000
maglength = 20
freqlength = 20
processors = 20
startnumber = 0 + 18
endnumber = startnumber + 1
#observingStrategy = 'minion'
observingStrategy = 'astroD'
#observingStrategy = 'panstars'
inFile = '/home/mj1e16/periodic/in'+str(startnumber)+'.txt'
outFile = '/home/mj1e16/periodic/outpanstars630'+str(startnumber)+'.txt'
#inFile = '/home/ubuntu/vagrant/'+observingStrategy+'/in'+observingStrategy+'KtypefullresultsFile'+str(startnumber)+'.txt'
#outFile = '/home/ubuntu/vagrant/'+observingStrategy+'/out'+observingStrategy+'KtypefullresultsFile'+str(startnumber)+'.txt'
obs = Observations.obspanstars630
# In[19]:
def magUncertainy(Filter, objectmag, exposuretime,background, FWHM): # b is background counts per pixel
countsPS = 10**((Filter-objectmag)/2.5)
counts = countsPS * exposuretime
uncertainty = 1/(counts/((counts/2.3)+(((background/2.3)+(12.7**2))*2.266*((FWHM/0.2)**2)))**0.5) # gain assumed to be 1
return uncertainty
#from lsst should have got the website! https://smtn-002.lsst.io/
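# For reference, the expression above is sigma_m ~ 1/SNR with
#   SNR = C / sqrt(C/2.3 + (B/2.3 + 12.7**2) * 2.266 * (FWHM/pixelScale)**2),
# where C are the source counts, B the background counts per pixel, 12.7 the
# read-out noise defined above and 2.266*(FWHM/pixelScale)**2 the effective
# aperture area in pixels.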
# In[20]:
def averageFlux(observations, Frequency, exptime):
b = [0]*len(observations)
for seconds in range(0, exptime):
a = [np.sin((2*np.pi*(Frequency))*(x+(seconds/(3600*24)))) for x in observations] # optical modulation
b = map(add, a, b)
c = [z/exptime for z in b]
return c
def Flux(observations,Frequency,exptime):
a = [np.sin((2*np.pi*(Frequency)*x)) for x in observations]
return a
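# Example: for a 1 cycle/day signal sampled at t = [0.0, 0.25] days,
# Flux([0.0, 0.25], 1, 30) gives [sin(0), sin(pi/2)] = [0.0, 1.0], while
# averageFlux additionally smears each sample over the 30 s exposure.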
# In[21]:
def ellipsoidalFlux(observations, Frequency,exptime):
period = 1/(Frequency)
phase = [(x % (2*period)) for x in observations]
b = [0]*len(observations)
for seconds in range(0, exptime):
a = [np.sin((2*np.pi*(Frequency))*(x+(seconds/(3600*24)))) for x in observations] # optical modulation
b = map(add, a, b)
c = [z/exptime for z in b]
for x in range(0,len(phase)):
if (phase[x]+(1.5*period)) < (3*period):
c[x] = c[x]*(1./3.)
else:
c[x] = c[x]*(2./3.)
return c
## this is doing something but not the right something, come back to it
# In[22]:
def flaring(B, length, dayinsec=86400,amplitude=1):
global flareMag, minutes
fouriers = np.linspace(0.00001,0.05,(dayinsec/30))
logF = [np.log(x) for x in fouriers] # start at 30 go to a day in 30 sec increments
real = [random.gauss(0,1)*((1/x)**(B/2)) for x in fouriers] #random.gauss(mu,sigma) to change for values from zurita
# imaginary = [random.gauss(0,1)*((1/x)**(B/2)) for x in fouriers]
IFT = np.fft.ifft(real)
seconds = np.linspace(0,dayinsec, (dayinsec/30)) # the day in 30 sec increments
minutes = [x for x in seconds]
minimum = (np.max(-IFT))
positive = [x + minimum for x in IFT] # what did this even achieve? it helped with normalisation!
normalised = [x/(np.mean(positive)) for x in positive] # find normalisation
normalisedmin = minimum/(np.mean(positive))
normalised = [x - normalisedmin for x in normalised]
flareMag = [amplitude * x for x in normalised] # normalise to amplitude
logmins = [np.log(d) for d in minutes] # for plotting?
# plt.plot(minutes,flareMag)
# plt.title('lightcurve')
# plt.show()
return flareMag
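# flaring() is effectively a red-noise generator: Gaussian Fourier amplitudes
# are drawn with a power-law (1/f)**(B/2) envelope, inverse-FFTed into the
# time domain, and then shifted/normalised to a positive curve of unit mean
# before scaling to the requested amplitude.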
# In[55]:
def lombScargle(frequencyRange,objectmag=20,loopNo=looooops,df=0.001,fmin=0.001,numsteps=100000,modulationAmplitude=0.1,Nquist=200): # frequency range and object mag in list
#global totperiod, totmperiod, totpower, date, amplitude, frequency, periods, LSperiod, power, mag, error, SigLevel
results = {}
totperiod = []
totmperiod = []
totpower = [] # reset
SigLevel = []
filterletter = ['o','u','g','r','i','z','y']
period = 1/(frequencyRange)
if period > 0.5:
numsteps = 10000
elif period > 0.01:
numsteps = 100000
else:
numsteps = 200000
freqs = fmin + df * np.arange(numsteps) # for manuel
allobsy, uobsy, gobsy, robsy, iobsy, zobsy, yobsy = [], [], [], [], [], [], [] #reset
measuredpower = [] # reset
y = [allobsy, uobsy, gobsy, robsy, iobsy, zobsy, yobsy] # for looping only
for z in range(1, len(y)):
        #y[z] = averageFlux(obs[z], frequencyRange[frange], 30) # amplitude calculation for the observations and frequency range
y[z] = ellipsoidalFlux(obs[z], frequencyRange,30)
y[z] = [modulationAmplitude * t for t in y[z]] # scaling
for G in range(0, len(y[z])):
flareMinute = int(round((obs[z][G]*24*60*2)%((dayinsec/(30*2))*flarecycles)))
y[z][G] = y[z][G] + longflare[flareMinute] # add flares swapped to second but not changing the name intrtoduces fewer bugs
date = []
amplitude = []
mag = []
error = []
filts = []
for z in range(1, len(y)):
if objectmag[z] > sat[z] and objectmag[z] < lim[z]:
#date.extend([x for x in obs[z]])
date.extend(obs[z])
amplitude = [t + random.gauss(0,magUncertainy(zeroPoints[z],objectmag[z],30,background,FWHMeff[z])) for t in y[z]] # scale amplitude and add poisson noise
mag.extend([objectmag[z] - t for t in amplitude]) # add actual mag
error.extend([sigSys + magUncertainy(zeroPoints[z],objectmag[z],30,background,FWHMeff[z])+0.2]*len(amplitude))
filts.extend([filterletter[z]]*len(amplitude))
phase = [(day % (period*2))/(period*2) for day in obs[z]]
pmag = [objectmag[z] - t for t in amplitude]
# plt.plot(phase, pmag, 'o', markersize=4)
# plt.xlabel('Phase')
# plt.ylabel('Magnitude')
# plt.gca().invert_yaxis()
# plt.title('filter'+str(z)+', Period = '+str(period))#+', MeasuredPeriod = '+str(LSperiod)+', Periodx20 = '+(str(period*20)))
# plt.show()
# plt.plot(date, mag, 'o')
# plt.xlim(lower,higher)
# plt.xlabel('time (days)')
# plt.ylabel('mag')
# plt.gca().invert_yaxis()
# plt.show()
model = periodic.LombScargleMultibandFast(fit_period=False)
model.fit(date, mag, error, filts)
power = model.score_frequency_grid(fmin, df, numsteps)
if period > 10.:
model.optimizer.period_range=(10, 110)
elif period > 0.51:
model.optimizer.period_range=(0.5, 10)
elif period > 0.011:
model.optimizer.period_range=(0.01, 0.52)
else:
model.optimizer.period_range=(0.0029, 0.012)
LSperiod = model.best_period
if period < 10:
higher = 10
else:
higher = 100
# fig, ax = plt.subplots()
# ax.plot(1./freqs, power)
# ax.set(xlim=(0, higher), ylim=(0, 1.2),
# xlabel='period (days)',
# ylabel='Lomb-Scargle Power',
# title='Period = '+str(period)+', MeasuredPeriod = '+str(LSperiod)+', Periodx20 = '+(str(period*20)));
# plt.show()
phase = [(day % (period*2))/(period*2) for day in date]
#idealphase = [(day % (period*2))/(period*2) for day in dayZ]
#print(len(phase),len(idealphase))
#plt.plot(idealphase,Zmag,'ko',)
# plt.plot(phase, mag, 'o', markersize=4)
# plt.xlabel('Phase')
# plt.ylabel('Magnitude')
# plt.gca().invert_yaxis()
# plt.title('Period = '+str(period)+', MeasuredPeriod = '+str(LSperiod)+', Periodx20 = '+(str(period*20)))
# plt.show()
#print(period, LSperiod, period*20)
# print('actualperiod', period, 'measured period', np.mean(LSperiod),power.max())# 'power',np.mean(power[maxpos]))
# print(frequencyRange[frange], 'z', z)
# totperiod.append(period)
# totmperiod.append(np.mean(LSperiod))
# totpower.append(power.max())
mpower = power.max()
measuredpower.append(power.max()) # should this correspond to period power and not max power?
maxpower = []
counter = 0.
for loop in range(0,loopNo):
random.shuffle(date)
model = periodic.LombScargleMultibandFast(fit_period=False)
model.fit(date, mag, error, filts)
power = model.score_frequency_grid(fmin, df, numsteps)
maxpower.append(power.max())
for X in range(0, len(maxpower)):
if maxpower[X] > measuredpower[-1]:
counter = counter + 1.
Significance = (1.-(counter/len(maxpower)))
#print('sig', Significance, 'counter', counter)
SigLevel.append(Significance)
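    # The shuffle loop above is a permutation test: scrambling the dates
    # destroys any real periodicity, so the fraction of shuffled runs whose
    # peak power exceeds the measured peak estimates the false-alarm
    # probability, i.e. 1 - Significance.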
#freqnumber = FrangeLoop.index(frequencyRange)
#magnumber = MagRange.index(objectmag)
#print(fullmaglist)
#listnumber = (magnumber*maglength)+freqnumber
# print(listnumber)
# measuredperiodlist[listnumber] = LSperiod
# periodlist[listnumber] = period
# powerlist[listnumber] = mpower
# siglist[listnumber] = Significance
# fullmaglist[listnumber] = objectmag
# results order, 0=mag,1=period,2=measuredperiod,3=siglevel,4=power,5=listnumber
results[0] = objectmag[3]
results[1] = period
results[2] = LSperiod
results[3] = Significance
results[4] = mpower
results[5] = 0#listnumber
return results
# In[24]:
#findObservations([(630,)])
#remove25(obs)
#averageFlux(obs[0], 1, 30)
longflare = []
for floop in range(0,flarecycles):
flareone = flaring(-1, flareperiod, amplitude=0.3)
flareone = flareone[0:1440]
positiveflare = [abs(x) for x in flareone]
longflare.extend(positiveflare)
# In[25]:
PrangeLoop = np.logspace(-2.5,2,freqlength)
FrangeLoop = [(1/x) for x in PrangeLoop]
# In[26]:
# reset results file
with open(inFile,'w') as f:
f.write('fullmaglist \n\n periodlist \n\n measuredperiodlist \n\n siglist \n\n powerlist \n\n listnumberlist \n\n end of file')
# In[57]:
results = []
fullmeasuredPeriod = []
fullPeriod = []
fullPower = []
fullSigLevel = []
fullMag = []
MagRangearray = np.linspace(17,24,maglength)
MagRange = [x for x in MagRangearray]
maglist = []
for x in range(len(MagRange)):
maglist.append([MagRange[x]]*7)
newlist = Magnitudes.mag630
pool = Pool(processors)
for h in range(startnumber,endnumber):
print(newlist[h])
results.append(pool.map(partial(lombScargle, objectmag=newlist[h]),FrangeLoop))
twoDlist = [[],[],[],[],[],[]]
for X in range(len(results)):
for Y in range(len(results[X])):
twoDlist[0].append(results[X][Y][0])
twoDlist[1].append(results[X][Y][1])
twoDlist[2].append(results[X][Y][2])
twoDlist[3].append(results[X][Y][3])
twoDlist[4].append(results[X][Y][4])
twoDlist[5].append(results[X][Y][5])
with open(inFile, 'r') as istr:
with open(outFile,'w') as ostr:
for i, line in enumerate(istr):
# Get rid of the trailing newline (if any).
line = line.rstrip('\n')
if i % 2 != 0:
line += str(twoDlist[int((i-1)/2)])+','
ostr.write(line+'\n')
|
[
"[email protected]"
] | |
c47994dc7e144c848612afbf957dd5ef9965dc65
|
079c07c5d97eb60d36269e27309e84b25ea0aaeb
|
/guidehero-backend/tests/api/ask/test_set_askers.py
|
fa36cc6582edab111b92747b6fc3a4ca8ef0038f
|
[] |
no_license
|
itdream-dev/python
|
3aa44329673f05e2a86e1cba56cb88101c777233
|
eda81b802b99f45933bdf0d22b508837cfa538f0
|
refs/heads/master
| 2023-03-05T12:27:42.776870 | 2020-05-11T15:54:45 | 2020-05-11T15:54:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,742 |
py
|
import unittest
import json
from ..base import ApiBaseTestCase
from lib.models.card import Card
from lib.models.user import User
from lib.models.user_role_card import UserRoleCard
from lib.models.card_role import CardRole
@unittest.skip('set_askers endpoint is obsolete')
class SetAskersTestCase(ApiBaseTestCase):
def setUp(self):
super(SetAskersTestCase, self).setUp()
# input data
self.owner = User(username='owner')
self.db.session.add(self.owner)
self.deck = Card(type=Card.DECK, name='test_deck', creator=self.owner, is_ask_mode_enabled=True)
self.db.session.add(self.deck)
self.asker = User(username='asker')
self.db.session.add(self.asker)
self.db.session.commit()
self.asker_ids = None
def call_target(self):
asker_ids = self.asker_ids
if asker_ids is None:
asker_ids = [self.asker.id]
data = {
'deck_id': self.deck.id,
'user_ids': asker_ids,
}
response = self.client.post('/api/v1/deck/set_askers', data=json.dumps(data), content_type='application/json')
self.assertEqual(response.status, '200 OK')
return response
def assert_askers(self, asker_ids):
urc_s = list(UserRoleCard.query.filter(UserRoleCard.card_id == self.deck.id, UserRoleCard.role_id == CardRole.JOINED).all())
self.assertEqual(len(urc_s), len(asker_ids))
urc_s = sorted(urc_s, key=lambda it: it.user_id)
asker_ids = sorted(asker_ids)
for idx in range(0, len(asker_ids)):
self.assertEqual(urc_s[idx].user_id, asker_ids[idx])
def test_success(self):
self.call_target()
self.assert_askers([self.asker.id])
|
[
"[email protected]"
] | |
4c33afd7ec68d476be15ac1a4b104c83b7b17113
|
e9abd9878f1bef884e32d754b0dfe0c263b6c569
|
/tracing/tracing/trace_data/trace_data.py
|
d42fa62b16296c1ceaf0dc8ca7c303cf00a8ad8e
|
[
"BSD-3-Clause"
] |
permissive
|
SantosTailfeather/catapult
|
ab6ebf71e21a59d3e52f5005a11fdf00d80978bf
|
d93fde1cd5a5c8824b034ac8e8150a62ebc5bcb9
|
refs/heads/master
| 2020-12-21T02:38:00.147479 | 2020-01-25T00:08:10 | 2020-01-25T00:16:15 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,829 |
py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gzip
import json
import logging
import os
import platform
import shutil
import subprocess
import tempfile
import time
import six
try:
StringTypes = six.string_types # pylint: disable=invalid-name
except NameError:
StringTypes = str
_TRACING_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.path.pardir, os.path.pardir)
_TRACE2HTML_PATH = os.path.join(_TRACING_DIR, 'bin', 'trace2html')
MIB = 1024 * 1024
class TraceDataPart(object):
"""Trace data can come from a variety of tracing agents.
Data from each agent is collected into a trace "part" and accessed by the
following fixed field names.
"""
def __init__(self, raw_field_name):
self._raw_field_name = raw_field_name
def __repr__(self):
return 'TraceDataPart("%s")' % self._raw_field_name
@property
def raw_field_name(self):
return self._raw_field_name
def __eq__(self, other):
return self.raw_field_name == other.raw_field_name
def __hash__(self):
return hash(self.raw_field_name)
ANDROID_PROCESS_DATA_PART = TraceDataPart('androidProcessDump')
ATRACE_PART = TraceDataPart('systemTraceEvents')
ATRACE_PROCESS_DUMP_PART = TraceDataPart('atraceProcessDump')
CHROME_TRACE_PART = TraceDataPart('traceEvents')
CPU_TRACE_DATA = TraceDataPart('cpuSnapshots')
TELEMETRY_PART = TraceDataPart('telemetry')
WALT_TRACE_PART = TraceDataPart('waltTraceEvents')
ALL_TRACE_PARTS = {ANDROID_PROCESS_DATA_PART,
ATRACE_PART,
ATRACE_PROCESS_DUMP_PART,
CHROME_TRACE_PART,
CPU_TRACE_DATA,
TELEMETRY_PART}
class _TraceData(object):
"""Provides read access to traces collected from multiple tracing agents.
Instances are created by calling the AsData() method on a TraceDataWriter.
"""
def __init__(self, raw_data):
self._raw_data = raw_data
def HasTracesFor(self, part):
return bool(self.GetTracesFor(part))
def GetTracesFor(self, part):
"""Return the list of traces for |part| in string or dictionary forms."""
if not isinstance(part, TraceDataPart):
raise TypeError('part must be a TraceDataPart instance')
return self._raw_data.get(part.raw_field_name, [])
def GetTraceFor(self, part):
traces = self.GetTracesFor(part)
assert len(traces) == 1
return traces[0]
_TraceItem = collections.namedtuple(
'_TraceItem', ['part_name', 'handle'])
class TraceDataBuilder(object):
"""TraceDataBuilder helps build up a trace from multiple trace agents.
Note: the collected trace data is maintained in a set of temporary files to
be later processed e.g. by the Serialize() method. To ensure proper clean up
of such files clients must call the CleanUpTraceData() method or, even easier,
use the context manager API, e.g.:
with trace_data.TraceDataBuilder() as builder:
builder.AddTraceFor(trace_part, data)
builder.Serialize(output_file)
"""
def __init__(self):
self._traces = []
self._frozen = False
self._temp_dir = tempfile.mkdtemp()
def __enter__(self):
return self
def __exit__(self, *args):
self.CleanUpTraceData()
def OpenTraceHandleFor(self, part, suffix):
"""Open a file handle for writing trace data into it.
Args:
part: A TraceDataPart instance.
suffix: A string used as file extension and identifier for the format
of the trace contents, e.g. '.json'. Can also append '.gz' to
indicate gzipped content, e.g. '.json.gz'.
"""
if not isinstance(part, TraceDataPart):
raise TypeError('part must be a TraceDataPart instance')
if self._frozen:
raise RuntimeError('trace data builder is no longer open for writing')
trace = _TraceItem(
part_name=part.raw_field_name,
handle=tempfile.NamedTemporaryFile(
delete=False, dir=self._temp_dir, suffix=suffix))
self._traces.append(trace)
return trace.handle
def AddTraceFileFor(self, part, trace_file):
"""Move a file with trace data into this builder.
This is useful for situations where a client might want to start collecting
trace data into a file, even before the TraceDataBuilder itself is created.
Args:
part: A TraceDataPart instance.
trace_file: A path to a file containing trace data. Note: for efficiency
the file is moved rather than copied into the builder. Therefore the
source file will no longer exist after calling this method; and the
lifetime of the trace data will thereafter be managed by this builder.
"""
_, suffix = os.path.splitext(trace_file)
with self.OpenTraceHandleFor(part, suffix) as handle:
pass
if os.name == 'nt':
# On windows os.rename won't overwrite, so the destination path needs to
# be removed first.
os.remove(handle.name)
os.rename(trace_file, handle.name)
def AddTraceFor(self, part, data, allow_unstructured=False):
"""Record new trace data into this builder.
Args:
part: A TraceDataPart instance.
data: The trace data to write: a json-serializable dict, or unstructured
text data as a string.
allow_unstructured: This must be set to True to allow passing
unstructured text data as input. Note: the use of this flag is
discouraged and only exists to support legacy clients; new tracing
agents should all produce structured trace data (e.g. proto or json).
"""
if isinstance(data, StringTypes):
if not allow_unstructured:
raise ValueError('must pass allow_unstructured=True for text data')
do_write = lambda d, f: f.write(d)
suffix = '.txt' # Used for atrace and systrace data.
elif isinstance(data, dict):
do_write = json.dump
suffix = '.json'
else:
raise TypeError('invalid trace data type')
with self.OpenTraceHandleFor(part, suffix) as handle:
do_write(data, handle)
def Freeze(self):
"""Do not allow writing any more data into this builder."""
self._frozen = True
return self
def CleanUpTraceData(self):
"""Clean up resources used by the data builder."""
if self._traces is None:
return # Already cleaned up.
self.Freeze()
for trace in self._traces:
# Make sure all trace handles are closed. It's fine if we close some
# of them multiple times.
trace.handle.close()
shutil.rmtree(self._temp_dir)
self._temp_dir = None
self._traces = None
def Serialize(self, file_path, trace_title=None):
"""Serialize the trace data to a file in HTML format."""
self.Freeze()
assert self._traces, 'trace data has already been cleaned up'
trace_files = [trace.handle.name for trace in self._traces]
SerializeAsHtml(trace_files, file_path, trace_title)
def AsData(self):
"""Allow in-memory access to read the collected JSON trace data.
This method is only provided for writing tests which require read access
to the collected trace data (e.g. for tracing agents to test they correctly
write data), and to support legacy TBMv1 metric computation. Only traces
in JSON format are supported.
Be careful: this may require a lot of memory if the traces to process are
    very large. This has led in the past to OOM errors (e.g. crbug/672097).
TODO(crbug/928278): Ideally, this method should be removed when it can be
entirely replaced by calls to an external trace processor.
"""
self.Freeze()
assert self._traces, 'trace data has already been cleaned up'
raw_data = {}
for trace in self._traces:
is_compressed_json = trace.handle.name.endswith('.json.gz')
is_json = trace.handle.name.endswith('.json') or is_compressed_json
if is_json:
traces_for_part = raw_data.setdefault(trace.part_name, [])
opener = gzip.open if is_compressed_json else open
with opener(trace.handle.name, 'rb') as f:
traces_for_part.append(json.load(f))
else:
logging.info('Skipping over non-json trace: %s', trace.handle.name)
return _TraceData(raw_data)
def IterTraceParts(self):
"""Iterates over trace parts.
Return value: iterator over pairs (part_name, file_path).
"""
for trace in self._traces:
yield trace.part_name, trace.handle.name
def CreateTestTrace(number=1):
"""Convenient helper method to create trace data objects for testing.
Objects are created via the usual trace data writing route, so clients are
also responsible for cleaning up trace data themselves.
Clients are meant to treat these test traces as opaque. No guarantees are
made about their contents, which they shouldn't try to read.
"""
builder = TraceDataBuilder()
builder.AddTraceFor(CHROME_TRACE_PART, {'traceEvents': [{'test': number}]})
return builder.Freeze()
def CreateFromRawChromeEvents(events):
"""Convenient helper to create trace data objects from raw Chrome events.
This bypasses trace data writing, going directly to the in-memory json trace
representation, so there is no need for trace file cleanup.
This is used only for testing legacy clients that still read trace data.
"""
assert isinstance(events, list)
return _TraceData({
CHROME_TRACE_PART.raw_field_name: [{'traceEvents': events}]})
def SerializeAsHtml(trace_files, html_file, trace_title=None):
"""Serialize a set of traces to a single file in HTML format.
Args:
trace_files: a list of file names, each containing a trace from
one of the tracing agents.
html_file: a name of the output file.
trace_title: optional. A title for the resulting trace.
"""
if not trace_files:
raise ValueError('trace files list is empty')
input_size = sum(os.path.getsize(trace_file) for trace_file in trace_files)
cmd = []
if platform.system() == 'Windows':
version_cmd = ['python', '-c',
'import sys\nprint(sys.version_info.major)']
version = subprocess.check_output(version_cmd)
    if version.strip() in (b'3', '3'):  # check_output may return bytes on Python 3
      raise RuntimeError('trace2html cannot run with python 3.')
cmd.append('python')
cmd.append(_TRACE2HTML_PATH)
cmd.extend(trace_files)
cmd.extend(['--output', html_file])
if trace_title is not None:
cmd.extend(['--title', trace_title])
start_time = time.time()
subprocess.check_output(cmd)
elapsed_time = time.time() - start_time
logging.info('trace2html processed %.01f MiB of trace data in %.02f seconds.',
1.0 * input_size / MIB, elapsed_time)
|
[
"[email protected]"
] | |
916623c6560a309b4929336812aad08d5b56189e
|
d253cb1e5b52a67a2e20030e6f0f9ddcdbcbd3aa
|
/config/testing.py
|
f3ddff8c058d926e91e16aadf7a6bd2603e50f2f
|
[] |
no_license
|
TheFifthMan/flask-basic-template
|
84e3c8b0acdbd2ede121f267e92ca0ee298e1bce
|
313ce8c6827b1d33cfb0039c57d6d1aa4be8555b
|
refs/heads/master
| 2020-04-07T19:03:19.565438 | 2019-01-29T05:44:03 | 2019-01-29T05:44:03 | 158,634,487 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 211 |
py
|
from .general import Config
import os
class TestingConfig(Config):
Testing = True
SQLALCHEMY_DATABASE_URI = os.getenv("TEST_SQLALCHEMY_DATABASE_URI") or "mysql+pymysql://root:[email protected]/flasky"
|
[
"[email protected]"
] | |
3aab521adda4453f55ff28cfe6d24b1ec4c84c96
|
757c2daa5e2ea70005783d1e5ac78aec47712e9c
|
/python/elb/61-create-elb.py
|
bf88664c92a82190829253256ebdc602123d79a9
|
[] |
no_license
|
obulpathi/aws
|
ab9e6f6a62e8d76f196b063da87c8d8c0d09d25e
|
35c2181377c4c536b7e1d6fb9386705a90f85763
|
refs/heads/master
| 2021-01-17T04:43:10.358333 | 2017-04-10T21:02:23 | 2017-04-10T21:02:23 | 21,324,885 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 705 |
py
|
import boto.ec2.elb
from boto.ec2.elb import HealthCheck
elb_conn = boto.ec2.elb.connect_to_region('us-east-1')
# ELB requires a few pieces to be setup
hc = HealthCheck(
interval=20,
healthy_threshold=3,
unhealthy_threshold=5,
target='TCP:22'
# target='HTTP:8080/health'
)
zones = ['us-east-1a', 'us-east-1b', 'us-east-1c', 'us-east-1d']
ports = [(80, 80, 'http')]
#ports = [(80, 8080, 'http'), (443, 8443, 'tcp')]
# Now create a new load balancer
lb = elb_conn.create_load_balancer('pywebdev-lb', zones, ports)
print 'New ELB: ', lb
print 'New ELB public DNS: ', lb.dns_name
# Add the health check configuration to the ELB.
lb.configure_health_check(hc)
|
[
"[email protected]"
] | |
988ece74cdaa2dcd3a36b809507d1bc66cfb90f4
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03072/s343667969.py
|
87c47d33b4536d07dce370dc61baa549b2bf3c3d
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 172 |
py
|
n = int(input())
h = list(map(int, input().split()))
res = 1
h_max = h[0]
for i in range(1, len(h)):
if h[i] >= h_max:
res += 1
h_max = h[i]
print(res)
|
[
"[email protected]"
] | |
058eda00c75edcb0e82137576ef2baebcbe8ad6c
|
22279487bee5c983c13887ba11e6a4cd40e8bbe3
|
/PreprocessData/all_class_files/CheckInAction.py
|
af33e7115f5e5b86b3228ccc926f998d5bf75519
|
[
"MIT"
] |
permissive
|
DylanNEU/Schema
|
018c9f683c683068422ed7b6392dcebd4ab4d4cd
|
4854720a15894dd814691a55e03329ecbbb6f558
|
refs/heads/main
| 2023-08-30T01:50:20.541634 | 2021-11-01T15:30:41 | 2021-11-01T15:30:41 | 425,238,713 | 1 | 0 |
MIT
| 2021-11-06T12:29:12 | 2021-11-06T12:29:11 | null |
UTF-8
|
Python
| false | false | 899 |
py
|
from PreprocessData.all_class_files.CommunicateAction import CommunicateAction
import global_data
class CheckInAction(CommunicateAction):
def __init__(self, additionalType=None, alternateName=None, description=None, disambiguatingDescription=None, identifier=None, image=None, mainEntityOfPage=None, name=None, potentialAction=None, sameAs=None, url=None, actionStatus=None, agent=None, endTime=None, error=None, instrument=None, location=None, object=None, participant=None, result=None, startTime=None, target=None, about=None, inLanguage=None, recipient=None):
CommunicateAction.__init__(self, additionalType, alternateName, description, disambiguatingDescription, identifier, image, mainEntityOfPage, name, potentialAction, sameAs, url, actionStatus, agent, endTime, error, instrument, location, object, participant, result, startTime, target, about, inLanguage, recipient)
|
[
"[email protected]"
] | |
71e887082cff90085822f44fbae6b963c2301a2d
|
6ffa236a008d1cd1dc70f2c8ea0544d20ec350ee
|
/aries_cloudagent/messaging/issue_credential/v1_0/messages/tests/test_credential_issue.py
|
7e02b911b654489666e5eb5dbc45298e8e2e30ec
|
[
"Apache-2.0"
] |
permissive
|
blockpass-identity-lab/aries-fl-demo
|
99e849f782dd80e729e3fe24c3af2881c5c49dca
|
310b748c1ac2e814ec6f97c46ddbb9985584e0fc
|
refs/heads/master
| 2022-07-06T18:37:16.007582 | 2020-04-23T15:48:33 | 2020-04-23T15:48:33 | 221,698,330 | 5 | 0 |
Apache-2.0
| 2021-02-26T02:40:03 | 2019-11-14T12:58:58 |
Python
|
UTF-8
|
Python
| false | false | 9,380 |
py
|
from .....decorators.attach_decorator import AttachDecorator
from ...message_types import CREDENTIAL_ISSUE
from ..credential_issue import CredentialIssue
from unittest import mock, TestCase
class TestCredentialIssue(TestCase):
"""Credential issue tests"""
indy_cred = {
"schema_id": "LjgpST2rjsoxYegQDRm7EL:2:bc-reg:1.0",
"cred_def_id": "LjgpST2rjsoxYegQDRm7EL:3:CL:18:tag",
"rev_reg_id": "LjgpST2rjsoxYegQDRm7EL:4:LjgpST2rjsoxYegQDRm7EL:3:CL:18:tag:CL_ACCUM:1",
"values": {
"busId": {
"raw": "11155555",
"encoded": "11155555"
},
"legalName": {
"raw": "Babka Galaxy",
"encoded": "107723975795096474174315415205901102419879622561395089750910511985549475735747"
},
"id": {
"raw": "5",
"encoded": "5"
},
"orgTypeId": {
"raw": "1",
"encoded": "1"
},
"effectiveDate": {
"raw": "2012-12-01",
"encoded": "58785836675119218543950531421539993546216494060018521243314445986885543138388"
},
"jurisdictionId": {
"raw": "1",
"encoded": "1"
},
"endDate": {
"raw": "",
"encoded": "102987336249554097029535212322581322789799900648198034993379397001115665086549"
}
},
"signature": {
"p_credential": {
"m_2": "60025883287089799626689274984362649922028954710702989273350424792094051625907",
"a": "33574785085847496372223801384241174668280696192852342004649681358898319989377891201713237406189930904621943660579244780378356431325594072391319837474469436200535615918847408676250915598611100068705846552950672619639766733118699744590194148554187848404028169947572858712592004307286251531728499790515868404251079046925435202101170698552776314885035743276729493940581544827310348632105741785505818500141788882165796461479904049413245974826370118124656594309043126033311790481868941737635314924873471152593101941520014919522243774177999183508913726745154494726830096189641688720673911842149721875115446765101254783088102",
"e": "259344723055062059907025491480697571938277889515152306249728583105665800713306759149981690559193987143012367913206299323899696942213235956742929940839890541204554505134958365542601",
"v": "8609087712648327689510560843448768242969198387856549646434987127729892694214386082710530362693226591495343780017066542203667948482019255226968628218013767981247576292730389932608795727994162072985790185993138122475561426334951896920290599111436791225402577204027790420706987810169826735050717355066696030347321187354133263894735515127702270039945304850524250402144664403971571904353156572222923701680935669167750650688016372444804704998087365054978152701248950729399377780813365024757989269208934482967970445445223084620917624825052959697120057360426040239100930790635416973591134497181715131476498510569905885753432826750000829362210364061766697316138646771666357343198925355584209303847699218225254051213598531538421032318684976506329062116913654998320196203740062523483508588929287294193683755114531891923195772740958"
},
"r_credential": {
"sigma": "1 00F38C50E192DAF9133130888DA4A3291754B1A7D09A7DCCDD408D4E13F57267 1 0C6C9D8510580A8C9D8F0E21F51FF76E8F1419C2C909BBB9761AD9E75E46517F 2 095E45DDF417D05FB10933FFC63D474548B7FFFF7888802F07FFFFFF7D07A8A8",
"c": "12F8B7BD08471C27F6AF8EE06374D200FCEA61718FACA61FD8B90EEED7A11AD6",
"vr_prime_prime": "103015BFD51C02121DF61993973F312D5972EFF3B3B1B80BC614D5A747510366",
"witness_signature": {
"sigma_i": "1 165767F82FF8FD92237985441D2C758706A5EC1D21FBEF8611C6AC4E3CAD10DA 1 1FC786E5CD2D8B30F1C567579B4EC143C5951B7464F78B86A03419CB335EA81B 1 0B1A1356056BEDF9C61AE2D66FF0405E3B1D934DAC97099BDF6AC3ECCBFAF745 1 106B15BC294810EEDF8AD363A85CC8ECC8AA061538BB31BAE5252377D77E7FA3 2 095E45DDF417D05FB10933FFC63D474548B7FFFF7888802F07FFFFFF7D07A8A8 1 0000000000000000000000000000000000000000000000000000000000000000",
"u_i": "1 017A61B7C8B5B80EB245BE6788A28F926D8CBB9829E657D437640EF09ACD0C80 1 1AF4229C05C728AEAEEE6FC411B357B857E773BA79FF677373A6BE8F60C02C3A 1 10CB82C4913E2324C06164BF22A2BD38CEE528C797C55061C2D2486C3F6BF747 1 116CE544B1CB99556BFC0621C57C3D9F2B78D034946322EEA218DFDBDD940EA3 2 095E45DDF417D05FB10933FFC63D474548B7FFFF7888802F07FFFFFF7D07A8A8 1 0000000000000000000000000000000000000000000000000000000000000000",
"g_i": "1 0042BF46E9BAE9696F394FE7C26AFDE3C8963A2A0658D4C32737405F1576EB46 1 0194E97A9D92D46AAD61DAE06926D3361F531EB10D03C7520F3BD69D3E49311C 2 095E45DDF417D05FB10933FFC63D474548B7FFFF7888802F07FFFFFF7D07A8A8"
},
"g_i": "1 0042BF46E9BAE9696F394FE7C26AFDE3C8963A2A0658D4C32737405F1576EB46 1 0194E97A9D92D46AAD61DAE06926D3361F531EB10D03C7520F3BD69D3E49311C 2 095E45DDF417D05FB10933FFC63D474548B7FFFF7888802F07FFFFFF7D07A8A8",
"i": 1,
"m2": "84B5722AE3A1CF27CB1EA56CD33D289CB87A4401C6B103D0D7B7EA869DAF6BB3"
}
},
"signature_correctness_proof": {
"se": "19792617148120152105226254239016588540058878757479987545108556827210662529343348161518678852958020771878595740749192412985440625444455760950622452787061547854765389520937092533324699495837410270589105368479415954380927050080439536019149709356488657394895381670676082762285043378943096265107585990717517541825549361747506315768406364562926877132553754434293723146759285511815164904802662712140021121638529229138315163496513377824821704164701067409581646133944445999621553849950380606679724798867481070896073389886302519310697801643262282687875393404841657943289557895895565050618203027512724917946512514235898009424924",
"c": "20346348618412341786428948997994890734628812067145521907471418530511751955386"
},
"rev_reg": {
"accum": "21 12E821764448DE2B5754DEC16864096CFAE4BB68D4DC0CE3E5C4849FC7CBCCC0C 21 11677132B2DFB0C291D0616811BF2AC0CD464A35FF6927B821A5EACF24D94F3A5 6 5471991A0950DBD431A4DD86A8AD101E033AB5EBC29A97CAFE0E4F2C426F5821 4 1B34A4C75174974A698061A09AFFED62B78AC2AAF876BF7788BAF3FC9A8B47DF 6 7D7C5E96AE17DDB21EC98378E3185707A69CF86426F5526C9A55D1FAA2F6FA83 4 277100094333E24170CD3B020B0C91A7E9510F69218AD96AC966565AEF66BC71"
},
"witness": {
"omega": "21 136960A5E73C494F007BFE156889137E8B6DF301D5FF673C410CEE0F14AFAF1AE 21 132D4BA49C6BD8AB3CF52929D115976ABB1785D288F311CBB4455A85D07E2568C 6 70E7C40BA4F607262697556BB17FA6C85E9C188FA990264F4F031C39B5811239 4 351B98620B239DF14F3AB0B754C70597035A3B099D287A9855D11C55BA9F0C16 6 8AA1C473D792DF4F8287D0A93749046385CE411AAA1D685AA3C874C15B8628DB 4 0D6491BF5F127C1A0048CF137AEE17B62F4E49F3BDD9ECEBD14D56C43D211544"
}
}
cred_issue = CredentialIssue(
comment="Test",
credentials_attach=[AttachDecorator.from_indy_dict(indy_cred)]
)
def test_init(self):
"""Test initializer"""
credential_issue = CredentialIssue(
comment="Test",
credentials_attach=[AttachDecorator.from_indy_dict(self.indy_cred)]
)
assert credential_issue.credentials_attach[0].indy_dict == self.indy_cred
assert credential_issue.indy_credential(0) == self.indy_cred
def test_type(self):
"""Test type"""
credential_issue = CredentialIssue(
comment="Test",
credentials_attach=[AttachDecorator.from_indy_dict(self.indy_cred)]
)
assert credential_issue._type == CREDENTIAL_ISSUE
@mock.patch(
"aries_cloudagent.messaging.issue_credential.v1_0.messages."
"credential_issue.CredentialIssueSchema.load"
)
def test_deserialize(self, mock_credential_issue_schema_load):
"""
Test deserialize
"""
obj = self.cred_issue
credential_issue = CredentialIssue.deserialize(obj)
mock_credential_issue_schema_load.assert_called_once_with(obj)
assert credential_issue is mock_credential_issue_schema_load.return_value
@mock.patch(
"aries_cloudagent.messaging.issue_credential.v1_0.messages."
"credential_issue.CredentialIssueSchema.dump"
)
def test_serialize(self, mock_credential_issue_schema_dump):
"""
Test serialization.
"""
obj = self.cred_issue
credential_issue_dict = obj.serialize()
mock_credential_issue_schema_dump.assert_called_once_with(obj)
assert credential_issue_dict is mock_credential_issue_schema_dump.return_value
class TestCredentialIssueSchema(TestCase):
"""Test credential cred issue schema"""
credential_issue = CredentialIssue(
comment="Test",
# credentials_attach=[AttachDecorator.from_indy_dict(TestCredentialIssue.indy_cred)]
credentials_attach=[AttachDecorator.from_indy_dict({'hello': 'world'})]
)
def test_make_model(self):
"""Test making model."""
data = self.credential_issue.serialize()
model_instance = CredentialIssue.deserialize(data)
assert isinstance(model_instance, CredentialIssue)
|
[
"[email protected]"
] | |
c2e31a747260c7ac46d579c9c49836e9b9311df8
|
63ce52a8dcbbb4f64b1f3265f54904a928128af6
|
/ben_projects/RoboQuasar1.0/camera/analyzers.py
|
d6eea271bf5e1ef59b86465592640a1e97d35357
|
[] |
no_license
|
Woz4tetra/Self-Driving-Buggy
|
0ab629242e07ad8aa706573bbab0809f895d5533
|
ceba78445e1b0dcc6922cd67e8be23d78eb1667a
|
refs/heads/master
| 2021-01-10T18:51:35.253412 | 2016-01-15T02:56:50 | 2016-01-15T02:56:50 | 40,282,642 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,281 |
py
|
# applies Hough lines or cascade classifier filters on an input camera frame
import numpy as np
import cv2
class LineFollower:
def __init__(self, expected, y_bottom, width, height):
self.centerRho, self.centerTheta = expected
self.width, self.height = width, height
self.yBottom = y_bottom
def isEqual(self, currentTheta, existedTheta, tolerance):
'''
        if the theta values are close enough,
        treat the two lines as equivalent,
        to minimize the number of lines on screen
'''
if abs(currentTheta - existedTheta) <= tolerance:
return True
return False
    def merge(self, line_set):
        occurance = len(line_set)
        # if the partition occurs more than once,
        # merge and return a single median (rho, theta)
        if occurance > 1:
            medianRho = np.median([line[0] for line in line_set])
            medianTheta = np.median([line[1] for line in line_set])
            line_set = [occurance, medianRho, medianTheta]
        else:
            line_set = [occurance, line_set[0][0], line_set[0][1]]
        return line_set
def findAverageLines(self, lines):
'''
        findAverageLines is not supposed to draw;
        use update() to blur the frame and find lines,
        then use this func to return the average lines
'''
rightRho, rightTheta, leftRho, leftTheta = [], [], [], []
        # Divide lines into left and right groups, according to the sign of the gradient
for currentLine in lines:
# notes on indexing: currentline has format[[x1, y1]]
(rho, theta) = (currentLine[0], currentLine[1])
if theta > 0:
# lines with negative gradient; (y increases downwards in frame)
leftTheta.append(theta)
leftRho.append(rho)
elif theta <= 0:
rightTheta.append(theta)
rightRho.append(rho)
if len(leftRho) != 0:
avgLeftRho = np.median([leftRho])
avgLeftTheta = np.median([leftTheta])
else:
(avgLeftRho, avgLeftTheta) = (0, 0)
if len(rightRho) != 0:
avgRightRho = np.median([rightRho])
avgRightTheta = np.median([rightTheta])
else: (avgRightRho, avgRightTheta) = (0, 0)
self.avgCenterRho = (avgLeftRho + avgRightRho) / 2.0
self.avgCenterTheta = (avgLeftTheta + avgRightTheta) / 2.0
return [(avgLeftRho, avgLeftTheta), (avgRightRho, avgRightTheta)]
def findLineCoord(self, rho, theta):
# turn avgLines into avgLinesCoord =[(x1, y1), (x2, y2)]
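        # A line in Hough (rho, theta) form satisfies x*cos(theta) + y*sin(theta) = rho:
        # (x0, y0) is the foot of the perpendicular from the origin, and
        # (-sin(theta), cos(theta)) is the line direction, so stepping +/-1000 px
        # along it gives two endpoints far enough apart to span the frame.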
a = np.cos(theta)
b = np.sin(theta)
x0 = a * rho
y0 = b * rho
x1 = int(x0 + 1000 * -b)
y1 = int(y0 + 1000 * a)
x2 = int(x0 - 1000 * -b)
y2 = int(y0 - 1000 * a)
return (x1, y1, x2, y2)
def findDistLine(self,rho,theta):
(x1,y1,x2,y2) = self.findLineCoord(rho,theta)
Xavg = (x1+x2)/2
Yavg = (y1+y2)/2
return (self.width-Xavg,self.height-Yavg)
def key(self, item):
return item[0]
def difference(self, expected, actual, y_bottom):
return 0, 0 # distance difference, theta difference
    # need to filter out unneeded lines before taking the average
def update(self, frame, draw_avg=True, draw_all=True,
maxNumLines = 10, tolerance = 0.04):
frame = frame[90:360,::]
frame_lines = cv2.medianBlur(frame, 5)
frame_lines = cv2.Canny(frame_lines, 1, 100)
frame_lines = cv2.medianBlur(frame_lines, 3)
        # smooth the edge detection image for easier use (above)
lines = cv2.HoughLines(frame_lines, rho=1, theta=np.pi / 180,
threshold=100,
min_theta=-60 * np.pi / 180,
max_theta= 60 * np.pi / 180)
linesDrawn = []
# updating lines, after merge in similar ones
# condense lines together (remove "duplicates")
        if lines is not None:
lines = lines[:,0] # removing one layer of brackets
'''
            tests on merging and sorting start here:
            1) lines are sorted according to their rho value (len)
            (could also sort according to theta)
            2) while loops compare neighboring ones to partition them,
            3) merge func also appends the multiplicity / occurrence of that partition
            4) all lines are sorted based on # of occurrences
'''
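            # e.g. with tolerance 0.04, neighboring thetas [0.50, 0.52, 0.53, 1.10]
            # partition into {0.50, 0.52, 0.53} and {1.10}; merge() collapses each
            # partition into one median line tagged with its occurrence count.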
lines.sort(axis = 0) #sort on rho
print("LINES HERE----", lines)
            i = -1  # used in loop
            while i < len(lines) - 1:
                # each row of lines is a single (rho, theta) pair
                i += 1
                temp = []
                temp.append(np.array(lines[i]))
                while i + 1 < len(lines) and self.isEqual(
                        lines[i][1], lines[i + 1][1], tolerance):
                    # comparing neighboring theta values
                    temp.append(lines[i + 1])
                    i += 1
temp = self.merge(temp)
linesDrawn.append(temp)
linesDrawn = np.array(linesDrawn)
#print (len(linesDrawn), "number of lines after merge") #for information purposes
        # Sort the lines by distance from the frame center
        temp = []
        if lines is not None:
            # build a list with the distance from center included
            for i in range(len(linesDrawn)):
                (rho, theta) = (linesDrawn[i][1], linesDrawn[i][2])
                dist = self.findDistLine(rho, theta)
                temp.append((dist, rho, theta))
            # now sort this list based on dist
            sorted_list = sorted(temp, key=self.key)
            sorted_list = np.array(sorted_list)
        # now draw the lines
        if draw_all and lines is not None:
            idx = 0
            while idx < (maxNumLines / 2 - 1) and idx < len(sorted_list):
                (dist, rho, theta) = sorted_list[idx]
a = np.cos(theta)
b = np.sin(theta)
x0 = a * rho
y0 = b * rho
x1 = int(x0 + 1000 * -b)
y1 = int(y0 + 1000 * a)
x2 = int(x0 - 1000 * -b)
y2 = int(y0 - 1000 * a)
cv2.line(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)
idx +=1
if lines is not None:
averaged_line = self.findAverageLines(lines[:maxNumLines])
(rho1, theta1) = (averaged_line)[0]
(rho2, theta2) = (averaged_line)[1]
(x1, y1, x2, y2) = self.findLineCoord(rho1, theta1)
(x3, y3, x4, y4) = self.findLineCoord(rho2, theta2)
# get coordinates of lines before drawing
if draw_avg:
cv2.line(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.line(frame, (x3, y3), (x4, y4), (0, 255, 0), 2)
else:
averaged_line = None, None
        return frame, self.difference((self.centerRho, self.centerTheta),
                                      averaged_line, self.yBottom)
|
[
"[email protected]"
] | |
5997d151d9f1e7de50553485f111894ab52bdc98
|
4801b8c21b94335cf93bd961532f437fbb8124a2
|
/backend/bot/modules/tosurnament/bracket/bracket.py
|
3df10de8152d48cca26b308d432bc84949183735
|
[
"MIT"
] |
permissive
|
SugiuraAyano/Tosurnament
|
1e7160e037dca47e0c627b069ee6ab1c0a96c02a
|
926827398aef97581eede8685ee5303d798374eb
|
refs/heads/master
| 2023-04-25T22:44:31.356993 | 2021-06-04T16:42:25 | 2021-06-04T16:42:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 14,388 |
py
|
"""Contains all bracket settings commands related to Tosurnament."""
import asyncio
import datetime
import discord
from discord.ext import commands
from bot.modules.tosurnament import module as tosurnament
from common.api import challonge
from common.databases.bracket import Bracket
from common.api import osu
class TosurnamentBracketCog(tosurnament.TosurnamentBaseModule, name="bracket"):
"""Tosurnament bracket settings commands."""
def __init__(self, bot):
super().__init__(bot)
self.bot = bot
def cog_check(self, ctx):
"""Check function called before any command of the cog."""
return self.admin_cog_check(ctx)
@commands.command(aliases=["sbn"])
async def set_bracket_name(self, ctx, *, name: str):
"""Modifies the current bracket's name."""
await self.set_bracket_values(ctx, {"name": name})
@commands.command(aliases=["sbr"])
async def set_bracket_role(self, ctx, *, role: discord.Role):
"""Modifies the current bracket's role."""
await self.set_bracket_values(ctx, {"role_id": role.id})
@commands.command(aliases=["scbr"])
async def set_current_bracket_round(self, ctx, *, current_round: str = ""):
"""Sets the round of the current bracket."""
await self.set_bracket_values(ctx, {"current_round": current_round})
@commands.command(aliases=["sprc"])
async def set_post_result_channel(self, ctx, *, channel: discord.TextChannel):
"""Sets the post result's channel."""
await self.set_bracket_values(ctx, {"post_result_channel_id": channel.id})
@commands.command(aliases=["sc"])
async def set_challonge(self, ctx, challonge_tournament: str):
"""Sets the challonge."""
challonge_tournament = challonge.extract_tournament_id(challonge_tournament)
await self.set_bracket_values(ctx, {"challonge": challonge_tournament})
@commands.command(aliases=["sre"])
async def set_registration_end(self, ctx, *, date: str):
"""Sets the registration end date."""
tournament = self.get_tournament(ctx.guild.id)
try:
new_date = tournament.parse_date(date, prefer_dates_from="future")
except ValueError:
raise commands.UserInputError()
if not new_date:
raise commands.UserInputError()
new_date_string = new_date.strftime(tosurnament.DATABASE_DATE_FORMAT)
await self.set_bracket_values(ctx, {"registration_end_date": new_date_string})
def is_player_in_challonge(self, member, teams_info, participants):
participants_casefold = [participant.casefold() for participant in participants]
user = tosurnament.UserAbstraction.get_from_user(self.bot, member)
if teams_info:
for team_info in teams_info:
if player := self.get_player_in_team(member, team_info):
if player.name.casefold() in participants_casefold:
return team_info, player.name.get()
else:
return None, None
elif user.verified and user.name.casefold() in participants_casefold:
return None, user.name
return None, None
def get_all_brackets_string(self, brackets):
brackets_string = ""
for i, bracket in enumerate(brackets):
brackets_string += str(i + 1) + ": `" + bracket.name + "`\n"
return brackets_string
@commands.command(aliases=["cpr"])
async def clear_player_role(self, ctx, bracket_index: int = None, remove_player_role: bool = True):
"""Removes the player role of users not present in the challonge."""
tournament = self.get_tournament(ctx.guild.id)
brackets = tournament.brackets
bracket = self.get_bracket_from_index(brackets, bracket_index)
if not bracket:
await self.send_reply(ctx, "default", self.get_all_brackets_string(brackets))
return
if not bracket.challonge:
raise tosurnament.NoChallonge(bracket.name)
player_role = tosurnament.get_role(ctx.guild.roles, tournament.player_role_id, "Player")
bracket_role = tosurnament.get_role(ctx.guild.roles, bracket.role_id, bracket.name)
team_captain_role = tosurnament.get_role(ctx.guild.roles, tournament.team_captain_role_id, "Team Captain")
if remove_player_role:
roles_to_remove = list(filter(None, [player_role, bracket_role, team_captain_role]))
else:
roles_to_remove = list(filter(None, [bracket_role, team_captain_role]))
teams_info, teams_roles = await self.get_all_teams_infos_and_roles(
ctx.guild, await bracket.get_players_spreadsheet()
)
roles_to_remove = [*roles_to_remove, *teams_roles]
challonge_tournament = challonge.get_tournament(bracket.challonge)
running_participants = challonge_tournament.get_running_participants()
players_found = []
n_user_roles_removed = 0
users_role_not_removed = []
for member in ctx.guild.members:
if bracket_role:
if not tosurnament.get_role(member.roles, bracket_role.id):
continue
elif player_role and not tosurnament.get_role(member.roles, player_role.id):
continue
_, player_name = self.is_player_in_challonge(member, teams_info, running_participants)
if player_name:
players_found.append(player_name.casefold())
else:
if member:
try:
await member.remove_roles(*roles_to_remove)
n_user_roles_removed += 1
except Exception:
users_role_not_removed.append(str(member))
success_extra = ""
players_not_found = []
for participant in running_participants:
if participant.casefold() not in players_found:
players_not_found.append(participant)
if players_not_found:
success_extra += self.get_string(ctx, "players_not_found", "\n".join(players_not_found))
if users_role_not_removed:
success_extra += self.get_string(ctx, "users_role_not_removed", "\n".join(users_role_not_removed))
await self.send_reply(ctx, "success", bracket.name, n_user_roles_removed, len(players_found), success_extra)
@commands.command(aliases=["gpr"])
async def give_player_role(self, ctx, bracket_index: int = None):
"""Gives the player role of users not present in the challonge."""
tournament = self.get_tournament(ctx.guild.id)
brackets = tournament.brackets
bracket = self.get_bracket_from_index(brackets, bracket_index)
if not bracket:
await self.send_reply(ctx, "default", self.get_all_brackets_string(brackets))
return
if not bracket.challonge:
raise tosurnament.NoChallonge(bracket.name)
player_role = tosurnament.get_role(ctx.guild.roles, tournament.player_role_id, "Player")
bracket_role = tosurnament.get_role(ctx.guild.roles, bracket.role_id, bracket.name)
team_captain_role = tosurnament.get_role(ctx.guild.roles, tournament.team_captain_role_id, "Team Captain")
roles_to_add = list(filter(None, [player_role, bracket_role]))
challonge_tournament = challonge.get_tournament(bracket.challonge)
running_participants = challonge_tournament.get_running_participants()
teams_info, _ = await self.get_all_teams_infos_and_roles(ctx.guild, await bracket.get_players_spreadsheet())
n_user_roles_added = 0
users_role_not_added = []
players_found = []
for member in ctx.guild.members:
team_info, player_name = self.is_player_in_challonge(member, teams_info, running_participants)
if player_name:
players_found.append(player_name.casefold())
roles_to_add_to_player = list(roles_to_add)
if team_info:
team_role = tosurnament.get_role(ctx.guild.roles, None, team_info.team_name.get())
if team_role:
roles_to_add_to_player.append(team_role)
if team_captain_role and team_info.get_team_captain().name.casefold() == player_name.casefold():
roles_to_add_to_player.append(team_captain_role)
try:
await member.add_roles(*roles_to_add_to_player)
n_user_roles_added += 1
except Exception:
users_role_not_added.append(str(member))
success_extra = ""
players_not_found = []
for participant in running_participants:
if participant.casefold() not in players_found:
players_not_found.append(participant)
if players_not_found:
success_extra += self.get_string(ctx, "players_not_found", "\n".join(players_not_found))
if users_role_not_added:
success_extra += self.get_string(ctx, "users_role_not_added", "\n".join(users_role_not_added))
await self.send_reply(ctx, "success", bracket.name, n_user_roles_added, len(players_not_found), success_extra)
async def set_bracket_values(self, ctx, values):
"""Puts the input values into the corresponding bracket."""
tournament = self.get_tournament(ctx.guild.id)
for key, value in values.items():
setattr(tournament.current_bracket, key, value)
self.bot.session.update(tournament.current_bracket)
await self.send_reply(ctx, "success", value)
@commands.command(aliases=["cp"])
async def copy_bracket(self, ctx, index_from: int, index_to: int):
"""Copies the settings of a bracket to another one."""
tournament = self.get_tournament(ctx.guild.id)
brackets = tournament.brackets
if index_from > 0 and index_from <= len(brackets) and index_to > 0 and index_to <= len(brackets):
bracket_from = brackets[index_from - 1]
bracket_to = brackets[index_to - 1]
bracket_to.post_result_channel_id = bracket_from.post_result_channel_id
bracket_to.current_round = bracket_from.current_round
for spreadsheet_type in Bracket.get_spreadsheet_types().keys():
spreadsheet_from = bracket_from.get_spreadsheet_from_type(spreadsheet_type)
if spreadsheet_from:
spreadsheet_to = bracket_to.get_spreadsheet_from_type(spreadsheet_type)
if not spreadsheet_to:
spreadsheet_to = bracket_to.create_spreadsheet_from_type(self.bot, spreadsheet_type)
spreadsheet_from.copy_to(spreadsheet_to)
self.bot.session.update(spreadsheet_to)
await self.send_reply(ctx, "success", bracket_from.name, bracket_to.name)
return
raise commands.UserInputError()
async def update_players_spreadsheet_registration(self, guild, tournament):
now = datetime.datetime.now()
for bracket in tournament.brackets:
if not bracket.registration_end_date:
continue
registration_end_date = datetime.datetime.strptime(
bracket.registration_end_date, tosurnament.DATABASE_DATE_FORMAT
)
if now > registration_end_date:
continue
players_spreadsheet = await bracket.get_players_spreadsheet()
if not players_spreadsheet:
continue
team_infos, _ = await self.get_all_teams_infos_and_roles(guild, players_spreadsheet)
update_spreadsheet = False
for team_info in team_infos:
for player_info in team_info.players:
osu_id = player_info.name.get()
if player_info.osu_id:
osu_id = player_info.osu_id.get()
osu_user = osu.get_user(osu_id, m=tournament.game_mode)
if not osu_user:
continue
if player_info.name != osu_user.name:
user = tosurnament.UserAbstraction.get_from_player_info(self.bot, player_info, guild)
member = user.get_member(guild)
if member:
try:
await member.edit(nick=osu_user.name)
except (discord.Forbidden, discord.HTTPException):
pass
player_info.name.set(osu_user.name)
player_info.rank.set(str(osu_user.rank))
player_info.bws_rank.set(str(osu_user.rank))
player_info.pp.set(str(int(float(osu_user.pp))))
update_spreadsheet = True
if update_spreadsheet:
self.add_update_spreadsheet_background_task(players_spreadsheet)
async def background_task_update_players_spreadsheet_registration(self):
try:
await self.bot.wait_until_ready()
while not self.bot.is_closed():
for guild in self.bot.guilds:
try:
tournament = self.get_tournament(guild.id)
if tournament.registration_background_update:
await self.update_players_spreadsheet_registration(guild, tournament)
except asyncio.CancelledError:
return
except Exception:
continue
await asyncio.sleep(86000)
except asyncio.CancelledError:
return
def background_task(self):
spreadsheet_ids = self.get_spreadsheet_ids_to_update_pickle()
for spreadsheet_id in spreadsheet_ids:
self.bot.tasks.append(self.bot.loop.create_task(self.update_spreadsheet_background_task(spreadsheet_id)))
self.bot.tasks.append(self.bot.loop.create_task(self.background_task_update_players_spreadsheet_registration()))
def get_class(bot):
"""Returns the main class of the module"""
return TosurnamentBracketCog(bot)
def setup(bot):
"""Setups the cog"""
bot.add_cog(TosurnamentBracketCog(bot))
|
[
"[email protected]"
] | |
6cab7e4a0b61be1e5a1bf905f462d105a149b688
|
f5d766ee70de02422aabfbe53d81d94e1abfff3b
|
/photos/apps.py
|
4a36c51d287c37936560a34480aae670a5e66708
|
[
"MIT"
] |
permissive
|
antomuli/Chi_Gallery
|
f7b0a0466110278776744b9c3873af0f7cdcad94
|
cbc45e7c54f812081af73e2946bf846a4e16e4af
|
refs/heads/master
| 2022-12-08T19:00:11.851715 | 2020-07-22T10:22:04 | 2020-07-22T10:22:04 | 243,785,499 | 3 | 1 | null | 2022-12-08T03:43:41 | 2020-02-28T14:49:39 |
Python
|
UTF-8
|
Python
| false | false | 86 |
py
|
from django.apps import AppConfig
class PhotosConfig(AppConfig):
name = 'photos'
|
[
"[email protected]"
] | |
841b4a196027f6232cc3d170f481da13576dc249
|
2e74cff6c9639f3903ccde662e79359d0724285e
|
/2019_late/20190826/aa.py
|
f51aeefafcffc501a66e2b90d4c18c5bb3a5c82a
|
[] |
no_license
|
dodonmountain/algorithm
|
e29988071f651e51ba65e3926302f94a3d4074a5
|
ce33e0d74220839aed4b17a47fa0069458a4324e
|
refs/heads/master
| 2022-11-05T05:14:01.527015 | 2022-11-01T04:29:37 | 2022-11-01T04:29:37 | 200,008,533 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 901 |
py
|
import sys
sys.stdin = open('input4881.txt', 'r')
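# A note on the search (inferred from the code; input4881.txt suggests a
# SWEA-style grid problem): pick one value from each row with pairwise-distinct
# columns and track the minimum possible total sum.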
def backtracking(k, n, sum_temp):
global min_sum
if k == n:
# print('sum_temp_final: {}'.format(sum_temp))
if min_sum > sum_temp:
min_sum = sum_temp
return
    if sum_temp > min_sum:  # added pruning after getting time-limit exceeded!
return
for i in range(N):
if used_col[i]:
continue
sum_temp += lst[k][i]
# print('lst[{}][{}]: {}, sum_temp: {}'.format(k, i, lst[k][i], sum_temp))
used_col[i] = True
backtracking(k + 1, n, sum_temp)
sum_temp -= lst[k][i]
used_col[i] = False
for t in range(1, int(input()) + 1):
N = int(input())
lst = []
for j in range(N):
lst.append(list(map(int, input().split())))
used_col = [False] * N
min_sum = 10*N
backtracking(0, N, 0)
print('#{} {}'.format(t, min_sum))
|
[
"[email protected]"
] | |
31178d0de5aff1e94f4abb2ac1bf30250a058b35
|
f2889a13368b59d8b82f7def1a31a6277b6518b7
|
/203.py
|
4cc915c1697a80508539b0d9a6f8f2a77f90cc65
|
[] |
no_license
|
htl1126/leetcode
|
dacde03de5c9c967e527c4c3b29a4547154e11b3
|
c33559dc5e0bf6879bb3462ab65a9446a66d19f6
|
refs/heads/master
| 2023-09-01T14:57:57.302544 | 2023-08-25T15:50:56 | 2023-08-25T15:50:56 | 29,514,867 | 7 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,066 |
py
|
# ref: https://leetcode.com/discuss/33150/python-solution
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def removeElements(self, head, val):
"""
:type head: ListNode
:type val: int
:rtype: ListNode
"""
dummy = ListNode(-1)
dummy.next = head
next = dummy
while next is not None and next.next is not None:
if next.next.val == val:
next.next = next.next.next
else:
next = next.next
return dummy.next
if __name__ == '__main__':
sol = Solution()
head = ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(6)
head.next.next.next = ListNode(3)
head.next.next.next.next = ListNode(4)
head.next.next.next.next.next = ListNode(5)
head.next.next.next.next.next.next = ListNode(6)
res = sol.removeElements(head, 6)
while res:
        print(res.val)
res = res.next
|
[
"[email protected]"
] | |
0b0c72893b0cb54445a840ddd464ccffc5ea4a81
|
b73e14ea63e0d728a23b780bd5b0eb4d8fac4362
|
/hassio-google-drive-backup/backup/drive/folderfinder.py
|
86319775f85ffbcf72329dd692eaedf91c8715ae
|
[
"MIT"
] |
permissive
|
agusalex/hassio-google-drive-backup
|
7c722c26e2c45382aeabef706842674ba6c176d3
|
c97fd6e7e4f95d48b85d3cfe67a01bdc2103da9c
|
refs/heads/master
| 2023-04-03T06:42:49.797408 | 2021-04-05T23:30:33 | 2021-04-05T23:30:33 | 354,999,639 | 0 | 1 |
MIT
| 2021-04-05T23:30:33 | 2021-04-05T23:26:33 | null |
UTF-8
|
Python
| false | false | 7,601 |
py
|
import os
import os.path
from datetime import timedelta
from typing import Any, Dict
from aiohttp.client_exceptions import ClientResponseError
from injector import inject, singleton
from ..config import Config, Setting
from ..exceptions import (BackupFolderInaccessible, BackupFolderMissingError,
GoogleDrivePermissionDenied, LogInToGoogleDriveError)
from ..time import Time
from .driverequests import DriveRequests
from ..logger import getLogger
logger = getLogger(__name__)
FOLDER_MIME_TYPE = 'application/vnd.google-apps.folder'
FOLDER_NAME = 'Home Assistant Snapshots'
FOLDER_CACHE_SECONDS = 30
@singleton
class FolderFinder():
@inject
def __init__(self, config: Config, time: Time, drive_requests: DriveRequests):
self.config = config
self.drivebackend: DriveRequests = drive_requests
self.time = time
# The cached folder id
self._folderId = None
        # When the folder id was last cached
self._folder_queryied_last = None
# These get set when an existing folder is found and should cause the UI to
# prompt for what to do about it.
self._existing_folder = None
self._use_existing = None
def resolveExisting(self, val):
if self._existing_folder:
self._use_existing = val
else:
self._use_existing = None
async def get(self):
if self._existing_folder and self._use_existing is not None:
if self._use_existing:
await self.save(self._existing_folder)
else:
await self.create()
self._use_existing = None
if not self._folder_queryied_last or self._folder_queryied_last + timedelta(seconds=FOLDER_CACHE_SECONDS) < self.time.now():
try:
self._folderId = await self._readFolderId()
except (BackupFolderMissingError, BackupFolderInaccessible):
if not self.config.get(Setting.SPECIFY_SNAPSHOT_FOLDER):
# Search for a folder, they may have created one before
self._existing_folder = await self._search()
if self._existing_folder:
self._folderId = self._existing_folder.get('id')
else:
# Create folder, since no other folder is available
await self.create()
else:
raise
self._folder_queryied_last = self.time.now()
return self._folderId
def getExisting(self):
return self._existing_folder
async def save(self, folder: Any) -> str:
if not isinstance(folder, str):
folder = folder.get('id')
logger.info("Saving snapshot folder: " + folder)
with open(self.config.get(Setting.FOLDER_FILE_PATH), 'w') as folder_file:
folder_file.write(folder)
self._folderId = folder
self._folder_queryied_last = self.time.now()
self._existing_folder = None
def reset(self):
if os.path.exists(self.config.get(Setting.FOLDER_FILE_PATH)):
os.remove(self.config.get(Setting.FOLDER_FILE_PATH))
self._folderId = None
self._folder_queryied_last = None
self._existing_folder = None
def getCachedFolder(self):
return self._folderId
def deCache(self):
self._folderId = None
self._folder_queryied_last = None
async def _readFolderId(self) -> str:
# First, check if we cached the drive folder
if not os.path.exists(self.config.get(Setting.FOLDER_FILE_PATH)):
raise BackupFolderMissingError()
if os.path.exists(self.config.get(Setting.FOLDER_FILE_PATH)):
with open(self.config.get(Setting.FOLDER_FILE_PATH), "r") as folder_file:
folder_id: str = folder_file.readline()
if await self._verify(folder_id):
return folder_id
else:
raise BackupFolderInaccessible(folder_id)
async def _search(self) -> str:
folders = []
try:
async for child in self.drivebackend.query("mimeType='" + FOLDER_MIME_TYPE + "'"):
if self._isValidFolder(child):
folders.append(child)
except ClientResponseError as e:
# 404 means the folder doesn't exist (maybe it got moved?)
if e.status == 404:
"Make Error"
raise LogInToGoogleDriveError()
else:
raise e
if len(folders) == 0:
return None
folders.sort(key=lambda c: Time.parse(c.get("modifiedTime")))
# Found a folder, which means we're probably using the add-on from a
# previous (or duplicate) installation. Record and return the id but don't
# persist it until the user chooses to do so.
folder = folders[len(folders) - 1]
logger.info("Found " + folder.get('name'))
return folder
async def _verify(self, id):
if self.drivebackend.isCustomCreds():
# If the user is using custom creds and specifying the snapshot folder, then chances are the
# app doesn't have permission to access the parent folder directly. Ironically, we can still
# query for children and add/remove snapshots. Not a huge deal, just
# means we can't verify the folder still exists, isn't trashed, etc. Just let it be valid
# and handle potential errors elsewhere.
return True
# Query drive for the folder to make sure it still exists and we have the right permission on it.
try:
folder = await self.drivebackend.get(id)
if not self._isValidFolder(folder):
logger.info("Provided snapshot folder {0} is invalid".format(id))
return False
return True
except ClientResponseError as e:
if e.status == 404:
# 404 means the folder doesn't exist (maybe it got moved?) but can also mean that we
# just don't have permission to see the folder. Often we can still upload into it, so just
# let it pass without further verification and let other error handling (on upload) identify problems.
return True
else:
raise e
except GoogleDrivePermissionDenied:
# Lost permission on the backup folder
return False
def _isValidFolder(self, folder) -> bool:
try:
caps = folder.get('capabilities')
if folder.get('trashed'):
return False
elif not caps['canAddChildren']:
return False
elif not caps['canListChildren']:
return False
elif not caps.get('canDeleteChildren', False) and not caps.get('canRemoveChildren', False):
return False
elif folder.get("mimeType") != FOLDER_MIME_TYPE:
return False
except Exception:
return False
return True
async def create(self) -> str:
logger.info('Creating folder "{}" in "My Drive"'.format(FOLDER_NAME))
file_metadata: Dict[str, str] = {
'name': FOLDER_NAME,
'mimeType': FOLDER_MIME_TYPE,
'appProperties': {
"backup_folder": "true",
},
}
folder = await self.drivebackend.createFolder(file_metadata)
await self.save(folder)
return folder.get('id')
|
[
"[email protected]"
] | |
8a52414ded940232afbcfaec121283acd02cb77d
|
bc233c24523f05708dd1e091dca817f9095e6bb5
|
/bitmovin_api_sdk/models/limit_transform_unit_depth_recursion_mode.py
|
0ba484b9233f3c3819003b1483ecb20fa575e34a
|
[
"MIT"
] |
permissive
|
bitmovin/bitmovin-api-sdk-python
|
e3d6cf8eb8bdad62cb83ec77c0fc4950b06b9cdd
|
b0860c0b1be7747cf22ad060985504da625255eb
|
refs/heads/main
| 2023-09-01T15:41:03.628720 | 2023-08-30T10:52:13 | 2023-08-30T10:52:13 | 175,209,828 | 13 | 14 |
MIT
| 2021-04-29T12:30:31 | 2019-03-12T12:47:18 |
Python
|
UTF-8
|
Python
| false | false | 313 |
py
|
# coding: utf-8
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
class LimitTransformUnitDepthRecursionMode(Enum):
DISABLED = "DISABLED"
LEVEL_1 = "LEVEL_1"
LEVEL_2 = "LEVEL_2"
LEVEL_3 = "LEVEL_3"
LEVEL_4 = "LEVEL_4"
|
[
"[email protected]"
] | |
b11eff7cd71907dd1ef5d3d85b2f65a8f197cec1
|
27750e8d10776babf3ef459365e31f020071384b
|
/tensor2tensor/data_generators/desc2code_test.py
|
24b7568d0b862caca630bdcb5fbadcc04dc2b4d7
|
[
"Apache-2.0"
] |
permissive
|
rmbrad/tensor2tensor
|
364da5e065075b363fc539cea67ce12008cbd23a
|
45a787e46b32bdb18b70f835cba0b3270267e19b
|
refs/heads/master
| 2021-05-16T17:10:10.342682 | 2017-08-11T23:21:37 | 2017-08-11T23:21:37 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,750 |
py
|
# coding=utf-8
# Copyright 2017 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for desc2code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from tensor2tensor.data_generators import desc2code
import tensorflow as tf
CODE_CPP_IN = """
#include <iostream>
void main() { // This comment will be removed
// This too.
//
/* Not this one */
\t
\t
int a \t\n = 3;//
//
}
"""
CODE_CPP_OUT = ("#include <iostream> void main() { /* Not this one */ int a = "
"3; }")
class Desc2codeTest(tf.test.TestCase):
def testCppPreprocess(self):
"""Check that the file correctly preprocess the code source."""
cpp_pb = desc2code.Desc2CodeCppProblem()
    self.assertEqual(  # Add space between two lines
cpp_pb.preprocess_target("firstline//comm1\nsecondline//comm2\n"),
"firstline secondline")
    # Checking for both comments and spaces
self.assertEqual(cpp_pb.preprocess_target(CODE_CPP_IN), CODE_CPP_OUT)
self.assertEqual(
cpp_pb.preprocess_target(" not removed //abcd "),
"not removed //abcd")
if __name__ == "__main__":
tf.test.main()
|
[
"[email protected]"
] | |
557b2ddc68afe5bdfefe61daf150e73970c74871
|
900b98964288a9cb0aaf2e45706ae2b32f92657f
|
/examples/adspygoogle/dfp/v201208/update_orders.py
|
bda89f799cad962bb451e7cf7fe364f5bbc28388
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
krux/adspygoogle
|
df2405c2042aa9c9a83d97b8442afe68572e3e2e
|
6505a71122f45fe3e675f27f2c29f67a1768069b
|
refs/heads/master
| 2022-02-22T08:55:19.777002 | 2022-02-11T22:42:19 | 2022-02-11T22:42:19 | 7,103,378 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,331 |
py
|
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates the notes of each order up to the first 500. To
determine which orders exist, run get_all_orders.py."""
__author__ = '[email protected] (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.common import Utils
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
order_service = client.GetService('OrderService', version='v201208')
# Create statement object to get all orders.
filter_statement = {'query': 'LIMIT 500'}
# Get orders by statement.
response = order_service.GetOrdersByStatement(filter_statement)[0]
orders = []
if 'results' in response:
orders = response['results']
if orders:
# Update each local order object by changing its notes.
updated_orders = []
for order in orders:
# Archived orders cannot be updated.
if not Utils.BoolTypeConvert(order['isArchived']):
order['notes'] = 'Spoke to advertiser. All is well.'
updated_orders.append(order)
# Update orders remotely.
orders = order_service.UpdateOrders(updated_orders)
# Display results.
if orders:
for order in orders:
print ('Order with id \'%s\', name \'%s\', advertiser id \'%s\', and '
'notes \'%s\' was updated.'
% (order['id'], order['name'], order['advertiserId'],
order['notes']))
else:
print 'No orders were updated.'
else:
print 'No orders found to update.'
|
[
"[email protected]"
] | |
263d4468d717c24d9dbb54540b3466b94e3b6850
|
6c46cde091086cc302fa417ad00283702221b487
|
/인프런/섹션 6/6. 중복순열 구하기/AA3.py
|
adcf3c6bc22fe5386e6d6fed6fd8f95e690d09fe
|
[] |
no_license
|
Hugo-Oh/study_DataStructure
|
4da1e358ad1458f52075065d7bd54540cc8b8ad4
|
da492bbe0267d73cefb71c7ada129cfc41b7dcee
|
refs/heads/master
| 2023-06-19T14:40:14.590970 | 2021-07-21T15:07:11 | 2021-07-21T15:07:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 358 |
py
|
import sys
#sys.stdin = open("input.txt", "rt")
N, M = map(int, input().split())
arr = [0] * M
cnt = 0
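# The DFS below enumerates every length-M sequence over {1..N} (permutations
# with repetition), so cnt should end up equal to N ** M.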
def DFS(n):
global cnt
if n == M:
for i in arr:
print(i, end = " ")
print()
cnt += 1
return
else:
for i in range(1, N + 1):
arr[n] = i
DFS(n+1)
DFS(0)
print(cnt)
|
[
"[email protected]"
] | |
3f2523ace4187d1f2fba4fa4a681546956844653
|
7bededcada9271d92f34da6dae7088f3faf61c02
|
/pypureclient/flasharray/FA_2_19/models/admin_api_token_response.py
|
03c5ebe3b8f1b0eba86d1b226fd09b8c7ece9747
|
[
"BSD-2-Clause"
] |
permissive
|
PureStorage-OpenConnect/py-pure-client
|
a5348c6a153f8c809d6e3cf734d95d6946c5f659
|
7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e
|
refs/heads/master
| 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 |
BSD-2-Clause
| 2023-09-08T09:08:30 | 2018-12-04T17:02:51 |
Python
|
UTF-8
|
Python
| false | false | 3,841 |
py
|
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.19
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_19 import models
class AdminApiTokenResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'items': 'list[AdminApiToken]'
}
attribute_map = {
'items': 'items'
}
required_args = {
}
def __init__(
self,
items=None, # type: List[models.AdminApiToken]
):
"""
Keyword args:
items (list[AdminApiToken])
"""
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `AdminApiTokenResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def __getitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `AdminApiTokenResponse`".format(key))
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `AdminApiTokenResponse`".format(key))
object.__setattr__(self, key, value)
def __delitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `AdminApiTokenResponse`".format(key))
object.__delattr__(self, key)
def keys(self):
return self.attribute_map.keys()
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AdminApiTokenResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AdminApiTokenResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"[email protected]"
] | |
1d928f312bda7d0394b7dae7b9c9fd6d04ae4e6a
|
51086c09f2c920d057db12e373a01b08571c4cbf
|
/.env/bin/pip2.7
|
051b5bd26d51057fd1e03bc5152231762e563c95
|
[] |
no_license
|
JohnHoder/pebble-dev
|
66dc69258dfd009313c23ba5c2eb518aec257652
|
e9d95bd564ba6f58b539a1a68f21fe82b6d0992b
|
refs/heads/master
| 2022-11-23T17:32:26.573394 | 2018-12-26T03:17:37 | 2018-12-26T03:17:37 | 163,131,045 | 0 | 1 | null | 2022-10-31T10:03:38 | 2018-12-26T03:15:57 |
Python
|
UTF-8
|
Python
| false | false | 219 |
7
|
#!/opt/pebblexxx/.env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"[email protected]"
] | |
acfdefacf1fe49a47bbfeab9d602460b1834d794
|
097952b49b373e2a391cd7b7f32ac1732379beaa
|
/src/z3c/tabular/testing.py
|
a470ec0c1108886d51cc2dc0e97e3d75f2ce107b
|
[
"ZPL-2.1"
] |
permissive
|
zopefoundation/z3c.tabular
|
d9942205aa1af3d62768ae702c403365ec9e7e54
|
e222b9cc245e044e4a38d1b64fe1aec79465ff1f
|
refs/heads/master
| 2023-06-21T20:17:56.129955 | 2020-12-16T16:12:51 | 2020-12-16T16:12:51 | 8,727,901 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,229 |
py
|
##############################################################################
#
# Copyright (c) 2008 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
$Id:$
"""
__docformat__ = "reStructuredText"
from zope.browserpage import metaconfigure
from zope.app.testing import setup
import z3c.macro.tales
import z3c.table.testing
###############################################################################
#
# testing setup
#
###############################################################################
def setUp(test):
test.globs = {'root': setup.placefulSetUp(True)}
metaconfigure.registerType('macro', z3c.macro.tales.MacroExpression)
z3c.table.testing.setUpAdapters()
def tearDown(test):
setup.placefulTearDown()
|
[
"[email protected]"
] | |
3d76d571204087973ff5d24b2ece337edc64f28d
|
be0a3aa7b83b87c5d2c257b538545bdded39c051
|
/Chatbot_KG/model/KE/HolE.py
|
7e95c8a6ef8798bcb9b6f12ab9a937dec413c0a4
|
[
"Apache-2.0"
] |
permissive
|
water123li/Chatbot_CN
|
480e3bc6d6c0d8b6b0823452556acef14df1c2c3
|
e63808030c6cc516020075cdcd0c332120a998fc
|
refs/heads/master
| 2022-01-25T10:34:34.726243 | 2019-06-13T10:44:44 | 2019-06-13T10:44:44 | 192,504,292 | 1 | 0 |
Apache-2.0
| 2019-06-18T09:01:55 | 2019-06-18T09:01:55 | null |
UTF-8
|
Python
| false | false | 3,681 |
py
|
#coding:utf-8
import numpy as np
import tensorflow as tf
from .Model import Model
class HolE(Model):
    def _cconv(self, a, b):
        # circular convolution via FFT; cast to complex for tf.fft, take real part with tf.real
        return tf.real(tf.ifft(tf.fft(tf.cast(a, tf.complex64)) * tf.fft(tf.cast(b, tf.complex64))))
def _ccorr(self, a, b):
a = tf.cast(a, tf.complex64)
b = tf.cast(b, tf.complex64)
return tf.real(tf.ifft(tf.conj(tf.fft(a)) * tf.fft(b)))
r'''
HolE employs circular correlations to create compositional representations.
HolE can capture rich interactions but simultaneously remains efficient to compute.
'''
def _calc(self, head, tail, rel):
relation_mention = tf.nn.l2_normalize(rel, 1)
entity_mention = self._ccorr(head, tail)
return -tf.sigmoid(tf.reduce_sum(relation_mention * entity_mention, 1, keep_dims = True))
def embedding_def(self):
#Obtaining the initial configuration of the model
config = self.get_config()
#Defining required parameters of the model, including embeddings of entities and relations
self.ent_embeddings = tf.get_variable(name = "ent_embeddings", shape = [config.entTotal, config.hidden_size], initializer = tf.contrib.layers.xavier_initializer(uniform = False))
self.rel_embeddings = tf.get_variable(name = "rel_embeddings", shape = [config.relTotal, config.hidden_size], initializer = tf.contrib.layers.xavier_initializer(uniform = False))
self.parameter_lists = {"ent_embeddings":self.ent_embeddings, \
"rel_embeddings":self.rel_embeddings, }
def loss_def(self):
#Obtaining the initial configuration of the model
config = self.get_config()
#To get positive triples and negative triples for training
#The shapes of pos_h, pos_t, pos_r are (batch_size, 1)
#The shapes of neg_h, neg_t, neg_r are (batch_size, negative_ent + negative_rel)
pos_h, pos_t, pos_r = self.get_positive_instance(in_batch = True)
neg_h, neg_t, neg_r = self.get_negative_instance(in_batch = True)
#Embedding entities and relations of triples, e.g. pos_h_e, pos_t_e and pos_r_e are embeddings for positive triples
pos_h_e = tf.reshape(tf.nn.embedding_lookup(self.ent_embeddings, pos_h), [-1, config.hidden_size])
pos_t_e = tf.reshape(tf.nn.embedding_lookup(self.ent_embeddings, pos_t), [-1, config.hidden_size])
pos_r_e = tf.reshape(tf.nn.embedding_lookup(self.rel_embeddings, pos_r), [-1, config.hidden_size])
neg_h_e = tf.reshape(tf.nn.embedding_lookup(self.ent_embeddings, neg_h), [-1, config.hidden_size])
neg_t_e = tf.reshape(tf.nn.embedding_lookup(self.ent_embeddings, neg_t), [-1, config.hidden_size])
neg_r_e = tf.reshape(tf.nn.embedding_lookup(self.rel_embeddings, neg_r), [-1, config.hidden_size])
#Calculating score functions for all positive triples and negative triples
#The shape of _p_score is (batch_size, 1, 1)
#The shape of _n_score is (batch_size, negative_ent + negative_rel, 1)
_p_score = tf.reshape(self._calc(pos_h_e, pos_t_e, pos_r_e), [-1, 1])
_n_score = tf.reshape(self._calc(neg_h_e, neg_t_e, neg_r_e), [-1, config.negative_rel + config.negative_ent])
#The shape of p_score is (batch_size, 1)
#The shape of n_score is (batch_size, 1)
p_score = _p_score
n_score = tf.reduce_mean(_n_score, 1, keep_dims = True)
#Calculating loss to get what the framework will optimize
self.loss = tf.reduce_sum(tf.maximum(p_score - n_score + config.margin, 0))
def predict_def(self):
config = self.get_config()
predict_h, predict_t, predict_r = self.get_predict_instance()
predict_h_e = tf.nn.embedding_lookup(self.ent_embeddings, predict_h)
predict_t_e = tf.nn.embedding_lookup(self.ent_embeddings, predict_t)
predict_r_e = tf.nn.embedding_lookup(self.rel_embeddings, predict_r)
self.predict = tf.reduce_sum(self._calc(predict_h_e, predict_t_e, predict_r_e), 1, keep_dims = True)
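# --- Editor's sketch (illustrative, not part of the original file) ---
# A standalone numpy check of the circular correlation that _ccorr computes
# with FFTs: [a star b]_k = sum_i a[i] * b[(i + k) % d]. Only numpy is needed;
# all names below are illustrative additions.
import numpy as np

def ccorr_direct(a, b):
    # circular correlation computed term by term from its definition
    d = len(a)
    return np.array([sum(a[i] * b[(i + k) % d] for i in range(d)) for k in range(d)])

def ccorr_fft(a, b):
    # the FFT identity used by HolE._ccorr above
    return np.real(np.fft.ifft(np.conj(np.fft.fft(a)) * np.fft.fft(b)))

if __name__ == '__main__':
    a = np.array([1.0, 2.0, 3.0, 4.0])
    b = np.array([0.5, -1.0, 2.0, 0.0])
    assert np.allclose(ccorr_direct(a, b), ccorr_fft(a, b))
    print(ccorr_fft(a, b))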
|
[
"[email protected]"
] | |
6253aae47154a92d62c55de0c48e638cda492064
|
6969dbf9ff8fabf811efa04cc76207e955c0d481
|
/simics/monitorCore/idaFuns.py
|
6430e8accb60cd4855aa86f78717c87114b37f66
|
[] |
no_license
|
heruix/RESim
|
c6f5a1919afa6872d3175b5b4012ea2b45438797
|
bf514e9c08fced46ee752dd14d498971a059bc16
|
refs/heads/master
| 2020-06-16T13:40:32.389194 | 2019-07-06T00:14:48 | 2019-07-06T00:14:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,689 |
py
|
import os
import json
class IDAFuns():
def __init__(self, path, lgr):
self.funs = {}
self.lgr = lgr
self.did_paths = []
#self.lgr.debug('IDAFuns for path %s' % path)
if os.path.isfile(path):
with open(path) as fh:
jfuns = json.load(fh)
for sfun in jfuns:
fun = int(sfun)
self.funs[fun] = jfuns[sfun]
def getFunPath(self, path):
fun_path = path+'.funs'
if not os.path.isfile(fun_path):
''' No functions file, check for symbolic links '''
#self.lgr.debug('is link? %s' % path)
if os.path.islink(path):
actual = os.path.join(os.path.dirname(path), os.readlink(path))
#self.lgr.debug('actual %s' % actual)
fun_path = actual+'.funs'
return fun_path
def add(self, path, offset):
if path in self.did_paths:
return
else:
self.did_paths.append(path)
funfile = self.getFunPath(path)
if os.path.isfile(funfile):
with open(funfile) as fh:
#self.lgr.debug('IDAFuns add for path %s offset 0x%x' % (path, offset))
newfuns = json.load(fh)
for f in newfuns:
fun = int(f)+offset
self.funs[fun] = {}
self.funs[fun]['start'] = fun
self.funs[fun]['end'] = newfuns[f]['end']+offset
self.funs[fun]['name'] = newfuns[f]['name']
#self.lgr.debug('idaFun add %s was %s %x %x now %x %x %x' % (newfuns[f]['name'], f, newfuns[f]['start'], newfuns[f]['end'], fun, self.funs[fun]['start'], self.funs[fun]['end']))
else:
#self.lgr.debug('IDAFuns NOTHING at %s' % funfile)
pass
def isFun(self, fun):
if fun in self.funs:
return True
else:
return False
def getName(self, fun):
if fun in self.funs:
return self.funs[fun]['name']
else:
return None
def inFun(self, ip, fun):
#self.lgr.debug('is 0x%x in %x ' % (ip, fun))
if fun in self.funs:
if ip >= self.funs[fun]['start'] and ip <= self.funs[fun]['end']:
return True
else:
return False
def getFun(self, ip):
for fun in self.funs:
if ip >= self.funs[fun]['start'] and ip <= self.funs[fun]['end']:
return fun
#print('ip 0x%x start 0x%x - 0x%x' % (ip, self.funs[fun]['start'], self.funs[fun]['end']))
return None
|
[
"[email protected]"
] | |
afc3ff37fffe764fe8ba9e14e30caf05b5c58063
|
cbee2fee9d91559b0db43c1050feeb1cd278183d
|
/analysis_ee_ZH_mumubb_cfg.py
|
456431702ceeb985fd4c2020abaaddc4711b638a
|
[] |
no_license
|
cbernet/fcc-ee-higgs
|
dec6e7963d640cd20eea65b7e8cbfa1d370b4e6d
|
8f7ebdae52b19ad1829173f08145a81e0d090aa9
|
refs/heads/master
| 2021-01-20T07:43:12.188590 | 2018-10-18T14:53:36 | 2018-10-18T14:53:36 | 90,035,545 | 0 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,699 |
py
|
'''Example configuration file for an ee->ZH->mumubb analysis in heppy, with the FCC-ee
While studying this file, open it in ipython as well as in your editor to
get more information:
ipython
from analysis_ee_ZH_cfg import *
'''
import sys
import os
import copy
import heppy.framework.config as cfg
import logging
# next 2 lines necessary to deal with reimports from ipython
logging.shutdown()
reload(logging)
# global logging level for the heppy framework.
# in addition, all the analyzers declared below have their own logger,
# and each of them can be set to a different logging level.
logging.basicConfig(level=logging.WARNING)
# setting the random seed for reproducible results
import heppy.statistics.rrandom as random
# do not forget to comment out the following line if you want to produce and combine
# several samples of events
random.seed(0xdeadbeef)
# loading the FCC event data model library to decode
# the format of the events in the input file
# help(Events) for more information
from ROOT import gSystem
gSystem.Load("libdatamodelDict")
from EventStore import EventStore as Events
# setting the event printout
# help(Event) for more information
from heppy.framework.event import Event
# comment the following line to see all the collections stored in the event
# if a collection is listed here, then "print loop.event.papasevent" will include it
Event.print_patterns=['zeds*', 'higgs*', 'jets*', 'bquarks', 'recoil*', 'collections']
# definition of the collider
# help(Collider) for more information
from heppy.configuration import Collider
Collider.BEAMS = 'ee'
Collider.SQRTS = 240.
jet_correction = True
# import pdb; pdb.set_trace()
# mode = 'pythia/ee_to_ZH_Oct30'
mode = 'pythia/ee_to_ZZ_Sep12_A_2'
# mode = 'all'
nfiles = 1
# mode = 'test'
### definition of input samples
### from components.ZH_Znunu import components as cps
##from fcc_ee_higgs.components.all import load_components
##cps = load_components(mode='pythia')
from fcc_datasets.fcc_component import FCCComponent
zh = FCCComponent(
'pythia/ee_to_ZH_Oct30',
splitFactor=1
)
zz = FCCComponent(
'pythia/ee_to_ZZ_Sep12_A_2',
splitFactor=1
)
cpslist = [
zh,
zz
]
cps = dict( (c.name, c) for c in cpslist)
selectedComponents = cps.values()
for comp in selectedComponents:
comp.splitFactor = min(len(comp.files),nfiles)
test_filename = os.path.abspath('samples/test/ee_ZH_Hbb.root')
if mode == 'test':
comp = cps['pythia/ee_to_ZH_Oct30']
comp.files = [test_filename]
comp.splitFactor = 1
selectedComponents = [comp]
elif mode == 'all':
selectedComponents = cps.values()
else:
selectedComponents = [cps[mode]]
if nfiles:
for cp in cps.values():
cp.files = cp.files[:nfiles]
# read FCC EDM events from the input root file(s)
# do help(Reader) for more information
from heppy.analyzers.fcc.Reader import Reader
source = cfg.Analyzer(
Reader,
gen_particles = 'GenParticle',
gen_vertices = 'GenVertex'
)
# gen level filtering
lepton_id = 13
from heppy.analyzers.Selector import Selector
gen_leptons = cfg.Analyzer(
Selector,
'gen_leptons',
output = 'gen_leptons',
input_objects = 'gen_particles',
filter_func = lambda ptc: ptc.pt() > 10. and abs(ptc.pdgid()) == lepton_id
)
from heppy.analyzers.EventFilter import EventFilter
gen_counter = cfg.Analyzer(
EventFilter ,
'gen_counter',
input_objects = 'gen_leptons',
min_number = 2,
veto = False
)
from fcc_ee_higgs.analyzers.GenResonanceAnalyzer import GenResonanceAnalyzer
gen_ana = cfg.Analyzer(
GenResonanceAnalyzer,
pdgids=[23, 25],
statuses=[62]
)
# importing the papas simulation and reconstruction sequence,
# as well as the detector used in papas
# check papas_cfg.py for more information
from heppy.test.papas_cfg import papas, papas_sequence, detector
from heppy.test.papas_cfg import papasdisplaycompare as display
# Use a Selector to select leptons from the output of papas simulation.
# Currently, we're treating electrons and muons transparently.
# we could use two different instances for the Selector module
# to get separate collections of electrons and muons
# help(Selector) for more information
leptons = cfg.Analyzer(
Selector,
'sel_leptons',
output = 'leptons',
input_objects = 'rec_particles',
filter_func = lambda ptc: ptc.e()>10. and abs(ptc.pdgid()) in [11, 13]
)
# Compute lepton isolation w/r other particles in the event.
# help(IsolationAnalyzer) for more information
from heppy.analyzers.IsolationAnalyzer import IsolationAnalyzer
from heppy.particles.isolation import EtaPhiCircle
iso_leptons = cfg.Analyzer(
IsolationAnalyzer,
candidates = 'leptons',
particles = 'rec_particles',
iso_area = EtaPhiCircle(0.4)
)
# Select isolated leptons with a Selector
# one can pass a function like this one to the filter:
def relative_isolation(lepton):
sumpt = lepton.iso_211.sumpt + lepton.iso_22.sumpt + lepton.iso_130.sumpt
sumpt /= lepton.pt()
return sumpt
# ... or use a lambda statement as done below.
sel_iso_leptons = cfg.Analyzer(
Selector,
'sel_iso_leptons',
output = 'sel_iso_leptons',
input_objects = 'leptons',
# filter_func = relative_isolation
filter_func = lambda lep : lep.iso.sumpt/lep.pt()<0.5 # fairly loose
)
# Building Zeds
# help(ResonanceBuilder) for more information
from heppy.analyzers.ResonanceBuilder import ResonanceBuilder
zeds = cfg.Analyzer(
ResonanceBuilder,
output = 'zeds',
leg_collection = 'sel_iso_leptons',
pdgid = 23
)
zed_counter = cfg.Analyzer(
EventFilter ,
'zed_counter',
input_objects = 'zeds',
min_number = 0,
veto = False
)
# Computing the recoil p4 (here, p_initial - p_zed)
# help(RecoilBuilder) for more information
sqrts = Collider.SQRTS
from heppy.analyzers.RecoilBuilder import RecoilBuilder
recoil = cfg.Analyzer(
RecoilBuilder,
instance_label = 'recoil',
output = 'recoil',
sqrts = sqrts,
to_remove = 'zeds_legs'
)
missing_energy = cfg.Analyzer(
RecoilBuilder,
instance_label = 'missing_energy',
output = 'missing_energy',
sqrts = sqrts,
to_remove = 'jets'
)
missing_energy_rescaled = cfg.Analyzer(
RecoilBuilder,
instance_label = 'missing_energy_rescaled',
output = 'missing_energy_rescaled',
sqrts = sqrts,
to_remove = 'jets_rescaled'
)
# Creating a list of particles excluding the decay products of the best zed.
# help(Masker) for more information
from heppy.analyzers.Masker import Masker
particles_not_zed = cfg.Analyzer(
Masker,
output = 'particles_not_zed',
input = 'rec_particles',
mask = 'zeds_legs',
)
# Make jets from the particles not used to build the best zed.
# Here the event is forced into 2 jets to target ZH, H->b bbar)
# help(JetClusterizer) for more information
from heppy.analyzers.fcc.JetClusterizer import JetClusterizer
jets = cfg.Analyzer(
JetClusterizer,
output = 'jets',
particles = 'particles_not_zed',
fastjet_args = dict( njets = 2 ),
njets_required=False
)
if jet_correction:
from heppy.analyzers.JetEnergyCorrector import JetEnergyCorrector
jets_cor = cfg.Analyzer(
JetEnergyCorrector,
input_jets='jets',
detector=detector
)
jets = cfg.Sequence(jets, jets_cor)
from fcc_ee_higgs.analyzers.ZHnunubbJetRescaler import ZHnunubbJetRescaler
jet_rescaling = cfg.Analyzer(
ZHnunubbJetRescaler,
output='jets_rescaled',
jets='jets',
)
# b tagging
from heppy.test.btag_parametrized_cfg import btag_parametrized, btag
from heppy.analyzers.roc import cms_roc
btag.roc = cms_roc
def is_bjet(jet):
return jet.tags['b'] == 1
bjets = cfg.Analyzer(
Selector,
'bjets',
output = 'bjets',
input_objects = 'jets',
# filter_func=is_bjet,
filter_func = lambda jet: jet.tags['b'] == 1
)
onebjet = cfg.Analyzer(
EventFilter ,
'onebjet',
input_objects = 'bjets',
min_number = 1,
veto = False
)
# Build Higgs candidates from pairs of jets.
higgses_rescaled = cfg.Analyzer(
ResonanceBuilder,
output = 'higgses_rescaled',
leg_collection = 'jets_rescaled',
pdgid = 25
)
higgses = cfg.Analyzer(
ResonanceBuilder,
output = 'higgses',
leg_collection = 'jets',
pdgid = 25
)
# Just a basic analysis-specific event Selection module.
# this module implements a cut-flow counter
# After running the example as
# heppy_loop.py Trash/ analysis_ee_ZH_cfg.py -f -N 100
# this counter can be found in:
# Trash/example/heppy.analyzers.examples.zh.selection.Selection_cuts/cut_flow.txt
# Counter cut_flow :
# All events 100 1.00 1.0000
# At least 2 leptons 87 0.87 0.8700
# Both leptons e>30 79 0.91 0.7900
# For more information, check the code of the Selection class
# in heppy/analyzers/examples/zh/selection.py
from heppy.analyzers.examples.zh.selection import Selection
selection = cfg.Analyzer(
Selection,
instance_label='cuts'
)
# Analysis-specific ntuple producer
# please have a look at the code of the ZHTreeProducer class,
# in heppy/analyzers/examples/zh/ZHTreeProducer.py
from fcc_ee_higgs.analyzers.ZHTreeProducer import ZHTreeProducer
tree = cfg.Analyzer(
ZHTreeProducer,
jet_collections = ['jets'],
resonances=['higgses', 'zeds'],
misenergy = ['missing_energy'],
recoil='recoil'
)
from heppy.analyzers.PDebugger import PDebugger
pdebug = cfg.Analyzer(
PDebugger,
output_to_stdout = False, #optional
debug_filename = os.getcwd()+'/python_physics_debug.log' #optional argument
)
# definition of a sequence of analyzers,
# the analyzers will process each event in this order
sequence = cfg.Sequence(
source,
gen_leptons,
gen_counter,
papas_sequence,
leptons,
iso_leptons,
sel_iso_leptons,
zeds,
zed_counter,
recoil,
particles_not_zed,
jets,
missing_energy,
# jet_rescaling,
btag_parametrized,
bjets,
# onebjet,
# missing_energy_rescaled,
higgses,
# higgses_rescaled,
# selection,
tree,
# display
)
# Specifics to read FCC events
from ROOT import gSystem
gSystem.Load("libdatamodelDict")
from EventStore import EventStore as Events
config = cfg.Config(
components = selectedComponents,
sequence = sequence,
services = [],
events_class = Events
)
|
[
"[email protected]"
] | |
948cdbd33fbfdf2a1feb1b18a771048e4de9a817
|
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
|
/indices/nnswiftest.py
|
f328f0897e70191363f2933df675b46faa2b20d7
|
[] |
no_license
|
psdh/WhatsintheVector
|
e8aabacc054a88b4cb25303548980af9a10c12a8
|
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
|
refs/heads/master
| 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 |
Python
|
UTF-8
|
Python
| false | false | 391 |
py
|
ii = [('RogePAV.py', 3), ('RennJIT.py', 1), ('KembFJ1.py', 1), ('CarlTFR.py', 3), ('AinsWRR3.py', 1), ('BailJD1.py', 1), ('RoscTTI2.py', 1), ('CoolWHM.py', 1), ('LandWPA.py', 1), ('BuckWGM.py', 2), ('CoopJBT.py', 1), ('KirbWPW2.py', 1), ('KembFJ2.py', 1), ('LewiMJW.py', 1), ('AinsWRR2.py', 1), ('BrewDTO.py', 1), ('FitzRNS2.py', 2), ('NortSTC.py', 1), ('BowrJMM2.py', 1), ('BeckWRE.py', 1)]
|
[
"[email protected]"
] | |
ffe101fce315842b655c25a42ead6035be8f11af
|
9b422078f4ae22fe16610f2ebc54b8c7d905ccad
|
/xlsxwriter/test/comparison/test_table26.py
|
b49983db23e74cea0ced01081727ac1e0965388c
|
[
"BSD-2-Clause-Views"
] |
permissive
|
projectsmahendra/XlsxWriter
|
73d8c73ea648a911deea63cb46b9069fb4116b60
|
9b9d6fb283c89af8b6c89ad20f72b8208c2aeb45
|
refs/heads/master
| 2023-07-21T19:40:41.103336 | 2023-07-08T16:54:37 | 2023-07-08T16:54:37 | 353,636,960 | 0 | 0 |
NOASSERTION
| 2021-04-01T08:57:21 | 2021-04-01T08:57:20 | null |
UTF-8
|
Python
| false | false | 1,139 |
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('table26.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with tables."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_column('C:D', 10.288)
worksheet.set_column('F:G', 10.288)
worksheet.add_table('C2:D3')
worksheet.add_table('F3:G3', {'header_row': 0})
# These tables should be ignored since the ranges are incorrect.
import warnings
warnings.filterwarnings('ignore')
worksheet.add_table('I2:J2')
worksheet.add_table('L3:M3', {'header_row': 1})
workbook.close()
self.assertExcelEqual()
|
[
"[email protected]"
] | |
7e15463225ac1ec4cf055441229ae9fc9583bf7c
|
19a32440205b2caeec67c73c10d917b5fb30a86a
|
/isi_sdk/models/groups_group_members.py
|
7588806de998c9e5618d140042d2af2c4dbe5e33
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
marrotte/isilon_sdk_python
|
480e84312f5924a506aeb09c9c7cae79a2b9b7f4
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
refs/heads/master
| 2020-03-23T07:31:40.376316 | 2016-06-07T23:44:31 | 2016-06-07T23:44:31 | 141,277,076 | 1 | 0 |
MIT
| 2018-07-17T11:02:08 | 2018-07-17T11:02:08 | null |
UTF-8
|
Python
| false | false | 3,949 |
py
|
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class GroupsGroupMembers(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
GroupsGroupMembers - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'members': 'list[GroupsGroupMember]',
'resume': 'str'
}
self.attribute_map = {
'members': 'members',
'resume': 'resume'
}
self._members = None
self._resume = None
@property
def members(self):
"""
Gets the members of this GroupsGroupMembers.
:return: The members of this GroupsGroupMembers.
:rtype: list[GroupsGroupMember]
"""
return self._members
@members.setter
def members(self, members):
"""
Sets the members of this GroupsGroupMembers.
:param members: The members of this GroupsGroupMembers.
:type: list[GroupsGroupMember]
"""
self._members = members
@property
def resume(self):
"""
Gets the resume of this GroupsGroupMembers.
Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
:return: The resume of this GroupsGroupMembers.
:rtype: str
"""
return self._resume
@resume.setter
def resume(self, resume):
"""
Sets the resume of this GroupsGroupMembers.
Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
:param resume: The resume of this GroupsGroupMembers.
:type: str
"""
self._resume = resume
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
[
"[email protected]"
] | |
81400f6a60d5ce716e6bfe14b26e3e9e580e1531
|
2293c76c3d18e2fcd44ded90bd40113d26285663
|
/pyeccodes/defs/grib2/tables/4/4_2_0_1_table.py
|
df23e0fdf6df61f8139bc1cf8b1185d7b5dd5ad9
|
[
"Apache-2.0"
] |
permissive
|
ecmwf/pyeccodes
|
b1f121dbddf68d176a03805ed5144ba0b37ac211
|
dce2c72d3adcc0cb801731366be53327ce13a00b
|
refs/heads/master
| 2022-04-23T10:37:40.524078 | 2020-04-18T06:30:29 | 2020-04-18T06:30:29 | 255,554,540 | 9 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,520 |
py
|
def load(h):
return ({'abbr': 0, 'code': 0, 'title': 'Specific humidity', 'units': 'kg kg-1'},
{'abbr': 1, 'code': 1, 'title': 'Relative humidity', 'units': '%'},
{'abbr': 2, 'code': 2, 'title': 'Humidity mixing ratio', 'units': 'kg kg-1'},
{'abbr': 3, 'code': 3, 'title': 'Precipitable water', 'units': 'kg m-2'},
{'abbr': 4, 'code': 4, 'title': 'Vapor pressure', 'units': 'Pa'},
{'abbr': 5, 'code': 5, 'title': 'Saturation deficit', 'units': 'Pa'},
{'abbr': 6, 'code': 6, 'title': 'Evaporation', 'units': 'kg m-2'},
{'abbr': 7, 'code': 7, 'title': 'Precipitation rate', 'units': 'kg m-2 s-1'},
{'abbr': 8, 'code': 8, 'title': 'Total precipitation', 'units': 'kg m-2'},
{'abbr': 9,
'code': 9,
'title': 'Large scale precipitation (non-convective)',
'units': 'kg m-2'},
{'abbr': 10,
'code': 10,
'title': 'Convective precipitation',
'units': 'kg m-2'},
{'abbr': 11, 'code': 11, 'title': 'Snow depth', 'units': 'm'},
{'abbr': 12,
'code': 12,
'title': 'Snowfall rate water equivalent',
'units': 'kg m-2 s-1'},
{'abbr': 13,
'code': 13,
'title': 'Water equivalent of accumulated snow depth',
'units': 'kg m-2'},
{'abbr': 14, 'code': 14, 'title': 'Convective snow', 'units': 'kg m-2'},
{'abbr': 15, 'code': 15, 'title': 'Large scale snow', 'units': 'kg m-2'},
{'abbr': 16, 'code': 16, 'title': 'Snow melt', 'units': 'kg m-2'},
{'abbr': 17, 'code': 17, 'title': 'Snow age', 'units': 'day'},
{'abbr': 18, 'code': 18, 'title': 'Absolute humidity', 'units': 'kg m-3'},
{'abbr': 19,
'code': 19,
'title': 'Precipitation type',
'units': 'Code table 4.201'},
{'abbr': 20,
'code': 20,
'title': 'Integrated liquid water',
'units': 'kg m-2'},
{'abbr': 21, 'code': 21, 'title': 'Condensate', 'units': 'kg kg-1'},
{'abbr': 22, 'code': 22, 'title': 'Cloud mixing ratio', 'units': 'kg kg-1'},
{'abbr': 23,
'code': 23,
'title': 'Ice water mixing ratio',
'units': 'kg kg-1'},
{'abbr': 24, 'code': 24, 'title': 'Rain mixing ratio', 'units': 'kg kg-1'},
{'abbr': 25, 'code': 25, 'title': 'Snow mixing ratio', 'units': 'kg kg-1'},
{'abbr': 26,
'code': 26,
'title': 'Horizontal moisture convergence',
'units': 'kg kg-1 s-1'},
{'abbr': 27, 'code': 27, 'title': 'Maximum relative humidity', 'units': '%'},
{'abbr': 28,
'code': 28,
'title': 'Maximum absolute humidity',
'units': 'kg m-3'},
{'abbr': 29, 'code': 29, 'title': 'Total snowfall', 'units': 'm'},
{'abbr': 30,
'code': 30,
'title': 'Precipitable water category',
'units': 'Code table 4.202'},
{'abbr': 31, 'code': 31, 'title': 'Hail', 'units': 'm'},
{'abbr': 32,
'code': 32,
'title': 'Graupel (snow pellets)',
'units': 'kg kg-1'},
{'abbr': 33,
'code': 33,
'title': 'Categorical rain',
'units': 'Code table 4.222'},
{'abbr': 34,
'code': 34,
'title': 'Categorical freezing rain',
'units': 'Code table 4.222'},
{'abbr': 35,
'code': 35,
'title': 'Categorical ice pellets',
'units': 'Code table 4.222'},
{'abbr': 36,
'code': 36,
'title': 'Categorical snow',
'units': 'Code table 4.222'},
{'abbr': 37,
'code': 37,
'title': 'Convective precipitation rate',
'units': 'kg m-2 s-1'},
{'abbr': 38,
'code': 38,
'title': 'Horizontal moisture divergence',
'units': 'kg kg-1 s-1'},
{'abbr': 39,
'code': 39,
'title': 'Percent frozen precipitation',
'units': '%'},
{'abbr': 40, 'code': 40, 'title': 'Potential evaporation', 'units': 'kg m-2'},
{'abbr': 41,
'code': 41,
'title': 'Potential evaporation rate',
'units': 'W m-2'},
{'abbr': 42, 'code': 42, 'title': 'Snow cover', 'units': '%'},
{'abbr': 43,
'code': 43,
'title': 'Rain fraction of total cloud water',
'units': 'Proportion'},
{'abbr': 44, 'code': 44, 'title': 'Rime factor', 'units': 'Numeric'},
{'abbr': 45,
'code': 45,
'title': 'Total column integrated rain',
'units': 'kg m-2'},
{'abbr': 46,
'code': 46,
'title': 'Total column integrated snow',
'units': 'kg m-2'},
{'abbr': 51, 'code': 51, 'title': 'Total column water', 'units': 'kg m-2'},
{'abbr': 52,
'code': 52,
'title': 'Total precipitation rate',
'units': 'kg m-2 s-1'},
{'abbr': 53,
'code': 53,
'title': 'Total snowfall rate water equivalent',
'units': 'kg m-2 s-1'},
{'abbr': 54,
'code': 54,
'title': 'Large scale precipitation rate',
'units': 'kg m-2 s-1'},
{'abbr': 55,
'code': 55,
'title': 'Convective snowfall rate water equivalent',
'units': 'kg m-2 s-1'},
{'abbr': 56,
'code': 56,
'title': 'Large scale rate water equivalent',
'units': 'kg m-2 s-1'},
{'abbr': 57, 'code': 57, 'title': 'Total snowfall rate', 'units': 'm s-1'},
{'abbr': 58,
'code': 58,
'title': 'Convective snowfall rate',
'units': 'm s-1'},
{'abbr': 59,
'code': 59,
'title': 'Large scale snowfall rate',
'units': 'm s-1'},
{'abbr': 60,
'code': 60,
'title': 'Snow depth water equivalent',
'units': 'kg m-2'},
{'abbr': None, 'code': 255, 'title': 'Missing'})
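# --- Editor's usage sketch (illustrative, not part of the generated table) ---
# Each entry is a plain dict; look one up by its GRIB code value. The 'h'
# argument is unused by this particular table module, so None is passed here.
if __name__ == '__main__':
    table = load(None)
    entry = next(e for e in table if e['code'] == 8)
    print(entry['title'], entry['units'])  # Total precipitation kg m-2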
|
[
"[email protected]"
] | |
08e6f1e1db4df41d78df96ea52bdc0967f4d8f87
|
9b64f0f04707a3a18968fd8f8a3ace718cd597bc
|
/huaweicloud-sdk-elb/huaweicloudsdkelb/v2/model/session_persistence.py
|
60f23e36214ae6ab55ee3ba9361efc5946182bfb
|
[
"Apache-2.0"
] |
permissive
|
jaminGH/huaweicloud-sdk-python-v3
|
eeecb3fb0f3396a475995df36d17095038615fba
|
83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b
|
refs/heads/master
| 2023-06-18T11:49:13.958677 | 2021-07-16T07:57:47 | 2021-07-16T07:57:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,074 |
py
|
# coding: utf-8
import re
import six
class SessionPersistence:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'type': 'str',
'cookie_name': 'str',
'persistence_timeout': 'int'
}
attribute_map = {
'type': 'type',
'cookie_name': 'cookie_name',
'persistence_timeout': 'persistence_timeout'
}
def __init__(self, type=None, cookie_name=None, persistence_timeout=None):
"""SessionPersistence - a model defined in huaweicloud sdk"""
self._type = None
self._cookie_name = None
self._persistence_timeout = None
self.discriminator = None
self.type = type
if cookie_name is not None:
self.cookie_name = cookie_name
if persistence_timeout is not None:
self.persistence_timeout = persistence_timeout
@property
def type(self):
"""Gets the type of this SessionPersistence.
        Session persistence type. SOURCE_IP: requests from the same source IP are sent to the same backend server. HTTP_COOKIE: on the first request the load balancer generates a cookie and inserts it into the response; subsequent requests are sent to the backend server that handled the first request. APP_COOKIE: on the first request the backend server generates a cookie that is inserted into the response; subsequent requests are sent to the backend server that handled the first request. When the backend server protocol is TCP, only SOURCE_IP takes effect; when the backend server protocol is HTTP, only HTTP_COOKIE or APP_COOKIE takes effect.
:return: The type of this SessionPersistence.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this SessionPersistence.
        Session persistence type. SOURCE_IP: requests from the same source IP are sent to the same backend server. HTTP_COOKIE: on the first request the load balancer generates a cookie and inserts it into the response; subsequent requests are sent to the backend server that handled the first request. APP_COOKIE: on the first request the backend server generates a cookie that is inserted into the response; subsequent requests are sent to the backend server that handled the first request. When the backend server protocol is TCP, only SOURCE_IP takes effect; when the backend server protocol is HTTP, only HTTP_COOKIE or APP_COOKIE takes effect.
:param type: The type of this SessionPersistence.
:type: str
"""
self._type = type
@property
def cookie_name(self):
"""Gets the cookie_name of this SessionPersistence.
        Name of the cookie. Can only be specified when the session persistence type is APP_COOKIE.
:return: The cookie_name of this SessionPersistence.
:rtype: str
"""
return self._cookie_name
@cookie_name.setter
def cookie_name(self, cookie_name):
"""Sets the cookie_name of this SessionPersistence.
        Name of the cookie. Can only be specified when the session persistence type is APP_COOKIE.
:param cookie_name: The cookie_name of this SessionPersistence.
:type: str
"""
self._cookie_name = cookie_name
@property
def persistence_timeout(self):
"""Gets the persistence_timeout of this SessionPersistence.
        Session persistence timeout. Value range: [1,60] minutes when the backend server protocol is TCP or UDP; [1,1440] minutes when the backend server protocol is HTTP. This field does not take effect when type is APP_COOKIE.
:return: The persistence_timeout of this SessionPersistence.
:rtype: int
"""
return self._persistence_timeout
@persistence_timeout.setter
def persistence_timeout(self, persistence_timeout):
"""Sets the persistence_timeout of this SessionPersistence.
        Session persistence timeout. Value range: [1,60] minutes when the backend server protocol is TCP or UDP; [1,1440] minutes when the backend server protocol is HTTP. This field does not take effect when type is APP_COOKIE.
:param persistence_timeout: The persistence_timeout of this SessionPersistence.
:type: int
"""
self._persistence_timeout = persistence_timeout
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
import simplejson as json
return json.dumps(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SessionPersistence):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
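# --- Editor's usage sketch (illustrative, not part of the generated file) ---
# Field semantics follow the translated docstrings above: cookie_name is only
# meaningful for APP_COOKIE, and persistence_timeout is ignored for that type.
if __name__ == '__main__':
    sp = SessionPersistence(type='APP_COOKIE', cookie_name='JSESSIONID')
    print(sp.to_dict())  # {'type': 'APP_COOKIE', 'cookie_name': 'JSESSIONID', 'persistence_timeout': None}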
|
[
"[email protected]"
] | |
1143ef9ee7a22fa72650167e37cbfb456f031bf6
|
f7c5e3f5834206a7b0d1dadd773d1de032f731e7
|
/dmerce2/DMS/DGP/cipher.py
|
e22f959d5edf51dd21a61bfffe4001a9e3efd429
|
[] |
no_license
|
rbe/dmerce
|
93d601462c50dfbbf62b577803ae697d3abde333
|
3cfcae894c165189cc3ff61e27ca284f09e87871
|
refs/heads/master
| 2021-01-01T17:06:27.872197 | 2012-05-04T07:22:26 | 2012-05-04T07:22:26 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,710 |
py
|
import mainlib
class Cipher:
"the base class for all blockcipher algorithms"
def __init__(self, blocklen, wordlen, rounds, times):
self.__blocklen = blocklen
self.__wordlen = wordlen
self.__rounds = rounds
self.__wpb = self.__blocklen / self.__wordlen
self.__times = times
def NewData(self, data):
x = mainlib.String_Mince(mainlib.String_Chop(data, self.__blocklen), self.__wpb)
for i in range(0, len(x)):
for j in range(0, len(x[i])):
x[i][j] = mainlib.S2L_Convert(x[i][j])
self.__data = x
def Fillup(self, data):
return mainlib.String_Fillup(data, self.__blocklen)
def Filldown(self, data):
return mainlib.String_Filldown(data)
def OutFormat(self, output):
x = ''
for i in range(0, len(output)):
for j in range(0, len(output[i])):
x = x + mainlib.L2S_Convert(output[i][j], self.__wordlen)
return x
# Key Schedule
def E_Key_Create(self, list):
k = []
k.append(self.KeyExpand(list[0]))
for i in range(1, self.__times, 2):
k.append(self.KeyInvert(self.KeyExpand(list[i])))
k.append(self.KeyExpand(list[i+1]))
self.__ekey = k
def D_Key_Create(self, list):
list.reverse()
k = []
k.append(self.KeyInvert(self.KeyExpand(list[0])))
for i in range(1, self.__times, 2):
k.append(self.KeyExpand(list[i]))
k.append(self.KeyInvert(self.KeyExpand(list[i+1])))
self.__dkey = k
def IVinit(self, data):
y = []
data = mainlib.L2S_Convert(data, self.__blocklen)
for i in range(0, self.__wpb):
x = mainlib.String_Chop(data, self.__wordlen)
for j in range(0, len(x)):
x[j] = mainlib.S2L_Convert(x[j])
y.append(x)
y = y[0]
self.__iv = y
# ECB
def ECB_Encrypt(self):
"Electronic Codebook mode encryption"
output = []
for i in range(0, len(self.__data)):
x = self.Encrypt(self.__data[i], self.__ekey[0])
for j in range(1, self.__times, 2):
x = self.Decrypt(x, self.__ekey[j])
x = self.Encrypt(x, self.__ekey[j+1])
output.append(x)
return output
def ECB_Decrypt(self):
"Electronic Codebook mode decryption"
output = []
for i in range(0, len(self.__data)):
x = self.Decrypt(self.__data[i], self.__dkey[0])
for j in range(1, self.__times, 2):
x = self.Encrypt(x, self.__dkey[j])
x = self.Decrypt(x, self.__dkey[j+1])
output.append(x)
return output
# CBC
def CBC_Encrypt(self):
"Cipher Block Chaining mode encryption"
output = []
output.append(self.__iv)
for i in range(0, len(self.__data)):
x = []
for l in range(0, self.__wpb):
x.append(self.__data[i][l] ^ output[i][l])
x = self.Encrypt(x, self.__ekey[0])
for j in range(1, self.__times, 2):
x = self.Decrypt(x, self.__ekey[j])
x = self.Encrypt(x, self.__ekey[j+1])
output.append(x)
output[0:1] = []
return output
def CBC_Decrypt(self):
"Cipher Block Chaining mode decryption"
output = []
date = self.__data[:]
date.insert(0, self.__iv)
for i in range(0, len(self.__data)):
x = self.__data[i][:]
x = self.Decrypt(x, self.__dkey[0])
for j in range(1, self.__times, 2):
x = self.Encrypt(x, self.__dkey[j])
x = self.Decrypt(x, self.__dkey[j+1])
for l in range(0, self.__wpb):
x[l] = x[l] ^ date[i][l]
output.append(x)
return output
# CFB
def CFBE(self, data, key, iv):
pass
def CFBD(self, data, key, iv):
pass
# OFB
def OFBE(self, data, key, iv):
pass
def OFBD(self, data, key, iv):
pass
# API
def Encipher(self, data, key):
self.E_Key_Create(key)
data = self.Fillup(data)
self.NewData(data)
c = self.ECB_Encrypt()
return self.OutFormat(c)
def Decipher(self, data, key):
self.D_Key_Create(key)
self.NewData(data)
p = self.ECB_Decrypt()
p = self.OutFormat(p)
p = self.Filldown(p)
return p
# Counter
# BC
# PCBC
# CBCC
# OFBNLF
# PBC
# PFB
# CBCPD
|
[
"[email protected]"
] | |
e1323e36e8c66e4c7d890636ed43c37dfb9b447c
|
9059d9cbad4188ed2980f551151b9678ffb68b44
|
/mycode/13_exception/user_exception.py
|
c1e32cf9426a407991ac8f3a3d2e38c4390e01b1
|
[] |
no_license
|
mhee4321/python_basic
|
ad0e64fa21ecfab231a6627ba6abeea82d725690
|
86031975a9121efe5785e83f663255a7b4e4ba77
|
refs/heads/master
| 2023-02-11T20:31:54.353219 | 2021-01-07T05:44:31 | 2021-01-07T05:44:31 | 326,850,491 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 430 |
py
|
# Define a user-defined exception class
class NegativePriceException(Exception):
    # constructor: set the error message carried by the exception
    def __init__(self):
        super().__init__("Price can't be Negative")
def calc_price(value):
price = value * 100
if price < 0:
        # raise NegativePriceException explicitly
raise NegativePriceException
return price
print(calc_price(10))
print(calc_price(-10))
|
[
"[email protected]"
] | |
9e050f1bf998c8bf9bdb136685e56d99f8240ab7
|
8d910dcc2d15f1b21de02b9b46aee23c3f1df4cc
|
/004-median-of-two-sorted-arrays.py
|
ee0f710d58279c8e8e36102961dcef78cf9425df
|
[] |
no_license
|
MonikaBhasin7/leetcode-1
|
a7fa6115a7d3b604f8a1abb311d4cdc43182edef
|
77ff978aa259bd7e5da0d70037503c83e8cc78cc
|
refs/heads/master
| 2020-08-08T06:39:48.724618 | 2019-10-08T21:48:02 | 2019-10-08T21:48:02 | 213,760,531 | 0 | 0 | null | 2019-10-08T21:41:21 | 2019-10-08T21:41:20 | null |
UTF-8
|
Python
| false | false | 1,816 |
py
|
"""
Problem Link: https://leetcode.com/problems/median-of-two-sorted-arrays/
There are two sorted arrays nums1 and nums2 of size m and n respectively.
Find the median of the two sorted arrays. The overall run time complexity should be O(log (m+n)).
You may assume nums1 and nums2 cannot be both empty.
Example 1:
nums1 = [1, 3]
nums2 = [2]
The median is 2.0
Example 2:
nums1 = [1, 2]
nums2 = [3, 4]
The median is (2 + 3)/2 = 2.5
"""
class Solution:
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
if len(nums1) > len(nums2):
return self.findMedianSortedArrays(nums2,nums1)
x,y = len(nums1),len(nums2)
low, high = 0,x
while low <= high:
partitionX = (low+high)//2
partitionY = ((x+y+1)//2) - partitionX
if partitionX < x and nums2[partitionY-1] > nums1[partitionX]:
low = partitionX + 1
elif partitionX > 0 and nums1[partitionX-1] > nums2[partitionY]:
high = partitionX - 1
else:
if partitionX == 0:
maxLeft = nums2[partitionY-1]
elif partitionY == 0:
maxLeft = nums1[partitionX-1]
else:
maxLeft = max(nums1[partitionX-1],nums2[partitionY-1])
if (x+y) % 2 == 1:
return maxLeft
if partitionX == x:
minRight = nums2[partitionY]
elif partitionY == y:
minRight = nums1[partitionX]
else:
minRight = min(nums1[partitionX],nums2[partitionY])
return (maxLeft+minRight)/2
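# --- Editor's sanity check (illustrative, not part of the original file) ---
# Reproduces the two examples from the docstring; under Python 3 division the
# even-length case yields 2.5.
if __name__ == '__main__':
    s = Solution()
    print(s.findMedianSortedArrays([1, 3], [2]))     # 2
    print(s.findMedianSortedArrays([1, 2], [3, 4]))  # 2.5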
|
[
"[email protected]"
] | |
a85b8eda787f00e981f98efa8c36f34310fba38b
|
8370083dbbbd32740ad1862637809396dc7984e2
|
/paresh75/a1.py
|
ecd7b956c3d295ccc274e1b1a60f1a7febd2eb21
|
[] |
no_license
|
parshuramsail/PYTHON_LEARN
|
a919b14aab823e0f5e769d8936ddbfb357133db2
|
8c76720bf73f13cf96930e6d4d5128e6ba9aa535
|
refs/heads/main
| 2023-07-14T16:25:26.240555 | 2021-08-29T17:10:19 | 2021-08-29T17:10:19 | 401,095,644 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 573 |
py
|
# READ
#f=open("ram.txt","rt")
#content=f.read(3) # read only 3 characters
#print(content)
#content=f.read(344)
#print(content)
#f.close()
#f=open("ram.txt","rt")
#content=f.read()
## print(line,end="")
#f.close()
#READLINE()
#f=open("ram.txt","rt")
#print(f.readline()) # to print one line
#print(f.readline())#to print next line
#print(f.readline())#to print next line
#f.close()
#READLINES()
# TO STORE LINES IN LIST
#f=open("ram.txt","rt")
##print(f.readlines())
#f.close()
#OUTPUT:
#['ram is good boy\n', 'ram is king of this universe\n', 'ram is very smart\n']
|
[
"[email protected]"
] | |
341f476266c35eab353587eb301e389470a302a9
|
0fdb402809188c34702bc70e4d106e56ca8e2bd0
|
/Algorithms/tkinter.py
|
68887798977aa716073634bbb8f0b59f3dc37a4a
|
[] |
no_license
|
the07/Python
|
356f2018a85caeb9dd6ccb251636ff697eb613b6
|
af34cf3ffe01504632cf3654a0a5f89653e163cb
|
refs/heads/master
| 2021-01-06T20:36:33.718087 | 2017-11-24T06:58:32 | 2017-11-24T06:58:32 | 90,789,881 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,347 |
py
|
from tkinter import *
from PIL import Image, ImageTk
class Window(Frame):
def __init__(self, master = None):
Frame.__init__(self, master)
self.master = master
self.init_window()
def init_window(self):
self.master.title("GUI")
self.pack(fill=BOTH, expand=1)
#quitButton = Button(self, text="X",command=self.client_exit)
#quitButton.place(x=0,y=0)
menu = Menu(self.master)
self.master.config(menu=menu)
file = Menu(menu)
file.add_command(label='Exit', command=self.client_exit)
file.add_command(label='Save', command=self.client_exit)
menu.add_cascade(label='File', menu=file)
edit = Menu(menu)
edit.add_command(label='Show Image', command=self.showImg)
edit.add_command(label='Show Text', command=self.showTxt)
menu.add_cascade(label='Edit', menu=edit)
def client_exit(self):
exit()
def showImg(self):
load=Image.open('pic.jpg')
render = ImageTk.PhotoImage(load)
img = Label(self, image=render)
img.image = render
img.place(x=10,y=10)
def showTxt(self):
text = Label(self, text='Hey there good looking')
text.pack()
root = Tk()
root.geometry("400x300")
app = Window(root)
root.mainloop()
|
[
"[email protected]"
] | |
1a16098041fbca03cf063a8d634ba651c06669a2
|
1c83920efda583d0dcedda2ac9d91235094685e2
|
/web/appauth/constants.py
|
c9214cd0195bd4ac937081777d0947385459c7d5
|
[] |
no_license
|
eshandas/django_project_template
|
d866d2d8c5e206b0430e6130bc470042af50b7fa
|
09786f6201d8e83199a2c0b7a83b6b6b0c8fd285
|
refs/heads/master
| 2022-07-22T14:39:50.521081 | 2019-08-06T11:00:19 | 2019-08-06T11:00:19 | 65,455,207 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 981 |
py
|
class ResponseKeys(object):
    SESSION_ID = 'sessionId'
    USER = 'user'
    LOGGED_OUT = 'loggedOut'
class SuccessMessages(object):
PASSWORD_RESET = 'An email has been sent to change the password'
class FailMessages(object):
USER_INACTIVE = 'This user is not active'
INVALID_CREDENTIALS = 'Wrong username or password'
INVALID_EMAIL = 'This email does not exist'
INVALID_PASSWORD = 'Invalid password'
USER_ALREADY_EXISTS = 'This user already exists'
INVALID_SESSION_ID = 'Invalid Session Id'
TOKEN_MISSING = 'Token missing'
INVALID_TOKEN = 'Invalid token'
NOT_ADMIN = 'User is not an admin'
AUTH_HEADER_INVALID = 'Invalid Authorization'
INVALID_VALUE = 'Invalid merchant id or app id or secret'
class RequestKeys(object):
EMAIL = 'email'
PASSWORD = 'password'
CONFIRM_PASSWORD = 'confirm_password'
NEXT = 'next'
TOKEN = 'token'
|
[
"[email protected]"
] | |
16a12bf01a5c8f66c081745f20fe8d9e7257cbfc
|
2ff7e53d5e512cd762217ca54317982e07a2bb0c
|
/eve-8.51.857815/trinutils/bindings.py
|
e5220ff7fcd107047c5e0b787445c9b1c58c06ab
|
[] |
no_license
|
nanxijw/Clara-Pretty-One-Dick
|
66d3d69426642b79e8fd4cc8e0bec23adeeca6d6
|
50de3488a2140343c364efc2615cf6e67f152be0
|
refs/heads/master
| 2021-01-19T09:25:07.555284 | 2015-02-17T21:49:33 | 2015-02-17T21:49:33 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,701 |
py
|
#Embedded file name: trinutils\bindings.py
"""
Functions for dealing with bad value bindings on objects.
"""
import logging
import osutils.subst as subst
import trinity
logger = logging.getLogger(__name__)
def HasBrokenBindings(trinObj):
bindings = trinObj.Find('trinity.TriValueBinding')
for binding in bindings:
if not binding.destinationObject or not binding.sourceObject:
return True
if binding.destinationObject.GetRefCounts()[1] == 2:
return True
return False
def FixBrokenBindings(trinObj):
curveSets = trinObj.Find('trinity.TriCurveSet')
allBindings = trinObj.Find('trinity.TriValueBinding')
deleteCs = []
knownUsedCurves = []
deleteBinds = []
for cs in curveSets:
for binding in cs.bindings:
if not binding.destinationObject or not binding.sourceObject:
deleteBinds.append(binding)
elif binding.destinationObject.GetRefCounts()[1] == 2:
deleteBinds.append(binding)
else:
knownUsedCurves.append(binding.sourceObject)
for d in deleteBinds:
logger.info('Deleting binding: %s' % d.name)
cs.bindings.remove(d)
for cs in curveSets:
deleteCurves = []
for curve in cs.curves:
if curve not in knownUsedCurves:
usedElsewhere = False
for b in allBindings:
if b.sourceObject == curve and b not in deleteBinds:
usedElsewhere = True
logger.info('Curve found being used outside its curveset: %s' % curve.name)
break
if not usedElsewhere:
deleteCurves.append(curve)
for d in deleteCurves:
logger.info('Deleting curve: %s' % d.name)
cs.curves.remove(d)
for cs in curveSets:
if not cs.curves and not cs.bindings:
deleteCs.append(cs)
for d in deleteCs:
if hasattr(trinObj, 'curveSets'):
for cs in trinObj.curveSets:
if d == cs:
logger.info('Deleting curve set: %s' % d.name)
trinObj.curveSets.remove(d)
continue
return trinObj
def RepairFile(filePath):
filePath = subst.GetUnsubstedPath(filePath)
logger.info('==== File:%s====' % filePath)
original = trinity.Load(filePath)
if original:
if HasBrokenBindings(original):
logger.info('Broken bindings found!')
new = FixBrokenBindings(original)
trinity.Save(new, filePath)
else:
logger.info('No broken bindings found!')
|
[
"[email protected]"
] | |
1fb7deb6e862121c82bd11e1f35fb92ae1ba4494
|
4e5b112b32cc2eeffb39f7111122d0df13da4117
|
/Cap 9/Ex9.7.py
|
00af0098ccf8d32da4be756c75dd5d51b9252471
|
[
"MIT"
] |
permissive
|
FelipeDreissig/PenseEmPy
|
13c194f307a8ade747872efb1f4e50848f3c71a3
|
158a55d0e6bd06c8eadaa9159e816a1e4beb0ff7
|
refs/heads/main
| 2023-02-10T13:30:41.254544 | 2020-12-28T13:05:05 | 2020-12-28T13:05:05 | 324,915,293 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 448 |
py
|
# Exercise 9.7: print words that contain three consecutive pairs of double letters
def repeat():
caminho = open(r'C:\Users\dreis\Desktop\Estudos\Projetos\words.txt', 'r')
for palavras in caminho:
if len(palavras) > 6:
for i in range(0, len(palavras) - 6):
if palavras[i] == palavras[i + 1]:
if palavras[i + 2] == palavras[i + 3]:
if palavras[i + 4] == palavras[i + 5]:
print(palavras)
repeat()
|
[
"[email protected]"
] | |
3557efa72c93cd24fed3afd99d4cc0064394697e
|
ad518c153efb6ced9744d1df7e8bbd91820c5048
|
/fm-api/fm_api_test.py
|
e748dfdb31e5010f99906f208c4fd8e5ac7fe0a6
|
[
"Apache-2.0"
] |
permissive
|
starlingx-staging/x.stx-fault
|
eccefc69c5015872da26869a07efd36464a1ae5d
|
6cd8940170c1799f9aa2fd05a38b84de0e7d87b3
|
refs/heads/master
| 2020-03-19T02:03:52.136983 | 2018-05-30T23:16:06 | 2018-05-31T14:36:00 | 135,595,467 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,450 |
py
|
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2014 Wind River Systems, Inc.
#
# Author:
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from fm_api import *
from fm_api import constants
def print_alarm(alarm):
alarm_str = "alarm_id: " + alarm.alarm_id + ", " + "uuid: " + alarm.uuid + ", "
alarm_str += "alarm_type: " + alarm.alarm_type + "\n"
alarm_str += "state: " + alarm.alarm_state + ", ""severity: " + alarm.severity + ", " \
+ "entity_type_id: " + alarm.entity_type_id + ", timestamp: "+ alarm.timestamp + "\n"
alarm_str += "entity_instance_id: " + alarm.entity_instance_id + ", "
alarm_str += "probable cause:" + alarm.probable_cause + "\n"
print alarm_str
def create():
ser = FaultAPIs()
fault = Fault(alarm_id=constants.FM_ALARM_ID_VM_RESCUED,
alarm_state=constants.FM_ALARM_STATE_SET,
entity_type_id=constants.FM_ENTITY_TYPE_INSTANCE,
entity_instance_id=constants.FM_ENTITY_TYPE_INSTANCE + '=' + 'a4e4cdb7-2ee6-4818-84c8-5310fcd67b5d',
severity = constants.FM_ALARM_SEVERITY_CRITICAL,
reason_text = "Unknown",
alarm_type = constants.FM_ALARM_TYPE_5,
probable_cause = constants.ALARM_PROBABLE_CAUSE_8,
proposed_repair_action = None,
service_affecting = False,
suppression = False)
uuid =ser.set_fault(fault)
print uuid
def delete(alarm_id, instance_id):
ser=FaultAPIs()
ret = ser.clear_fault(alarm_id,instance_id)
print "Delete fault return %s" % str(ret)
def del_all(instance_id):
ser=FaultAPIs()
ret= ser.clear_all(instance_id)
print "Delete faults return: %s" % str(ret)
def get(alarm_id, instance_id):
ser=FaultAPIs()
a = ser.get_fault(alarm_id, instance_id)
if a is not None:
print_alarm(a)
else:
print "Alarm not found"
def get_all(instance_id):
ser=FaultAPIs()
ll= ser.get_faults(instance_id)
if ll is not None:
print "Total alarm returned: %d\n" % len(ll)
for i in ll:
print_alarm(i)
else:
print "No alarm returned"
def get_list(alarm_id):
ser=FaultAPIs()
ll= ser.get_faults_by_id(alarm_id)
if ll is not None:
print "Total alarm returned: %d\n" % len(ll)
for i in ll:
print_alarm(i)
else:
print "No alarm returned"
if __name__ == "__main__":
if sys.argv[1] == "create":
sys.exit(create())
elif sys.argv[1] == "del":
sys.exit(delete(sys.argv[2],sys.argv[3]))
elif sys.argv[1] == "get":
sys.exit(get(sys.argv[2],sys.argv[3]))
elif sys.argv[1] == "get_all":
sys.exit(get_all(sys.argv[2]))
elif sys.argv[1] == "del_all":
sys.exit(del_all(sys.argv[2]))
elif sys.argv[1] == "get_list":
sys.exit(get_list(sys.argv[2]))
|
[
"[email protected]"
] | |
81ef3a1abaf74c63ccf1b403a791df16705a2301
|
dbaa45978f3392c200f8576a82e7f0ed063b9906
|
/home/blocks.py
|
d0968d1a95395de3c61f9843e28b0b6821588e5d
|
[] |
no_license
|
dentemm/yourin
|
033d29c6f946bb805f240f4c51bfabf1fa206dca
|
ec61fe8dfe1397ff1ee2fc76dc45caed529d7aa1
|
refs/heads/master
| 2022-12-02T12:59:39.431498 | 2017-04-27T19:34:29 | 2017-04-27T19:34:29 | 71,818,880 | 0 | 0 | null | 2022-11-22T01:29:32 | 2016-10-24T18:26:14 |
CSS
|
UTF-8
|
Python
| false | false | 4,082 |
py
|
from django import forms
from wagtail.wagtailcore import blocks
from wagtail.wagtailimages.blocks import ImageChooserBlock
from wagtail.wagtailembeds.blocks import EmbedBlock
TEXT_ALIGNMENT_CHOICES = (
('text-left', 'Links'),
('text-right', 'Rechts'),
('text-center', 'Centreer'),
)
class CarouselImageBlock(blocks.StructBlock):
afbeelding = ImageChooserBlock()
#tekst = blocks.CharBlock(required=False)
class Meta:
icon = 'image'
label = 'carousel afbeelding'
class BlogTitleBlock(blocks.StructBlock):
image = ImageChooserBlock(label='afbeelding', required=True)
title = blocks.CharBlock(label='titel', required=True)
class Meta:
template = 'home/blocks/title_block.html'
label = 'titel'
icon = 'title'
class SubtitleBlock(blocks.CharBlock):
class Meta:
template = 'home/blocks/subtitle_block.html'
label = 'ondertitel'
icon = 'pilcrow'
class IntroTextBlock(blocks.TextBlock):
class Meta:
template = 'home/blocks/introtext_block.html'
label = 'intro'
icon = 'snippet'
class ParagraphBlock(blocks.StructBlock):
text_alignment = blocks.ChoiceBlock(label='Tekst uitlijning', choices=TEXT_ALIGNMENT_CHOICES, default='text-left')
text_width = blocks.IntegerBlock(label='Tekst breedte',default=12, min_value=1, max_value=12, help_text="Geeft de breedte van de paragraaf aan, waarbij 12 maximaal is. Som van tekst breedte en tekst offset is ook best maximaal 12")
text_offset = blocks.IntegerBlock(label='Tekst offset', default=0, min_value=0, max_value=10, help_text="Geeft de offset van de paragraaf aan, dus hoever de paragraaf naar rechts wordt verschoven (0 = volledig links)")
text = blocks.TextBlock(label='Paragraaf tekst', min_length=160, required=False, help_text='Plaats hier de tekst voor 1 paragraaf, en voeg zoveel paragrafen toe als nodig')
richtext = blocks.RichTextBlock(label='Richtext (= alternatief)', required=False, help_text="Deze wordt enkel getoond indien de 'Paragraaf tekst' leeg is")
class Meta:
template = 'home/blocks/paragraph_block.html'
label = 'paragraaf'
icon = 'edit'
class BlogEmbedBlock(blocks.URLBlock):
class Meta:
template = 'home/blocks/embed_block.html'
label = 'video embed'
icon = 'media'
class ImageWithCaptionBlock(blocks.StructBlock):
class Meta:
template = 'home/blocks/imagewithcaption_block.html'
label = 'afbeelding met tekst'
icon = 'image'
class PullQuoteBlock(blocks.StructBlock):
quote = blocks.CharBlock(label='Citaat', required=True, max_length=164, help_text='Geef hier een citaat in')
class Meta:
template = 'home/blocks/pullquote_block.html'
label = 'citaat'
icon = 'openquote'
#('slider', ListBlock(customblocks.CarouselImageBlock(), template='home/blocks/carousel_block.html', icon='image')),
class SliderBlock(blocks.StructBlock):
afbeeldingen = blocks.ListBlock(CarouselImageBlock())
bijhorende_tekst = blocks.RichTextBlock()
class Meta:
template = 'home/blocks/slider_block.html'
label = 'slider'
icon = 'image'
class TabbedContentItem(blocks.StructBlock):
tab_name = blocks.CharBlock(label='tabblad titel', required=True, max_length=32, help_text='de titel voor het tabblad')
rich_content = blocks.RichTextBlock(required=True)
text_width = blocks.IntegerBlock(label='Breedte',default=12, min_value=1, max_value=12, help_text="Geeft de breedte van de tabs + inhoud aan, waarbij 12 maximaal is.")
class TwoColsBlock(blocks.StructBlock):
#left = blocks.RichTextBlock(label='linkse kolom', required=True)
#right = blocks.RichTextBlock(label='rechtse kolom', required=True)
content = blocks.StreamBlock([
('linkse_kolom', blocks.RichTextBlock()),
('rechtse_kolom', blocks.RichTextBlock()),
], icon='arrow-left', label='inhoud')
# left = blocks.StreamBlock([
# ('linkse_kolom', blocks.RichTextBlock()),
# ], icon='arrow-left', label='inhoud')
# right = blocks.StreamBlock([
# ('rechtse_kolom', blocks.RichTextBlock()),
# ], icon='arrow-right', label='inhoud')
class Meta:
template = 'home/blocks/two_cols.html'
icon = 'placeholder'
label = '2 kolommen'
form_classname = 'range'
|
[
"[email protected]"
] | |
f41e0c2c87253f5af0e523fec0a04fdcef77d705
|
00b762e37ecef30ed04698033f719f04be9c5545
|
/scripts/test_results/scikit-learn_test_results/conflicts/127_mlcomp_sparse_document_classification_conflict.py
|
8596dabdca286879c364138e4a68ba1148370e77
|
[] |
no_license
|
kenji-nicholson/smerge
|
4f9af17e2e516333b041727b77b8330e3255b7c2
|
3da9ebfdee02f9b4c882af1f26fe2e15d037271b
|
refs/heads/master
| 2020-07-22T02:32:03.579003 | 2018-06-08T00:40:53 | 2018-06-08T00:40:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,171 |
py
|
"""
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify
documents by topic using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
The dataset used in this example is the 20 newsgroups dataset and should be
downloaded from http://mlcomp.org (free registration required):
http://mlcomp.org/datasets/379
Once downloaded, unzip the archive somewhere on your filesystem, for instance in::
% mkdir -p ~/data/mlcomp
% cd ~/data/mlcomp
% unzip /path/to/dataset-379-20news-18828_XXXXX.zip
You should get a folder ``~/data/mlcomp/379`` with a file named ``metadata`` and
subfolders ``raw``, ``train`` and ``test`` holding the text documents organized by
newsgroups.
Then set the ``MLCOMP_DATASETS_HOME`` environment variable pointing to
the root folder holding the uncompressed archive::
% export MLCOMP_DATASETS_HOME="~/data/mlcomp"
Then you are ready to run this example using your favorite python shell::
% ipython examples/mlcomp_sparse_document_classification.py
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
from time import time
import sys
import os
import numpy as np
import scipy.sparse as sp
import pylab as pl
from scikits.learn.datasets import load_mlcomp
from scikits.learn.feature_extraction.text.sparse import Vectorizer
from scikits.learn.svm.sparse import LinearSVC
from scikits.learn.metrics import confusion_matrix
<<<<<<< HEAD
from scikits.learn.metrics import f1_score
from scikits.learn.metrics import precision
from scikits.learn.metrics import recall
=======
from scikits.learn.metrics import classification_report
>>>>>>> remote
if 'MLCOMP_DATASETS_HOME' not in os.environ:
print "Please follow those instructions to get started:"
print __doc__
sys.exit(0)
# Load the training set
print "Loading 20 newsgroups training set... "
news_train = load_mlcomp('20news-18828', 'train')
print news_train.DESCR
print "%d documents" % len(news_train.filenames)
print "%d categories" % len(news_train.target_names)
print "Extracting features from the dataset using a sparse vectorizer"
t0 = time()
vectorizer = Vectorizer()
X_train = vectorizer.fit_transform((open(f).read() for f in news_train.filenames))
print "done in %fs" % (time() - t0)
print "n_samples: %d, n_features: %d" % X_train.shape
assert sp.issparse(X_train)
y_train = news_train.target
print "Training a linear SVM (hinge loss and L2 regularizer)..."
parameters = {
'loss': 'l2',
'penalty': 'l2',
'C': 10,
'dual': False,
'eps': 1e-4,
}
print "parameters:", parameters
t0 = time()
clf = LinearSVC(**parameters).fit(X_train, y_train)
print "done in %fs" % (time() - t0)
print "Percentage of non zeros coef: %f" % (np.mean(clf.coef_ != 0) * 100)
print "Loading 20 newsgroups test set... "
news_test = load_mlcomp('20news-18828', 'test')
t0 = time()
print "done in %fs" % (time() - t0)
print "Predicting the labels of the test set..."
print "%d documents" % len(news_test.filenames)
print "%d categories" % len(news_test.target_names)
print "Extracting features from the dataset using the same vectorizer"
t0 = time()
X_test = vectorizer.transform((open(f).read() for f in news_test.filenames))
y_test = news_test.target
print "done in %fs" % (time() - t0)
print "n_samples: %d, n_features: %d" % X_test.shape
print "Predicting the outcomes of the testing set"
t0 = time()
pred = clf.predict(X_test)
print "done in %fs" % (time() - t0)
<<<<<<< HEAD
print "precision: %0.3f" % precision(y_test, pred)
print "recall: %0.3f" % recall(y_test, pred)
print "f1_score: %0.3f" % f1_score(y_test, pred)
=======
print "Classification report on test set:"
print classification_report(news_test.target, pred,
class_names=news_test.target_names)
>>>>>>> remote
cm = confusion_matrix(y_test, pred)
print "Confusion matrix:"
print cm
# Show confusion matrix
pl.matshow(cm)
pl.title('Confusion matrix')
pl.colorbar()
pl.show()
|
[
"[email protected]"
] | |
95d44a6fd33c75ebdef986787fe08d50a5f247d3
|
b1d4a62b60cedaf0b88613b4c9f6e1c37a79ccef
|
/app/migrations/0002_auto_20200302_1040.py
|
ce110466c912ca49e91643733bd7b3cf8e61d897
|
[] |
no_license
|
juned8236/primary_foreign_based_onfront
|
05ac97730ecdb184c96f44e1c2fb67d40cd521c5
|
4fc3be613c246a7853b2896a120b924451673124
|
refs/heads/master
| 2021-02-07T02:25:18.923855 | 2020-03-13T03:56:17 | 2020-03-13T03:56:17 | 243,972,390 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 481 |
py
|
# Generated by Django 3.0.3 on 2020-03-02 10:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='product',
name='company',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='companydb', to='app.Company'),
),
]
|
[
"[email protected]"
] | |
beb525892b9b3398ef96bd78e6412b6090711a55
|
443aba47108d7b35984a18f8bdf8cf90a98af428
|
/src/test_sst.py
|
8823784b674cee5bcaf1c63c022e812bf4b2ce3e
|
[
"Apache-2.0"
] |
permissive
|
bgshin/mxnet_cnn
|
947898490d3845a3d4d5b89cbeab8857bb97b730
|
19ebc13f4990ee29612a479325cf13d3bd9723ec
|
refs/heads/master
| 2021-01-19T19:45:42.301126 | 2017-09-29T18:03:38 | 2017-09-29T18:03:38 | 101,208,665 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,148 |
py
|
# https://faroit.github.io/keras-docs/1.2.2/models/model/#methods
import os
# os.environ['KERAS_BACKEND']='mxnet'
os.environ['KERAS_BACKEND']='tensorflow'
from keras.layers import Convolution1D
from keras.layers import Dense, Dropout, Flatten, Input, MaxPooling1D, Embedding
from keras.layers import merge
from keras.models import Model
from keras.callbacks import ModelCheckpoint
from sst import load_all, Timer
import os
import argparse
def run(w2vdim, attempt, gpunum):
filter_sizes = (2, 3, 4, 5)
num_filters = 32
dropout_prob = 0.8
hidden_dims = 50
maxlen = 60
batch_size = 32
epochs = 30
os.environ["CUDA_VISIBLE_DEVICES"] = gpunum
def CNNv1(model_input, max_features, model_path):
z = Embedding(max_features,
w2vdim,
input_length=maxlen,
trainable=False)(model_input)
conv_blocks = []
for sz in filter_sizes:
conv = Convolution1D(nb_filter=num_filters,
filter_length=sz,
border_mode="valid",
activation="relu",
subsample_length=1)(z)
print(conv)
conv = MaxPooling1D(pool_length=2)(conv)
print(conv)
conv = Flatten()(conv)
conv_blocks.append(conv)
z = merge(conv_blocks, mode='concat')
z = Dropout(dropout_prob)(z)
z = Dense(hidden_dims, activation="relu")(z)
model_output = Dense(5, activation="softmax")(z)
model = Model(model_input, model_output)
model.load_weights(model_path)
model.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"],
context=["gpu(0)"])
return model
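# Note (added comment, not in the original script): CNNv1 is the usual
# multi-window text-CNN -- one Convolution1D branch per size in
# `filter_sizes`, each max-pooled and flattened, with the branches
# concatenated before the dropout/dense softmax head. Weights are restored
# from `model_path`, so this script only evaluates a trained model.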
with Timer("load_all..."):
(x_trn, y_trn), (x_dev, y_dev), (x_tst, y_tst), embedding, max_features = \
load_all(w2vdim, maxlen, source='shm')
with Timer("Build model..."):
input_shape = (maxlen,)
model_input = Input(shape=input_shape)
modelpath = './model/newbest-%d-%d' % (w2vdim, attempt)
model = CNNv1(model_input, max_features, modelpath)
model.summary()
score_list = []
score = model.evaluate(x_trn, y_trn, batch_size=4, verbose=1)
print 'trn score=%f' % score[1]
score_list.append(score[1])
score = model.evaluate(x_dev, y_dev, batch_size=4, verbose=1)
print 'dev score=%f' % score[1]
score_list.append(score[1])
score = model.evaluate(x_tst, y_tst, batch_size=4, verbose=1)
print 'tst score=%f' % score[1]
score_list.append(score[1])
print '[summary]'
print 'trn\tdev\ttst'
print '\t'.join(map(str, score_list))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-d', default=400, choices=[50, 300, 400], type=int)
parser.add_argument('-t', default=2, choices=range(10), type=int)
parser.add_argument('-g', default="1", choices=["0", "1", "2", "3"], type=str)
args = parser.parse_args()
run(args.d, args.t, args.g)
|
[
"[email protected]"
] | |
06af8d9c34e3dadaebe4c707aa4f98b6d8c9c7c3
|
9f9ec8bebfe8b7ac8e60dcaa23153abe976585e6
|
/dataCommons/reporting/reports/postingQueueSize.py
|
84e6c0ffdd52f0ac5f0f6934c46737a76bcf1d73
|
[] |
no_license
|
erikwestra/data-commons
|
bbf32cd9b4b64ace28bcb049190d8272a23ed891
|
e3ed33fad104157ff505bb02bc7ae981f8ba3b11
|
refs/heads/master
| 2020-04-11T12:03:19.996644 | 2013-02-14T17:08:24 | 2013-02-14T17:08:24 | 8,188,655 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,775 |
py
|
""" dataCommons.reporting.reports.postingQueueSize
This module implements the "Posting Queue Size" report for the 3taps
Reporting system.
"""
from django.db.models import *
from dataCommons.shared.lib import dateHelpers,reportHelpers
from dataCommons.monitoringAPI.models import *
#############################################################################
# The unique type code for this report:
type = "postingQueueSize"
#############################################################################
# A user-visible name for this report:
name = "Posting Queue Size"
#############################################################################
# A user-visible description for this report:
description = "This report shows the size of the posting queue over a " \
+ "given timeframe."
#############################################################################
# The list of parameters used by this report:
params = [{'name' : "timeframe",
'label' : "View posting queue size for the last",
'required' : True,
'type' : "timeframe",
'default' : "1h"},
]
#############################################################################
# The function to generate our report from a given set of parameters:
def generator(params, timezone_offset):
startTime,endTime = reportHelpers.calc_timeframe(params['timeframe'])
# Get the "POSTINGS_QUEUED" and "POSTINGS_DEQUEUED" event types. We'll
# need these for our various database queries.
try:
postings_queued_event = EventType.objects.get(type="POSTINGS_QUEUED")
except EventType.DoesNotExist:
postings_queued_event = None
try:
postings_dequeued_event = EventType.objects.get(
type="POSTINGS_DEQUEUED")
except EventType.DoesNotExist:
postings_dequeued_event = None
# Now calculate the queue size at the start of the time period. We get
# this by summing up the total value of the POSTINGS_QUEUED events, and
# then subtract the total value of the POSTINGS_DEQUEUED events, prior to
# the starting time period.
if postings_queued_event is not None:
query = Event.objects.filter(timestamp__lt=startTime,
type=postings_queued_event)
num_postings_added = \
query.aggregate(Sum("primary_value"))['primary_value__sum']
if num_postings_added is None: num_postings_added = 0
else:
num_postings_added = 0
if postings_dequeued_event is not None:
query = Event.objects.filter(timestamp__lt=startTime,
type=postings_dequeued_event)
num_postings_removed = \
query.aggregate(Sum("primary_value"))['primary_value__sum']
if num_postings_removed is None: num_postings_removed = 0
else:
num_postings_removed = 0
starting_queue_size = num_postings_added - num_postings_removed
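# Worked example (illustrative numbers, comment added): if 1200 postings were
# queued and 950 dequeued before startTime, the queue held 1200 - 950 = 250
# postings at the start of the reporting window.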
# Calculate the data to return to the caller. Note that we use a data
# reducer to simplify the data as necessary.
reducer = reportHelpers.DataReducer()
reducer.set_max_num_data_points(1000)
reducer.set_period(startTime, endTime)
reducer.set_value_combiner(sum)
if postings_queued_event is not None:
for event in Event.objects.filter(timestamp__gte=startTime,
timestamp__lte=endTime,
type=postings_queued_event):
reducer.add(event.timestamp, event.primary_value)
if postings_dequeued_event is not None:
for event in Event.objects.filter(timestamp__gte=startTime,
timestamp__lte=endTime,
type=postings_dequeued_event):
reducer.add(event.timestamp, -event.primary_value)
reduced_data = reducer.get_reduced_data()
# We now have a (possibly reduced) list of the changes to the queue size
# for the desired time period. Use these calculated values to build a
# running total of the queue size over the time period.
results = {'startTime' : reportHelpers.datetime_to_seconds(startTime,
timezone_offset),
'endTime' : reportHelpers.datetime_to_seconds(endTime,
timezone_offset),
'periods' : []}
running_total = starting_queue_size
for period_start,period_end,period_total in reduced_data:
running_total = running_total + period_total
timestamp = reportHelpers.datetime_to_seconds(period_start,
timezone_offset)
results['periods'].append((timestamp, running_total))
# Finally, return the calculated data back to the caller.
return (True, results)
#############################################################################
# The Javascript function to render the generated report into the web page:
renderer = """
function render(data) {
var points = [];
for (var i=0; i < data.periods.length; i++) {
var row = data.periods[i];
var timestamp = row[0];
var queue_size = row[1];
points.push([timestamp * 1000, queue_size]);
}
$.plot($("#report"), [
{data: points}
],
{xaxis: {mode: "time",
axisLabel: "Time of Day",
min: data.startTime * 1000,
max: data.endTime * 1000},
yaxis: {axisLabel: "Size of Posting Queue"}});
}
"""
|
[
"[email protected]"
] | |
7c745e331a254a56767884ec5b62a8ad36581097
|
aba9b00edec394f1389a7ecf88a290112303414d
|
/energetyka/inżynieria_materiałowa/lab/06/polprzewodnik.py
|
b090e4821c75dec9e3f3bfa8e5f885a841a9095b
|
[] |
no_license
|
torgiren/szkola
|
2aca12807f0030f8e2ae2dfcb808bf7cae5e2e27
|
5ed18bed273ab25b8e52a488e28af239b8beb89c
|
refs/heads/master
| 2020-12-25T18:18:36.317496 | 2014-04-27T23:43:21 | 2014-04-27T23:43:21 | 3,892,030 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 245 |
py
|
#!/usr/bin/env python
import math
# Comment added for clarity (an assumption based on the filename, Polish for
# "semiconductor"): each row of data2.dat yields 1/T, with T converted from
# Celsius to Kelvin, and the natural log of the computed conductivity -- the
# Arrhenius-style coordinates used to extract an activation energy.
f = open("data2.dat", "r")
while True:
    line = f.readline()
    if not line:
        break
    line = line.split()
    print 1.0 / (float(line[0]) + 273),
    print math.log((0.0004 * 1000 * float(line[1]) / (0.00000016 * float(line[2]))), math.e)
|
[
"[email protected]"
] | |
7238271ba0141772838ea30adbcf8d57f5070af2
|
c91eac635507950941003dd79a494a95cd39dc77
|
/test/data_formater/test_ttf_stage_parameters.py
|
775a2212fc596871b1b9b7604ff84a3adccd31a4
|
[] |
no_license
|
GabrielPenaU3F/confiabilidad-software
|
29b064cc9f866c06833cf6afc0bc424fd20619c6
|
c57572ec3f9fba01331718d892d94d720cc5d04d
|
refs/heads/master
| 2023-03-19T01:47:40.939503 | 2021-03-17T02:03:39 | 2021-03-17T02:03:39 | 193,144,043 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,330 |
py
|
import unittest
from src.data.data_formater import TTFDataFormater
from src.data.data_repository import DataRepository
class TestTTFStageParameters(unittest.TestCase):
ntds_data = None
ttf_formater = None
@classmethod
def setUpClass(cls):
cls.ntds_data = DataRepository.provide_project_data('ntds')
cls.ttf_formater = TTFDataFormater.get_instance()
def test_ttf_formater_determine_stage_t0_should_return_0_if_initial_t_is_0(self):
t0 = self.ttf_formater.determine_stage_t0(self.ntds_data, 0)
self.assertEqual(0, t0)
def test_ttf_formater_determine_stage_t0_should_return_21_if_initial_t_is_32(self):
t0 = self.ttf_formater.determine_stage_t0(self.ntds_data, 32)
self.assertEqual(21, t0)
def test_ttf_formater_determine_initial_sample_should_return_0_if_initial_t_is_0(self):
initial_sample = self.ttf_formater.determine_stage_initial_sample(self.ntds_data, 0)
self.assertEqual(0, initial_sample)
def test_ttf_formater_determine_initial_sample_should_return_0_if_initial_t_is_9(self):
initial_sample = self.ttf_formater.determine_stage_initial_sample(self.ntds_data, 9)
self.assertEqual(0, initial_sample)
def test_ttf_formater_determine_initial_sample_should_return_1_if_initial_t_is_11(self):
initial_sample = self.ttf_formater.determine_stage_initial_sample(self.ntds_data, 11)
self.assertEqual(1, initial_sample)
def test_ttf_formater_determine_initial_sample_should_return_1_if_initial_t_is_21(self):
initial_sample = self.ttf_formater.determine_stage_initial_sample(self.ntds_data, 21)
self.assertEqual(1, initial_sample)
def test_ttf_formater_determine_end_sample_should_return_2_if_end_t_is_32(self):
end_sample = self.ttf_formater.determine_stage_end_sample(self.ntds_data, 32)
self.assertEqual(2, end_sample)
def test_ttf_formater_determine_end_sample_should_return_2_if_end_t_is_35(self):
end_sample = self.ttf_formater.determine_stage_end_sample(self.ntds_data, 35)
self.assertEqual(2, end_sample)
def test_ttf_formater_determine_end_sample_should_return_25_if_end_t_is_260(self):
end_sample = self.ttf_formater.determine_stage_end_sample(self.ntds_data, 250)
self.assertEqual(25, end_sample)
|
[
"[email protected]"
] | |
2722bfbf80756d42355e953b07dc2b3411eb23a4
|
244ecfc2017a48c70b74556be8c188e7a4815848
|
/res_bw/scripts/common/lib/plat-irix5/in.py
|
81f04b7f3125011fcdad536e53c891298a4c1d7b
|
[] |
no_license
|
webiumsk/WOT-0.9.12
|
c1e1259411ba1e6c7b02cd6408b731419d3174e5
|
5be5fd9186f335e7bae88c9761c378ff5fbf5351
|
refs/heads/master
| 2021-01-10T01:38:36.523788 | 2015-11-18T11:33:37 | 2015-11-18T11:33:37 | 46,414,438 | 1 | 0 | null | null | null | null |
WINDOWS-1250
|
Python
| false | false | 3,280 |
py
|
# 2015.11.18 12:05:35 Central Europe (standard time)
# Embedded file name: scripts/common/Lib/plat-irix5/IN.py
from warnings import warnpy3k
warnpy3k('the IN module has been removed in Python 3.0', stacklevel=2)
del warnpy3k
LITTLE_ENDIAN = 1234
BIG_ENDIAN = 4321
PDP_ENDIAN = 3412
BYTE_ORDER = BIG_ENDIAN
BYTE_ORDER = LITTLE_ENDIAN
def ntohl(x):
return x
def ntohs(x):
return x
def htonl(x):
return x
def htons(x):
return x
def htonl(x):
return ntohl(x)
def htons(x):
return ntohs(x)
ONBITSMAJOR = 7
ONBITSMINOR = 8
OMAXMAJ = 127
OMAXMIN = 255
NBITSMAJOR = 14
NBITSMINOR = 18
MAXMAJ = 511
MAXMIN = 262143
OLDDEV = 0
NEWDEV = 1
MKDEV_VER = NEWDEV
def major(dev):
return __major(MKDEV_VER, dev)
def minor(dev):
return __minor(MKDEV_VER, dev)
FD_SETSIZE = 1024
NBBY = 8
IPPROTO_IP = 0
IPPROTO_ICMP = 1
IPPROTO_IGMP = 2
IPPROTO_GGP = 3
IPPROTO_ENCAP = 4
IPPROTO_TCP = 6
IPPROTO_EGP = 8
IPPROTO_PUP = 12
IPPROTO_UDP = 17
IPPROTO_IDP = 22
IPPROTO_TP = 29
IPPROTO_XTP = 36
IPPROTO_HELLO = 63
IPPROTO_ND = 77
IPPROTO_EON = 80
IPPROTO_RAW = 255
IPPROTO_MAX = 256
IPPORT_RESERVED = 1024
IPPORT_USERRESERVED = 5000
IPPORT_MAXPORT = 65535
def IN_CLASSA(i):
return long(i) & 2147483648L == 0
IN_CLASSA_NET = 4278190080L
IN_CLASSA_NSHIFT = 24
IN_CLASSA_HOST = 16777215
IN_CLASSA_MAX = 128
def IN_CLASSB(i):
return long(i) & 3221225472L == 2147483648L
IN_CLASSB_NET = 4294901760L
IN_CLASSB_NSHIFT = 16
IN_CLASSB_HOST = 65535
IN_CLASSB_MAX = 65536
def IN_CLASSC(i):
return long(i) & 3758096384L == 3221225472L
IN_CLASSC_NET = 4294967040L
IN_CLASSC_NSHIFT = 8
IN_CLASSC_HOST = 255
def IN_CLASSD(i):
return long(i) & 4026531840L == 3758096384L
IN_CLASSD_NET = 4026531840L
IN_CLASSD_NSHIFT = 28
IN_CLASSD_HOST = 268435455
def IN_MULTICAST(i):
return IN_CLASSD(i)
def IN_EXPERIMENTAL(i):
return long(i) & 4026531840L == 4026531840L
def IN_BADCLASS(i):
return long(i) & 4026531840L == 4026531840L
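# Illustrative check (comment added, not part of the original module):
# 10.0.0.1 == 0x0A000001 has its top bit clear, so IN_CLASSA(0x0A000001) is
# true, while 192.168.0.1 == 0xC0A80001 starts with bits 110, so
# IN_CLASSC(0xC0A80001) is true.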
INADDR_ANY = 0
INADDR_BROADCAST = 4294967295L
INADDR_LOOPBACK = 2130706433
INADDR_UNSPEC_GROUP = 3758096384L
INADDR_ALLHOSTS_GROUP = 3758096385L
INADDR_MAX_LOCAL_GROUP = 3758096639L
INADDR_NONE = 4294967295L
IN_LOOPBACKNET = 127
IP_OPTIONS = 1
IP_MULTICAST_IF = 2
IP_MULTICAST_TTL = 3
IP_MULTICAST_LOOP = 4
IP_ADD_MEMBERSHIP = 5
IP_DROP_MEMBERSHIP = 6
IP_HDRINCL = 7
IP_TOS = 8
IP_TTL = 9
IP_RECVOPTS = 10
IP_RECVRETOPTS = 11
IP_RECVDSTADDR = 12
IP_RETOPTS = 13
IP_OPTIONS = 1
IP_HDRINCL = 2
IP_TOS = 3
IP_TTL = 4
IP_RECVOPTS = 5
IP_RECVRETOPTS = 6
IP_RECVDSTADDR = 7
IP_RETOPTS = 8
IP_MULTICAST_IF = 20
IP_MULTICAST_TTL = 21
IP_MULTICAST_LOOP = 22
IP_ADD_MEMBERSHIP = 23
IP_DROP_MEMBERSHIP = 24
IRIX4_IP_OPTIONS = 1
IRIX4_IP_MULTICAST_IF = 2
IRIX4_IP_MULTICAST_TTL = 3
IRIX4_IP_MULTICAST_LOOP = 4
IRIX4_IP_ADD_MEMBERSHIP = 5
IRIX4_IP_DROP_MEMBERSHIP = 6
IRIX4_IP_HDRINCL = 7
IRIX4_IP_TOS = 8
IRIX4_IP_TTL = 9
IRIX4_IP_RECVOPTS = 10
IRIX4_IP_RECVRETOPTS = 11
IRIX4_IP_RECVDSTADDR = 12
IRIX4_IP_RETOPTS = 13
IP_DEFAULT_MULTICAST_TTL = 1
IP_DEFAULT_MULTICAST_LOOP = 1
IP_MAX_MEMBERSHIPS = 20
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\plat-irix5\in.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.18 12:05:35 Central Europe (standard time)
|
[
"[email protected]"
] | |
536183f4949f6e92e8c56105ea1e5dfe526556a9
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/word-count/5af1e40a97664eb786adf47ea78a5857.py
|
e3c48f698e30288abe8547792608b7366661c877
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null |
UTF-8
|
Python
| false | false | 400 |
py
|
"""
Write a program that given a phrase can count the occurrences of each word in that phrase.
For example for the input `"olly olly in come free"`
olly: 2
in: 1
come: 1
free: 1
"""
from collections import defaultdict
def word_count(phrase):
dictionary = defaultdict(int)
words = phrase.split()
for word in words:
dictionary[word] += 1
return dict(dictionary)
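# Quick sanity check (illustrative addition, not part of the original solution):
if __name__ == '__main__':
    assert word_count("olly olly in come free") == {
        'olly': 2, 'in': 1, 'come': 1, 'free': 1}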
|
[
"[email protected]"
] | |
4a474bffc05946755f60e75c53115e83ec96ac48
|
57caf9e323d5771c6463bb67a7e1a774f5315e5b
|
/setup.py
|
9cdd5dcfa36969a58547e0f10e3d4a660b6ad322
|
[
"Apache-2.0"
] |
permissive
|
geziaka/rater
|
5de97851d4207f03f996324b99b8fdc5881306e9
|
8437dea8baf0137ab3c07dd19c5f2bb8c15b4435
|
refs/heads/master
| 2022-12-06T00:33:33.867132 | 2020-09-01T15:04:37 | 2020-09-01T15:04:37 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,652 |
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
from setuptools import setup, find_packages
import rater
if sys.version_info < (3,):
sys.exit('Sorry, Python3 is required.')
with open('README.md', 'r', encoding='utf-8') as f:
readme = f.read()
with open('LICENSE', 'r', encoding='utf-8') as f:
license = f.read()
with open('requirements.txt', 'r', encoding='utf-8') as f:
reqs = f.read()
setup(
name='rater',
version=rater.__version__,
description='rater',
long_description=readme,
long_description_content_type='text/markdown',
author='XuMing',
author_email='[email protected]',
url='https://github.com/shibing624/rater',
license="Apache License 2.0",
classifiers=[
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Natural Language :: Chinese (Simplified)',
'Natural Language :: Chinese (Traditional)',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Text Processing',
'Topic :: Text Processing :: Indexing',
'Topic :: Text Processing :: Linguistic',
],
keywords='rater,recommender,Recommendation System,recommendation model',
install_requires=reqs.strip().split('\n'),
packages=find_packages(exclude=['tests']),
package_dir={'rater': 'rater'},
package_data={
'rater': ['*.*', '../LICENSE', '../*.txt', '../README.*'],
},
test_suite='tests',
)
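# Typical usage (illustrative, not part of the original file):
#   pip install -e .                    # editable install for development
#   python setup.py sdist bdist_wheel   # build source and wheel distributions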
|
[
"[email protected]"
] | |
6a0effe99eed37ac3ee489761699b67ae14ef643
|
4aae2df13bfd53a8b16aa5f941f2cc8b8ac144b7
|
/torch/utils/data/_utils/collate.py
|
e520de6ebee91da435f8e8cb8bab02ebc24f851b
|
[
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
computerguy2030/pytorch-rocm-amd
|
e9f2718c470b505325d396baf6513e71bcf0a7ca
|
38da53d721fcb335dedb1b52f14fd89718e90bef
|
refs/heads/master
| 2023-04-08T00:55:01.542663 | 2021-04-16T11:33:39 | 2021-04-16T11:33:39 | 334,288,140 | 3 | 0 |
NOASSERTION
| 2021-04-16T11:27:55 | 2021-01-29T23:40:06 |
C++
|
UTF-8
|
Python
| false | false | 3,656 |
py
|
r""""Contains definitions of the methods used by the _BaseDataLoaderIter workers to
collate samples fetched from dataset into Tensor(s).
These **needs** to be in global scope since Py2 doesn't support serializing
static methods.
"""
import torch
import re
import collections
from torch._six import string_classes
np_str_obj_array_pattern = re.compile(r'[SaUO]')
def default_convert(data):
r"""Converts each NumPy array data field into a tensor"""
elem_type = type(data)
if isinstance(data, torch.Tensor):
return data
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
# array of string classes and object
if elem_type.__name__ == 'ndarray' \
and np_str_obj_array_pattern.search(data.dtype.str) is not None:
return data
return torch.as_tensor(data)
elif isinstance(data, collections.abc.Mapping):
return {key: default_convert(data[key]) for key in data}
elif isinstance(data, tuple) and hasattr(data, '_fields'): # namedtuple
return elem_type(*(default_convert(d) for d in data))
elif isinstance(data, collections.abc.Sequence) and not isinstance(data, string_classes):
return [default_convert(d) for d in data]
else:
return data
default_collate_err_msg_format = (
"default_collate: batch must contain tensors, numpy arrays, numbers, "
"dicts or lists; found {}")
def default_collate(batch):
r"""Puts each data field into a tensor with outer dimension batch size"""
elem = batch[0]
elem_type = type(elem)
if isinstance(elem, torch.Tensor):
out = None
if torch.utils.data.get_worker_info() is not None:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum([x.numel() for x in batch])
storage = elem.storage()._new_shared(numel)
out = elem.new(storage)
return torch.stack(batch, 0, out=out)
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
if elem_type.__name__ == 'ndarray' or elem_type.__name__ == 'memmap':
# array of string classes and object
if np_str_obj_array_pattern.search(elem.dtype.str) is not None:
raise TypeError(default_collate_err_msg_format.format(elem.dtype))
return default_collate([torch.as_tensor(b) for b in batch])
elif elem.shape == (): # scalars
return torch.as_tensor(batch)
elif isinstance(elem, float):
return torch.tensor(batch, dtype=torch.float64)
elif isinstance(elem, int):
return torch.tensor(batch)
elif isinstance(elem, string_classes):
return batch
elif isinstance(elem, collections.abc.Mapping):
return {key: default_collate([d[key] for d in batch]) for key in elem}
elif isinstance(elem, tuple) and hasattr(elem, '_fields'): # namedtuple
return elem_type(*(default_collate(samples) for samples in zip(*batch)))
elif isinstance(elem, collections.abc.Sequence):
# check to make sure that the elements in batch have consistent size
it = iter(batch)
elem_size = len(next(it))
if not all(len(elem) == elem_size for elem in it):
raise RuntimeError('each element in list of batch should be of equal size')
transposed = zip(*batch)
return [default_collate(samples) for samples in transposed]
raise TypeError(default_collate_err_msg_format.format(elem_type))
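# Illustrative behaviour (comment added, not part of the original module):
# collating a batch of (feature, label) pairs stacks each field along a new
# outer batch dimension, e.g.
#   >>> batch = [(torch.zeros(3), 0), (torch.ones(3), 1)]
#   >>> xs, ys = default_collate(batch)
#   >>> xs.shape, ys.shape
#   (torch.Size([2, 3]), torch.Size([2]))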
|
[
"[email protected]"
] | |
b1d34706132b3d17ed479db76bad66ce56f1572b
|
29c58b3bec6ac0fcdb3070efc118600ee92004da
|
/mailslurp_client/models/domain_issues_dto.py
|
308f604c368d5fc913ec8fb9b4289513ad448937
|
[
"MIT"
] |
permissive
|
mailslurp/mailslurp-client-python
|
a2b5a0545206714bd4462ae517f242852b52aaf9
|
5c9a7cfdd5ea8bf671928023e7263847353d92c4
|
refs/heads/master
| 2023-06-23T00:41:36.257212 | 2023-06-14T10:10:14 | 2023-06-14T10:10:14 | 204,662,133 | 8 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,963 |
py
|
# coding: utf-8
"""
MailSlurp API
MailSlurp is an API for sending and receiving emails from dynamically allocated email addresses. It's designed for developers and QA teams to test applications, process inbound emails, send templated notifications, attachments, and more. ## Resources - [Homepage](https://www.mailslurp.com) - Get an [API KEY](https://app.mailslurp.com/sign-up/) - Generated [SDK Clients](https://docs.mailslurp.com/) - [Examples](https://github.com/mailslurp/examples) repository # noqa: E501
The version of the OpenAPI document: 6.5.2
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from mailslurp_client.configuration import Configuration
class DomainIssuesDto(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'has_issues': 'bool'
}
attribute_map = {
'has_issues': 'hasIssues'
}
def __init__(self, has_issues=None, local_vars_configuration=None): # noqa: E501
"""DomainIssuesDto - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._has_issues = None
self.discriminator = None
self.has_issues = has_issues
@property
def has_issues(self):
"""Gets the has_issues of this DomainIssuesDto. # noqa: E501
:return: The has_issues of this DomainIssuesDto. # noqa: E501
:rtype: bool
"""
return self._has_issues
@has_issues.setter
def has_issues(self, has_issues):
"""Sets the has_issues of this DomainIssuesDto.
:param has_issues: The has_issues of this DomainIssuesDto. # noqa: E501
:type: bool
"""
if self.local_vars_configuration.client_side_validation and has_issues is None: # noqa: E501
raise ValueError("Invalid value for `has_issues`, must not be `None`") # noqa: E501
self._has_issues = has_issues
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DomainIssuesDto):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, DomainIssuesDto):
return True
return self.to_dict() != other.to_dict()
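# Illustrative usage (not part of the generated module):
#   dto = DomainIssuesDto(has_issues=False)
#   dto.to_dict()   # -> {'has_issues': False}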
|
[
"[email protected]"
] | |
5c7885d88d61fdcccf6ec615cce6cc965ff9f688
|
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
|
/indices/nnncx.py
|
17976d8f9057feb480af89c03ad232ada8404698
|
[] |
no_license
|
psdh/WhatsintheVector
|
e8aabacc054a88b4cb25303548980af9a10c12a8
|
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
|
refs/heads/master
| 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 |
Python
|
UTF-8
|
Python
| false | false | 141 |
py
|
ii = [('LeakWTI2.py', 1), ('WilkJMC2.py', 1), ('TalfTIT.py', 1), ('LeakWTI.py', 1), ('MereHHB.py', 1), ('SomeMMH.py', 20), ('BrewDTO.py', 2)]
|
[
"[email protected]"
] | |
5bfd5f3f684821bf71868a10ddb26ba44701fba3
|
4acc08d2c165b5d88119df6bb4081bcfaca684f7
|
/python_program/NCR_NPR_find_value.py
|
5610825ece1676d469f61a64ce35330106ab9c4e
|
[] |
no_license
|
xiaotuzixuedaima/PythonProgramDucat
|
9059648f070db7304f9aaa45657c8d3df75f3cc2
|
90c6947e6dfa8ebb6c8758735960379a81d88ae3
|
refs/heads/master
| 2022-01-16T04:13:17.849130 | 2019-02-22T15:43:18 | 2019-02-22T15:43:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 869 |
py
|
# Python: find nCr (combinations) and nPr (permutations) for user-supplied n and r
n = int(input("enter the nth term : " ))
r = int(input("enter the rth term : " ))
t = n - r
fact = 1
while n > 0:
fact = fact * n
n = n - 1
n_fact = fact
# print("n th term of fact value : ",n_fact)
fact1 = 1
while r > 0:
fact1 = fact1 * r
r = r - 1
r_fact = fact1
# print("r th term of fact value : ",r_fact)
fact2 = 1
while t > 0:
fact2 = fact2 * t
t = t - 1
diff_fact = fact2
# print("(n-r)th term of fact value : ",diff_fact)
NCR = n_fact // (diff_fact * r_fact)
print("number of combinations (nCr) of the given values :", NCR)
NPR = n_fact // diff_fact
print("number of permutations (nPr) of the given values :", NPR)
'''
output ===
enter the nth term : 10
enter the rth term : 7
number of combinations (nCr) of the given values : 120
number of permutations (nPr) of the given values : 604800
'''
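# Cross-check sketch (illustrative addition): the same values follow directly
# from math.factorial, since nCr = n! / (r! * (n - r)!) and nPr = n! / (n - r)!.
#   import math
#   n, r = 10, 7
#   assert math.factorial(n) // (math.factorial(r) * math.factorial(n - r)) == 120
#   assert math.factorial(n) // math.factorial(n - r) == 604800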
|
[
"[email protected]"
] | |
144255196241663945336ba45beadedc72c62646
|
3dd43ff0dab514a39f611487ab421256b3b5b13b
|
/scripts/client/gui/Scaleform/daapi/view/meta/ClanSearchWindowMeta.py
|
e2816eaaf99fd1ffd00a31c5816c417a46e1f342
|
[] |
no_license
|
kusaku/wotscripts
|
04ab289e3fec134e290355ecf81cf703af189f72
|
a89c2f825d3c7dade7bc5163a6c04e7f5bab587d
|
refs/heads/master
| 2023-08-20T00:17:36.852522 | 2018-02-26T14:53:44 | 2018-02-26T14:53:44 | 80,610,354 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,518 |
py
|
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/ClanSearchWindowMeta.py
"""
This file was generated using the wgpygen.
Please, don't edit this file manually.
"""
from gui.Scaleform.framework.entities.abstract.AbstractWindowView import AbstractWindowView
class ClanSearchWindowMeta(AbstractWindowView):
def search(self, text):
self._printOverrideError('search')
def previousPage(self):
self._printOverrideError('previousPage')
def nextPage(self):
self._printOverrideError('nextPage')
def dummyButtonPress(self):
self._printOverrideError('dummyButtonPress')
def as_getDPS(self):
if self._isDAAPIInited():
return self.flashObject.as_getDP()
def as_setInitDataS(self, data):
"""
:param data: Represented by ClanSearchWindowInitDataVO (AS)
"""
if self._isDAAPIInited():
return self.flashObject.as_setInitData(data)
def as_setStateDataS(self, data):
"""
:param data: Represented by ClanSearchWindowStateDataVO (AS)
"""
if self._isDAAPIInited():
return self.flashObject.as_setStateData(data)
def as_setDummyS(self, data):
"""
:param data: Represented by DummyVO (AS)
"""
if self._isDAAPIInited():
return self.flashObject.as_setDummy(data)
def as_setDummyVisibleS(self, visible):
if self._isDAAPIInited():
return self.flashObject.as_setDummyVisible(visible)
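# Illustrative subclass (an assumption, not part of the generated file): a
# concrete view would inherit from the meta class and override the DAAPI
# hooks, roughly:
#
#     class ClanSearchWindow(ClanSearchWindowMeta):
#         def search(self, text):
#             ...  # run the actual clan search instead of logging an error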
|
[
"[email protected]"
] | |
fbee6131b5c27c16de1bb92dab4b0dbd13d43aef
|
c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd
|
/google/ads/googleads/v4/googleads-py/tests/unit/gapic/googleads.v4/services/test_topic_constant_service.py
|
13358a59a4b724a7d623ec984941bbeb263d41da
|
[
"Apache-2.0"
] |
permissive
|
dizcology/googleapis-gen
|
74a72b655fba2565233e5a289cfaea6dc7b91e1a
|
478f36572d7bcf1dc66038d0e76b9b3fa2abae63
|
refs/heads/master
| 2023-06-04T15:51:18.380826 | 2021-06-16T20:42:38 | 2021-06-16T20:42:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 29,849 |
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from unittest import mock
import grpc
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.ads.googleads.v4.resources.types import topic_constant
from google.ads.googleads.v4.services.services.topic_constant_service import TopicConstantServiceClient
from google.ads.googleads.v4.services.services.topic_constant_service import transports
from google.ads.googleads.v4.services.types import topic_constant_service
from google.api_core import client_options
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.oauth2 import service_account
from google.protobuf import wrappers_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert TopicConstantServiceClient._get_default_mtls_endpoint(None) is None
assert TopicConstantServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert TopicConstantServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
assert TopicConstantServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
assert TopicConstantServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
assert TopicConstantServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
def test_topic_constant_service_client_from_service_account_info():
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
factory.return_value = creds
info = {"valid": True}
client = TopicConstantServiceClient.from_service_account_info(info)
assert client.transport._credentials == creds
assert client.transport._host == 'googleads.googleapis.com:443'
def test_topic_constant_service_client_from_service_account_file():
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
factory.return_value = creds
client = TopicConstantServiceClient.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
client = TopicConstantServiceClient.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert client.transport._host == 'googleads.googleapis.com:443'
def test_topic_constant_service_client_get_transport_class():
transport = TopicConstantServiceClient.get_transport_class()
assert transport == transports.TopicConstantServiceGrpcTransport
transport = TopicConstantServiceClient.get_transport_class("grpc")
assert transport == transports.TopicConstantServiceGrpcTransport
@mock.patch.object(TopicConstantServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TopicConstantServiceClient))
def test_topic_constant_service_client_client_options():
# Check that if channel is provided we won't create a new one.
with mock.patch('google.ads.googleads.v4.services.services.topic_constant_service.TopicConstantServiceClient.get_transport_class') as gtc:
transport = transports.TopicConstantServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials()
)
client = TopicConstantServiceClient(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch('google.ads.googleads.v4.services.services.topic_constant_service.TopicConstantServiceClient.get_transport_class') as gtc:
client = TopicConstantServiceClient(transport="grpc")
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch('google.ads.googleads.v4.services.services.topic_constant_service.transports.TopicConstantServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = TopicConstantServiceClient(client_options=options)
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host="squid.clam.whelk",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT
# is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch('google.ads.googleads.v4.services.services.topic_constant_service.transports.TopicConstantServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = TopicConstantServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch('google.ads.googleads.v4.services.services.topic_constant_service.transports.TopicConstantServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = TopicConstantServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_MTLS_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = TopicConstantServiceClient()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
with pytest.raises(ValueError):
client = TopicConstantServiceClient()
@mock.patch.object(TopicConstantServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TopicConstantServiceClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
@pytest.mark.parametrize("use_client_cert_env", ["true", "false"])
def test_topic_constant_service_client_mtls_env_auto(use_client_cert_env):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
with mock.patch('google.ads.googleads.v4.services.services.topic_constant_service.transports.TopicConstantServiceGrpcTransport.__init__') as grpc_transport:
ssl_channel_creds = mock.Mock()
with mock.patch('grpc.ssl_channel_credentials', return_value=ssl_channel_creds):
grpc_transport.return_value = None
client = TopicConstantServiceClient(client_options=options)
if use_client_cert_env == "false":
expected_ssl_channel_creds = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_ssl_channel_creds = ssl_channel_creds
expected_host = client.DEFAULT_MTLS_ENDPOINT
grpc_transport.assert_called_once_with(
ssl_channel_credentials=expected_ssl_channel_creds,
credentials=None,
host=expected_host,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch('google.ads.googleads.v4.services.services.topic_constant_service.transports.TopicConstantServiceGrpcTransport.__init__') as grpc_transport:
with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
with mock.patch('google.auth.transport.grpc.SslCredentials.ssl_credentials', new_callable=mock.PropertyMock) as ssl_credentials_mock:
if use_client_cert_env == "false":
is_mtls_mock.return_value = False
ssl_credentials_mock.return_value = None
expected_host = client.DEFAULT_ENDPOINT
expected_ssl_channel_creds = None
else:
is_mtls_mock.return_value = True
ssl_credentials_mock.return_value = mock.Mock()
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_ssl_channel_creds = ssl_credentials_mock.return_value
grpc_transport.return_value = None
client = TopicConstantServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=expected_ssl_channel_creds,
credentials=None,
host=expected_host,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch('google.ads.googleads.v4.services.services.topic_constant_service.transports.TopicConstantServiceGrpcTransport.__init__') as grpc_transport:
with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
is_mtls_mock.return_value = False
grpc_transport.return_value = None
client = TopicConstantServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_topic_constant_service_client_client_options_from_dict():
with mock.patch('google.ads.googleads.v4.services.services.topic_constant_service.transports.TopicConstantServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = TopicConstantServiceClient(
client_options={'api_endpoint': 'squid.clam.whelk'}
)
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host="squid.clam.whelk",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_get_topic_constant(transport: str = 'grpc', request_type=topic_constant_service.GetTopicConstantRequest):
client = TopicConstantServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_topic_constant),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = topic_constant.TopicConstant(
resource_name='resource_name_value',
)
response = client.get_topic_constant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == topic_constant_service.GetTopicConstantRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, topic_constant.TopicConstant)
assert response.resource_name == 'resource_name_value'
def test_get_topic_constant_from_dict():
test_get_topic_constant(request_type=dict)
def test_get_topic_constant_field_headers():
client = TopicConstantServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = topic_constant_service.GetTopicConstantRequest()
request.resource_name = 'resource_name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_topic_constant),
'__call__') as call:
call.return_value = topic_constant.TopicConstant()
client.get_topic_constant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'resource_name=resource_name/value',
) in kw['metadata']
def test_get_topic_constant_flattened():
client = TopicConstantServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_topic_constant),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = topic_constant.TopicConstant()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_topic_constant(
resource_name='resource_name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].resource_name == 'resource_name_value'
def test_get_topic_constant_flattened_error():
client = TopicConstantServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_topic_constant(
topic_constant_service.GetTopicConstantRequest(),
resource_name='resource_name_value',
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.TopicConstantServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = TopicConstantServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.TopicConstantServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = TopicConstantServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.TopicConstantServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = TopicConstantServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport,
transports.TopicConstantServiceGrpcTransport,
)
@pytest.mark.parametrize("transport_class", [
transports.TopicConstantServiceGrpcTransport,
])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_topic_constant_service_base_transport():
# Instantiate the base transport.
with mock.patch('google.ads.googleads.v4.services.services.topic_constant_service.transports.TopicConstantServiceTransport.__init__') as Transport:
Transport.return_value = None
transport = transports.TopicConstantServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
'get_topic_constant',
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
def test_topic_constant_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, 'default') as adc, mock.patch('google.ads.googleads.v4.services.services.topic_constant_service.transports.TopicConstantServiceTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.TopicConstantServiceTransport()
adc.assert_called_once()
def test_topic_constant_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
TopicConstantServiceClient()
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/adwords',
))
def test_topic_constant_service_transport_auth_adc():
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transports.TopicConstantServiceGrpcTransport(host="squid.clam.whelk")
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/adwords',
))
def test_topic_constant_service_host_no_port():
client = TopicConstantServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com'),
)
assert client.transport._host == 'googleads.googleapis.com:443'
def test_topic_constant_service_host_with_port():
client = TopicConstantServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com:8000'),
)
assert client.transport._host == 'googleads.googleapis.com:8000'
def test_topic_constant_service_grpc_transport_channel():
channel = grpc.insecure_channel('http://localhost/')
# Check that channel is used if provided.
transport = transports.TopicConstantServiceGrpcTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials == None
@pytest.mark.parametrize("transport_class", [transports.TopicConstantServiceGrpcTransport])
def test_topic_constant_service_transport_channel_mtls_with_client_cert_source(
transport_class
):
with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/adwords',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
@pytest.mark.parametrize("transport_class", [transports.TopicConstantServiceGrpcTransport,])
def test_topic_constant_service_transport_channel_mtls_with_adc(
transport_class
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/adwords',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_topic_constant_path():
topic_constant = "squid"
expected = "topicConstants/{topic_constant}".format(topic_constant=topic_constant, )
actual = TopicConstantServiceClient.topic_constant_path(topic_constant)
assert expected == actual
def test_parse_topic_constant_path():
expected = {
"topic_constant": "clam",
}
path = TopicConstantServiceClient.topic_constant_path(**expected)
# Check that the path construction is reversible.
actual = TopicConstantServiceClient.parse_topic_constant_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "whelk"
expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
actual = TopicConstantServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "octopus",
}
path = TopicConstantServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = TopicConstantServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "oyster"
expected = "folders/{folder}".format(folder=folder, )
actual = TopicConstantServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "nudibranch",
}
path = TopicConstantServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = TopicConstantServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "cuttlefish"
expected = "organizations/{organization}".format(organization=organization, )
actual = TopicConstantServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "mussel",
}
path = TopicConstantServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = TopicConstantServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "winkle"
expected = "projects/{project}".format(project=project, )
actual = TopicConstantServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "nautilus",
}
path = TopicConstantServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = TopicConstantServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "scallop"
location = "abalone"
expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
actual = TopicConstantServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "squid",
"location": "clam",
}
path = TopicConstantServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = TopicConstantServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(transports.TopicConstantServiceTransport, '_prep_wrapped_messages') as prep:
client = TopicConstantServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(transports.TopicConstantServiceTransport, '_prep_wrapped_messages') as prep:
transport_class = TopicConstantServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
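# ---------------------------------------------------------------------------
# The path tests above verify that resource-name templates round-trip. A
# minimal standalone sketch of that build/parse pattern (illustrative only,
# not the generated google-ads helpers):
import re
def _build_topic_constant_path(topic_constant):
    # Fill the template, as topic_constant_path does.
    return "topicConstants/{topic_constant}".format(topic_constant=topic_constant)
def _parse_topic_constant_path(path):
    # Invert the template with a named-group regex.
    m = re.match(r"^topicConstants/(?P<topic_constant>.+?)$", path)
    return m.groupdict() if m else {}
assert _parse_topic_constant_path(_build_topic_constant_path("squid")) == {"topic_constant": "squid"}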
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
e65a195d861dbd5a95fad58ad3981875fc4713b5
|
86335a0ba622ffc1ef9392fa45190123599c92de
|
/ecpy_pulses/infos.py
|
d37602a9eb241dc1d80c8259df0bc937201c0482
|
[
"BSD-3-Clause"
] |
permissive
|
PhilipVinc/ecpy_pulses
|
4e75d2fc4a977ec1f80761609412b453451f967d
|
3ca72e5739e36ac203381ca6ed46a5b18184bd7c
|
refs/heads/master
| 2021-01-17T22:27:44.395230 | 2016-06-08T16:16:59 | 2016-06-08T16:16:59 | 51,396,985 | 1 | 0 | null | 2016-02-09T20:23:16 | 2016-02-09T20:23:15 |
Python
|
UTF-8
|
Python
| false | false | 3,162 |
py
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015 by Ecpy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Objects used to store filters, sequences and configs in the manager.
"""
from __future__ import (division, unicode_literals, print_function,
absolute_import)
from atom.api import Atom, Subclass, Dict, Coerced
import enaml
from .sequences.base_sequences import AbstractSequence
from .configs.base_config import AbstractConfig
from .contexts.base_context import BaseContext
from .shapes.base_shapes import AbstractShape
from .pulse import Pulse
with enaml.imports():
from .sequences.views.abstract_sequence_view import AbstractSequenceView
from .configs.base_config_views import AbstractConfigView
from .shapes.views.base_shapes_views import AbstractShapeView
from .contexts.views.base_context_view import BaseContextView
from .pulse_view import PulseView
class ObjectDependentInfos(Atom):
""" Base info object for everything with dependencies.
"""
#: Runtime dependencies ids of this object.
dependencies = Coerced(set, ())
class SequenceInfos(ObjectDependentInfos):
"""An object used to store the informations about a sequence.
"""
#: Class representing this sequence.
cls = Subclass(AbstractSequence)
#: Widget associated with this sequence.
view = Subclass(AbstractSequenceView)
#: Metadata associated with this sequence such as group, looping
    #: capabilities, etc.
metadata = Dict()
class PulseInfos(ObjectDependentInfos):
"""An object used to store the informations about a sequence.
"""
#: Class representing this pulse.
cls = Subclass(Pulse)
#: Widget associated with this pulse.
view = Subclass(PulseView)
    #: Metadata associated with this pulse such as group, looping
    #: capabilities, etc.
metadata = Dict()
class ConfigInfos(Atom):
"""An object used to store the informations about a sequence configurer.
"""
#: Class representing this configurer.
cls = Subclass(AbstractConfig)
#: Widget associated with this configurer.
view = Subclass(AbstractConfigView)
class ContextInfos(ObjectDependentInfos):
"""Object used to store informations about a Context, declared in a manifest.
"""
#: Class representing this context.
cls = Subclass(BaseContext)
#: Widget associated with this context.
view = Subclass(BaseContextView)
    #: Metadata associated with this context.
metadata = Dict()
class ShapeInfos(ObjectDependentInfos):
"""Object used to store informations about a shape.
"""
#: Class representing this Shape.
cls = Subclass(AbstractShape)
#: Widget associated with this Shape.
view = Subclass(AbstractShapeView)
    #: Metadata associated with this shape.
metadata = Dict()
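# Usage sketch (an illustrative addition, not part of the original module;
# assumes the `atom` package): the Coerced(set, ()) member declared on
# ObjectDependentInfos turns any iterable of dependency ids into a set.
if __name__ == '__main__':
    infos = ObjectDependentInfos(dependencies=['ecpy.pulses', 'ecpy.pulses'])
    assert infos.dependencies == {'ecpy.pulses'}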
|
[
"[email protected]"
] | |
2ccdbae7171dde011530efd66c5e27234901063d
|
d1aa9dc649209d2172c01f19f5121261fb5d6e9e
|
/Monitoring/Monitor/Monitor/Monitor_process.py
|
44d11a5abccb11b471d11af3a6b449ed6d0003cd
|
[] |
no_license
|
Alexanderklau/Amusing_python
|
484e97806bc45ecbe0220f899723fa091a0f088b
|
9ce288eac7eeabb0e21f62936b6eb5ac2a0c934e
|
refs/heads/master
| 2021-12-27T03:33:37.535288 | 2021-12-20T08:00:11 | 2021-12-20T08:00:11 | 107,672,208 | 45 | 11 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,174 |
py
|
# coding: utf-8
__author__ = 'lau.wenbo'
"""
监控分为持续监控和自定义监控
持续监控会每60s统计出占比前十的进程,不停的将其打入日志
自定义监控可以自定监控的频率,监控指定进程,打印所需要的数据
例如固定进程的CPU,内存,线程占用等
"""
import sys
sys.path.append("..")
from Check import check_cpu, check_memory, check_process, check_threading
from Log import monitor_log
import getopt
import json
import time
f = open("/Monitor/setting.json", "r")
setting = json.load(f)
cpu_max = float(setting["CPU_max"])
memeory_max = float(setting["Memory_max"])
check_time = setting["time"]
def run_process_have():
    return check_threading.process_have(cpu_max, memory_max)
def run_check_process(name):
    return check_process.get_process(name)
def run_check_process_thread(name):
    # NOTE: currently identical to run_check_process.
    return check_process.get_process(name)
def run_get_cpu():
    return check_cpu.get_cpu_none()
def run_get_memory():
    return check_memory.get_memory()
def run_get_cpu_have():
    return check_cpu.get_cpu_have(cpu_max)
def run_get_memory_have():
    return check_memory.get_memory_have(memory_max)
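# A minimal custom-monitoring loop sketched from the module docstring (an
# illustrative addition, not part of the original file): sample at the
# interval configured in setting.json and print whatever exceeds the limits.
if __name__ == '__main__':
    while True:
        print(run_get_cpu_have())     # processes above cpu_max
        print(run_get_memory_have())  # processes above memory_max
        time.sleep(float(check_time))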
|
[
"[email protected]"
] | |
d36e70a597876d7138fd5080d2710fbd0b07c764
|
3d7039903da398ae128e43c7d8c9662fda77fbdf
|
/database/Vue.js/juejin_1573.py
|
1e88def985b6e162a4a4fd1d2942a6a64c47e394
|
[] |
no_license
|
ChenYongChang1/spider_study
|
a9aa22e6ed986193bf546bb567712876c7be5e15
|
fe5fbc1a5562ff19c70351303997d3df3af690db
|
refs/heads/master
| 2023-08-05T10:43:11.019178 | 2021-09-18T01:30:22 | 2021-09-18T01:30:22 | 406,727,214 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 66,303 |
py
|
{"err_no": 0, "err_msg": "success", "data": [{"article_id": "6947326788917952520", "article_info": {"article_id": "6947326788917952520", "user_id": "1380642337065421", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "提升写代码效率:快速通过DOM元素打开对应的Vue文件", "brief_content": "您好,我是沧沧凉凉,是一名前端开发者,目前在掘金、知乎以及个人博客上同步发表一些学习前端时遇到的趣事和知识,欢迎关注。 我就时常会遇到这样的问题,有时候找一个模块对应的文件需要花上1分钟,但是看了本篇文章后你就会习得点击页面上对应的元素,一键在编辑器中打开它对应的Vue文件。 …", "is_english": 0, "is_original": 1, "user_index": 7.866426416027291, "original_type": 0, "original_author": "", "content": "", "ctime": "1617550719", "mtime": "1617593107", "rtime": "1617593107", "draft_id": "6947326268245278751", "view_count": 874, "collect_count": 18, "digg_count": 23, "comment_count": 6, "hot_index": 72, "is_hot": 0, "rank_index": 0.00170894, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1380642337065421", "user_name": "沧沧凉凉", "company": "", "job_title": "努力的咸鱼", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/1fa95b32722656014ecc7a196ab086a7~300x300.image", "level": 2, "description": "", "followee_count": 13, "follower_count": 37, "post_article_count": 24, "digg_article_count": 101, "got_digg_count": 384, "got_view_count": 21726, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 601, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}], "user_interact": {"id": 6947326788917952520, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151604080102121660771300574F"}, {"article_id": "6844903679636553736", "article_info": {"article_id": "6844903679636553736", "user_id": "1380642333405501", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215, 6809640407484334093, 6809640501776482317], "visible_level": 0, "link_url": "https://juejin.im/post/6844903679636553736", "cover_image": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2019/5/9/16a9c5f7397abe35~tplv-t2oaga2asx-image.image", "is_gfw": 0, "title": "《从零构建前后分离的web项目》实战 -5分钟快速构建炒鸡规范的VUE项目骨架", "brief_content": "我通常使用 cli 生成项目骨架再在之基础上进行个人修改。 顾名思义 XXX-CLI 就是使用命令行生成的 XXX 程序。之前写过一款 基于 nodeJs 制作个性 CLI 的教程 如何用node开发自己的cli工具并发布到NPM , 想详细了解制作流程的可以简单看看。 鉴于使…", "is_english": 0, "is_original": 1, "user_index": 0, 
"original_type": 0, "original_author": "", "content": "", "ctime": "1536910508", "mtime": "1599618922", "rtime": "1536910508", "draft_id": "6845075614521638925", "view_count": 8902, "collect_count": 212, "digg_count": 485, "comment_count": 23, "hot_index": 953, "is_hot": 0, "rank_index": 0.00170888, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1380642333405501", "user_name": "庄文达", "company": "zinglabs", "job_title": "full-stack", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/899fa5eb488b678ed360ddad7df0b49f~300x300.image", "level": 4, "description": "don't know What should be said", "followee_count": 26, "follower_count": 4095, "post_article_count": 27, "digg_article_count": 193, "got_digg_count": 3574, "got_view_count": 104487, "post_shortmsg_count": 11, "digg_shortmsg_count": 16, "isfollowed": false, "favorable_author": 1, "power": 5178, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546594, "tag_id": "6809640501776482317", "tag_name": "架构", "color": "#C679FF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f27d811ad7e2b2a0bc24.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1439515816, "mtime": 1631692674, "id_type": 9, "tag_alias": "", "post_article_count": 10508, "concern_user_count": 338797}], "user_interact": {"id": 6844903679636553736, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151604080102121660771300574F"}, {"article_id": "6988340121854541838", "article_info": {"article_id": "6988340121854541838", "user_id": "1108772363960590", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "vue CLI 手动创建项目 vue create xxx", "brief_content": "使用 vue CLI 提供的命令,创建一个叫“vue-unit-test-demo”的项目, 默认选择在: Default ([Vue 2] babel, eslint) 默认勾选了: Choose ", "is_english": 0, "is_original": 1, "user_index": 
1.49491483101579, "original_type": 0, "original_author": "", "content": "", "ctime": "1627099900", "mtime": "1627100693", "rtime": "1627100693", "draft_id": "6988330312325398535", "view_count": 264, "collect_count": 0, "digg_count": 2, "comment_count": 2, "hot_index": 17, "is_hot": 0, "rank_index": 0.00170516, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1108772363960590", "user_name": "DandS", "company": "", "job_title": "", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/mosaic-legacy/3791/5035712059~300x300.image", "level": 1, "description": "前端,积硅步,致千里,荀子-劝学。", "followee_count": 0, "follower_count": 1, "post_article_count": 9, "digg_article_count": 11, "got_digg_count": 20, "got_view_count": 794, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 27, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}], "user_interact": {"id": 6988340121854541838, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151604080102121660771300574F"}, {"article_id": "6999015025708892167", "article_info": {"article_id": "6999015025708892167", "user_id": "1380642333672440", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215], "visible_level": 0, "link_url": "", "cover_image": "https://p9-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/362c9d07ebfe49539415abe1e63917c8~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "回首Vue3之API篇(九)", "brief_content": "这是我参与8月更文挑战的第22天,活动详情查看:8月更文挑战 这篇文章我们来讲一下refs的使用,以及我们需要注意的地方。 如何使用 ref 在之前的很多文章都提到过ref,ref 对象具有指向内部值", "is_english": 0, "is_original": 1, "user_index": 1.49491483101579, "original_type": 0, "original_author": "", "content": "", "ctime": "1629585844", "mtime": "1629948456", "rtime": "1629620828", "draft_id": "6997669422055833630", "view_count": 63, "collect_count": 0, "digg_count": 2, "comment_count": 0, "hot_index": 5, "is_hot": 0, "rank_index": 0.00170358, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1380642333672440", "user_name": "也笑", "company": "", "job_title": "前端可视化工程师", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2017/12/2/160151b374bfe220~tplv-t2oaga2asx-image.image", "level": 1, "description": "清风拂心尘,笑度每一分。", "followee_count": 4, "follower_count": 6, 
"post_article_count": 36, "digg_article_count": 8, "got_digg_count": 45, "got_view_count": 3496, "post_shortmsg_count": 7, "digg_shortmsg_count": 17, "isfollowed": false, "favorable_author": 0, "power": 79, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}], "user_interact": {"id": 6999015025708892167, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151604080102121660771300574F"}, {"article_id": "6860430490302677006", "article_info": {"article_id": "6860430490302677006", "user_id": "2612095359650712", "category_id": "6809637767543259144", "tag_ids": [6809640402103042061, 6809640369764958215], "visible_level": 0, "link_url": "", "cover_image": "https://p6-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/c39f2380f69b4cc99e3f86480905c313~tplv-k3u1fbpfcp-zoom-1.image", "is_gfw": 0, "title": "Vue执行流程分析", "brief_content": "一切都显得那么自然。不过在百忙之中是否有小伙伴想过,一个小小的Vue实例怎么有这么大的能量,竟然可以构建出如此复杂的前端项目。那么Vue内部是如何运转的呢,做了哪些事情呢,从今天开始跟着我一探究竟。 从Vue的构造函数中可以看到,当我们执行new Vue()的时候,只执行了一个…", "is_english": 0, "is_original": 1, "user_index": 12.975127760193908, "original_type": 0, "original_author": "", "content": "", "ctime": "1597318734", "mtime": "1597318904", "rtime": "1597318904", "draft_id": "6860388246208610311", "view_count": 3882, "collect_count": 49, "digg_count": 44, "comment_count": 4, "hot_index": 241, "is_hot": 0, "rank_index": 0.0017033, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2612095359650712", "user_name": "快狗打车前端团队", "company": "前端论道", "job_title": "微信公众号", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2019/3/20/1699a796a1919f2f~tplv-t2oaga2asx-image.image", "level": 5, "description": "专注前端技术分享,定期推送高质量文章,欢迎关注点赞", "followee_count": 5, "follower_count": 10423, "post_article_count": 71, "digg_article_count": 72, "got_digg_count": 9629, "got_view_count": 528720, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 1, "power": 14916, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", 
"category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546522, "tag_id": "6809640402103042061", "tag_name": "前端框架", "color": "#F2AB5B", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f7a198f1e1aeb6d79878.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435964339, "mtime": 1631690383, "id_type": 9, "tag_alias": "", "post_article_count": 4037, "concern_user_count": 256973}, {"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}], "user_interact": {"id": 6860430490302677006, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151604080102121660771300574F"}, {"article_id": "6898498514813190158", "article_info": {"article_id": "6898498514813190158", "user_id": "483440843559406", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "https://p9-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/e913c28a91334157bb5e9672a95d7bcd~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "Vue3 也能用上 Vue2 组件 From EMP", "brief_content": "上篇文章讨论完怎么在 React 和 Vue 互相调用之后,我们发现了新的需求。 升级 Vue3 之后,因为底层的 render 函数被重写,导致 Vue2 现有丰富并完善的组件不能直接在 Vue3 上使用。因此 EMP 针对这个问题,提出了 Vue3 调用 Vue2 的方案,…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1606181855", "mtime": "1606197244", "rtime": "1606197244", "draft_id": "6898495140751491080", "view_count": 2636, "collect_count": 11, "digg_count": 29, "comment_count": 12, "hot_index": 172, "is_hot": 0, "rank_index": 0.00170091, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "483440843559406", "user_name": "Efox", "company": "百度", "job_title": "Developer", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/f6a6e5b733365fc53c95a1d1da971cc9~300x300.image", "level": 3, "description": "百度 YY GFE", "followee_count": 15, "follower_count": 1775, "post_article_count": 74, "digg_article_count": 171, "got_digg_count": 1569, "got_view_count": 211912, "post_shortmsg_count": 0, "digg_shortmsg_count": 3, "isfollowed": false, "favorable_author": 0, "power": 3867, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, 
"show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6898498514813190158, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151604080102121660771300574F"}, {"article_id": "6844903942321602568", "article_info": {"article_id": "6844903942321602568", "user_id": "1978776661792407", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215], "visible_level": 0, "link_url": "https://juejin.im/post/6844903942321602568", "cover_image": "", "is_gfw": 0, "title": "撸一个 Vue 自定义指令实现一键 Copy的功能", "brief_content": "再按照惯例,大家 ( 假装 ) 看不懂,然后我来举个栗子解释一番。。。好,还是不知所云,本文结束 ( Ctrl + F4 ) ,下一篇。 为了避免上述情况出现,就不解释了。实际上官方提供了很多内置指令,如:v-if、v-for、v-bind and so on。每一个指令都有自…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1568596887", "mtime": "1606692359", "rtime": "1568599205", "draft_id": "6845076460978307085", "view_count": 6899, "collect_count": 240, "digg_count": 189, "comment_count": 26, "hot_index": 559, "is_hot": 0, "rank_index": 0.00170077, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1978776661792407", "user_name": "Essentric", "company": "", "job_title": "", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/c4319fb5a654e657254df8a7a3e7ba7d~300x300.image", "level": 2, "description": "", "followee_count": 19, "follower_count": 103, "post_article_count": 3, "digg_article_count": 52, "got_digg_count": 404, "got_view_count": 19622, "post_shortmsg_count": 2, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 600, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, 
"mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}], "user_interact": {"id": 6844903942321602568, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151604080102121660771300574F"}, {"article_id": "6967234522614071310", "article_info": {"article_id": "6967234522614071310", "user_id": "1037564168896600", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "vue自定义组件实现 v-model双向绑定数据", "brief_content": "项目中会遇到自定义公共组件供项目调用,正常情况可以使用 props定义参数接收父组件传的参数,然后通过子组件的$emits()方法回传数据给父组件。 类似如下: 父组件 子组件 但是这种写法需要调用公", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1622185714", "mtime": "1622452608", "rtime": "1622452608", "draft_id": "6967234270238769182", "view_count": 735, "collect_count": 1, "digg_count": 8, "comment_count": 2, "hot_index": 46, "is_hot": 0, "rank_index": 0.0017005, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1037564168896600", "user_name": "折月", "company": "", "job_title": "", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/mosaic-legacy/3791/5035712059~300x300.image", "level": 1, "description": "", "followee_count": 0, "follower_count": 1, "post_article_count": 21, "digg_article_count": 1, "got_digg_count": 18, "got_view_count": 3038, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 48, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6967234522614071310, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151604080102121660771300574F"}, {"article_id": "6979127906807054366", "article_info": {"article_id": "6979127906807054366", 
"user_id": "2172290705137415", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "element-ui表单的多层循环验证处理", "brief_content": "当时在使用element-ui开发后台项目的时候,遇到表单中的数组数据字段需要验证,试了好几种写法都没能弄出来,甚至当时都没想到该怎么描述去百度查找别人的经验参考一下,现在我将这个现象记录下来,方便自", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1624954914", "mtime": "1625042874", "rtime": "1625042874", "draft_id": "6979115636576223262", "view_count": 425, "collect_count": 2, "digg_count": 4, "comment_count": 5, "hot_index": 30, "is_hot": 0, "rank_index": 0.00170016, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2172290705137415", "user_name": "wangpq", "company": "", "job_title": "web前端工程师", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/cd90e0a0cdb37d475b49830abe055f79~300x300.image", "level": 1, "description": "", "followee_count": 5, "follower_count": 3, "post_article_count": 4, "digg_article_count": 0, "got_digg_count": 15, "got_view_count": 1253, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 27, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6979127906807054366, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151604080102121660771300574F"}, {"article_id": "6991844363055661063", "article_info": {"article_id": "6991844363055661063", "user_id": "474636479897303", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "vue的变化侦测-Array篇| 8月更文挑战", "brief_content": "vue的变化侦测-Array篇 上次我们说过Object数据的变化侦测。Object的变化侦测主要依赖的是Object原型上的方法Object.defineProperty来监听其中的get和set方", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", 
"ctime": "1627915637", "mtime": "1628084175", "rtime": "1627962417", "draft_id": "6991841158993281037", "view_count": 171, "collect_count": 0, "digg_count": 5, "comment_count": 1, "hot_index": 14, "is_hot": 0, "rank_index": 0.00169968, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "474636479897303", "user_name": "敲代码有瘾", "company": "", "job_title": "前端", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/e1d7446b60cec3cceb23431f49737188~300x300.image", "level": 1, "description": "敲代码有瘾", "followee_count": 9, "follower_count": 6, "post_article_count": 7, "digg_article_count": 35, "got_digg_count": 30, "got_view_count": 665, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 36, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}], "user_interact": {"id": 6991844363055661063, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151604080102121660771300574F"}, {"article_id": "6982958703179726861", "article_info": {"article_id": "6982958703179726861", "user_id": "360295547023176", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "学完 Vue3、TypeScript 干什么,先来一个\"网抑云\"", "brief_content": "1、前言 没错又是仿网易云,那么多了网易云项目了还写?纯粹是为了学习罢了。 之前学习的 Vue3、Vite2、TypeScript 一直没有新项目可用,控制不住自己的小手了必须写写,也为了要看源码熟悉", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1625846812", "mtime": "1626076799", "rtime": "1626076799", "draft_id": "6982947725515948068", "view_count": 347, "collect_count": 4, "digg_count": 6, "comment_count": 1, "hot_index": 24, "is_hot": 0, "rank_index": 0.0016996, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "360295547023176", "user_name": "haiweilian", "company": "", "job_title": "前端", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/727635eb93cf868532159fc0b31a1f70~300x300.image", "level": 1, "description": "大前端开发", "followee_count": 12, "follower_count": 8, "post_article_count": 19, "digg_article_count": 29, "got_digg_count": 47, "got_view_count": 1977, "post_shortmsg_count": 1, "digg_shortmsg_count": 1, "isfollowed": false, "favorable_author": 0, 
"power": 66, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6982958703179726861, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151604080102121660771300574F"}, {"article_id": "6987947238873907231", "article_info": {"article_id": "6987947238873907231", "user_id": "1267110542837726", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "Vue自带的组件keep-alive", "brief_content": "Vue中的所有的组件都是有声明周期的,从出生到消亡,一般要经历8个声明周期或者6个,声明周期就是钩子函数。 出现的原因和使用的场景 如果有一个组件我要重复使用,但是我要重复切换组件,那这个组件就不断的", "is_english": 0, "is_original": 1, "user_index": 1.48514727978413, "original_type": 0, "original_author": "", "content": "", "ctime": "1627008260", "mtime": "1627271517", "rtime": "1627271517", "draft_id": "6987947070397087752", "view_count": 204, "collect_count": 1, "digg_count": 5, "comment_count": 1, "hot_index": 16, "is_hot": 0, "rank_index": 0.00169941, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1267110542837726", "user_name": "前端_ZLB", "company": "", "job_title": "", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/56a409021bf987376f220b475cd7a024~300x300.image", "level": 2, "description": "", "followee_count": 12, "follower_count": 12, "post_article_count": 31, "digg_article_count": 36, "got_digg_count": 85, "got_view_count": 6207, "post_shortmsg_count": 0, "digg_shortmsg_count": 2, "isfollowed": false, "favorable_author": 0, "power": 147, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", 
"category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6987947238873907231, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151604080102121660771300574F"}, {"article_id": "6981329984811958280", "article_info": {"article_id": "6981329984811958280", "user_id": "3386151544826551", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215, 6809640629728083981, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "vue-cli中配置使用Vuex", "brief_content": "在vue开发用我们常常会用到一些全局的数据,比如用户信息、用户权限、一些状态等等。我们传统的数据都是单向的,所以我们得一层一层的传递,这样我们遇到一个多组件共享一个数据的时候,vuex是个不错选择。", "is_english": 0, "is_original": 1, "user_index": 7.508716229440013, "original_type": 0, "original_author": "", "content": "", "ctime": "1625467606", "mtime": "1625470547", "rtime": "1625470547", "draft_id": "6976119337765601317", "view_count": 313, "collect_count": 0, "digg_count": 3, "comment_count": 2, "hot_index": 20, "is_hot": 0, "rank_index": 0.00169933, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3386151544826551", "user_name": "MarJet", "company": "", "job_title": "前端开发", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2019/4/5/169ed0ec64ec01b2~tplv-t2oaga2asx-image.image", "level": 2, "description": "不断在前端领域探索中...........................", "followee_count": 32, "follower_count": 18, "post_article_count": 6, "digg_article_count": 225, "got_digg_count": 40, "got_view_count": 9860, "post_shortmsg_count": 0, "digg_shortmsg_count": 7, "isfollowed": false, "favorable_author": 0, "power": 138, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": 
"6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}, {"id": 2546687, "tag_id": "6809640629728083981", "tag_name": "Vuex", "color": "#000000", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/134df42926494412054c.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1462324321, "mtime": 1631676264, "id_type": 9, "tag_alias": "", "post_article_count": 1620, "concern_user_count": 87962}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6981329984811958280, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151604080102121660771300574F"}, {"article_id": "7000927351039066125", "article_info": {"article_id": "7000927351039066125", "user_id": "1460592760848989", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640369764958215], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "vue 中虚拟dom的理解", "brief_content": "这是我参与8月更文挑战的第27天,活动详情查看:8月更文挑战 虚拟dom 频繁且复杂的dom操作通常是前端性能瓶颈的产生点,Vue提供了虚拟dom的解决办法 虚拟的DOM的核心思想是:对复杂的文档D", "is_english": 0, "is_original": 1, "user_index": 0.733112461525629, "original_type": 0, "original_author": "", "content": "", "ctime": "1630030464", "mtime": "1630055756", "rtime": "1630055756", "draft_id": "6999890227032490015", "view_count": 95, "collect_count": 0, "digg_count": 0, "comment_count": 0, "hot_index": 4, "is_hot": 0, "rank_index": 0.00169809, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1460592760848989", "user_name": "多喝热水__", "company": "", "job_title": "前端搬砖工程师", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/1040d1e1af762b0cacf560cd0f0fc7ab~300x300.image", "level": 1, "description": "", "followee_count": 0, "follower_count": 0, "post_article_count": 31, "digg_article_count": 18, "got_digg_count": 26, "got_view_count": 1612, "post_shortmsg_count": 1, "digg_shortmsg_count": 1, "isfollowed": false, "favorable_author": 0, "power": 42, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": 
"#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}], "user_interact": {"id": 7000927351039066125, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151604080102121660771300574F"}, {"article_id": "6844904195758243848", "article_info": {"article_id": "6844904195758243848", "user_id": "2348212570298807", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215], "visible_level": 0, "link_url": "https://juejin.im/post/6844904195758243848", "cover_image": "", "is_gfw": 0, "title": "vue中使用vue-socket.io的一些心得、踩坑记录", "brief_content": "vue项目中有使用到socket长连接,之前一直都是都是使用vue-socke.io[github地址],但最近在使用时,出了一些莫名奇妙的bug,也是为此掉了不少头发,最后还是解决了。关于socket相关内容介绍以及使用场景,这里不会做太多介绍(主要是懒),可以翻看其他文章。…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1592642740", "mtime": "1599035093", "rtime": "1592658088", "draft_id": "6845076830844616711", "view_count": 5413, "collect_count": 30, "digg_count": 22, "comment_count": 7, "hot_index": 299, "is_hot": 0, "rank_index": 0.00169795, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2348212570298807", "user_name": "今晚不下班我说的", "company": "", "job_title": "前端菜狗", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/1caa9a36a852901c05a6d000929cbc44~300x300.image", "level": 2, "description": "", "followee_count": 47, "follower_count": 27, "post_article_count": 4, "digg_article_count": 1, "got_digg_count": 74, "got_view_count": 7434, "post_shortmsg_count": 21, "digg_shortmsg_count": 43, "isfollowed": false, "favorable_author": 0, "power": 148, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}], "user_interact": {"id": 6844904195758243848, 
"omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151604080102121660771300574F"}, {"article_id": "6844903824956588040", "article_info": {"article_id": "6844903824956588040", "user_id": "4054654613718350", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215], "visible_level": 0, "link_url": "https://juejin.im/post/6844903824956588040", "cover_image": "", "is_gfw": 0, "title": "Vue SSR 踩坑之旅", "brief_content": "本文并不是Vue SSR的入门指南,没有一步步介绍Vue SSR入门,如果你想要Vue SSR入门教程,建议阅读Vue官网的《Vue SSR指南》,那应该是最详细的Vue SSR入门教程了。这篇文章的意义是,主要介绍如何在SSR服务端渲染中使用最受欢迎的vue ui 库elem…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1555483393", "mtime": "1598500012", "rtime": "1555485877", "draft_id": "6845076255646171143", "view_count": 10915, "collect_count": 327, "digg_count": 153, "comment_count": 15, "hot_index": 713, "is_hot": 0, "rank_index": 0.00169697, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "4054654613718350", "user_name": "我是你的超级英雄", "company": "", "job_title": "一个前端", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2018/11/20/1672ebc1f06c9ffa~tplv-t2oaga2asx-image.image", "level": 6, "description": "成为前端领域有影响力的人", "followee_count": 29, "follower_count": 8067, "post_article_count": 27, "digg_article_count": 37, "got_digg_count": 13447, "got_view_count": 792172, "post_shortmsg_count": 0, "digg_shortmsg_count": 1, "isfollowed": false, "favorable_author": 1, "power": 21368, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}], "user_interact": {"id": 6844903824956588040, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151604080102121660771300574F"}, {"article_id": "6974577066121363464", "article_info": {"article_id": "6974577066121363464", "user_id": "984814516984919", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "https://p9-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/82accb78537e48f08bd2f9667aed638e~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "vue3中的元编程&代码抽离", "brief_content": "vue3中的元编程&vue3代码抽离 前言 最近两天看了一篇文章, 
https://mp.weixin.qq.com/s/dHDaOSnSoH6X4gmHo7LrLg, 这篇文章讲的是react中元编", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1623895323", "mtime": "1623999471", "rtime": "1623999471", "draft_id": "6974391763859406884", "view_count": 488, "collect_count": 3, "digg_count": 7, "comment_count": 5, "hot_index": 36, "is_hot": 0, "rank_index": 0.00169684, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "984814516984919", "user_name": "VELOMA", "company": "嗯..", "job_title": "前端开发", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/a0a8fc4faadd828b9f67e5f81ae03fd2~300x300.image", "level": 1, "description": "嗯....喜欢代码", "followee_count": 31, "follower_count": 10, "post_article_count": 7, "digg_article_count": 28, "got_digg_count": 39, "got_view_count": 2149, "post_shortmsg_count": 0, "digg_shortmsg_count": 1, "isfollowed": false, "favorable_author": 0, "power": 60, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6974577066121363464, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151604080102121660771300574F"}, {"article_id": "6986898845569384462", "article_info": {"article_id": "6986898845569384462", "user_id": "3096655823910872", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "https://p1-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/517540e3908c45fc8cee576cbe9b59f6~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "Vue2原理、代码分析", "brief_content": "划水不能停,分析vue2 vue源码地址:https://github.com/vuejs/vue 最好我们把这个源码给克隆下来方便我们以后阅读,测试。 源码结构 benchmarks 基准测试,与其", "is_english": 0, "is_original": 1, "user_index": 3.709511291351455, "original_type": 0, "original_author": "", "content": "", "ctime": "1626764226", "mtime": "1626839577", "rtime": "1626839577", "draft_id": "6986897858087616548", "view_count": 184, 
"collect_count": 1, "digg_count": 6, "comment_count": 1, "hot_index": 16, "is_hot": 0, "rank_index": 0.00169571, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3096655823910872", "user_name": "泽泽泽啊", "company": "", "job_title": "", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/3900c6f0a1072b285795b07427870bd0~300x300.image", "level": 1, "description": "", "followee_count": 18, "follower_count": 5, "post_article_count": 5, "digg_article_count": 0, "got_digg_count": 39, "got_view_count": 1073, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 49, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6986898845569384462, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151604080102121660771300574F"}, {"article_id": "6973833760429047845", "article_info": {"article_id": "6973833760429047845", "user_id": "2682464103060541", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "利用Vue自定义指令 - 让你的开发变得更简单", "brief_content": "前段时间在用框架开发H5页面时,碰到框架中的组件内置了一个属性用于适配异形屏,虽然是组件内部实现的,但这个方式让我萌生一个想法:", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1623722250", "mtime": "1626500975", "rtime": "1623831257", "draft_id": "6973472883636961317", "view_count": 460, "collect_count": 6, "digg_count": 14, "comment_count": 0, "hot_index": 37, "is_hot": 0, "rank_index": 0.00169545, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2682464103060541", "user_name": "茶无味的一天", "company": "ZAKER", "job_title": "代码钢琴师", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/6bb4e5be6357ab9396e67cfbf9eaf136~300x300.image", "level": 2, "description": "代码首先是写给人看的,附带能在机器上运行。", 
"followee_count": 13, "follower_count": 37, "post_article_count": 31, "digg_article_count": 60, "got_digg_count": 238, "got_view_count": 20168, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 439, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6973833760429047845, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151604080102121660771300574F"}, {"article_id": "6995814565946589192", "article_info": {"article_id": "6995814565946589192", "user_id": "2233811653625502", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "https://p6-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/b5b03381dad94342bc1b09988d21e61d~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "Vue3-组合式Api和响应式原理解读(三)_响应式数据的原理", "brief_content": "实现响应式的原理 Vue2与Vue3响应式原理对比 Proxy语法以及使用 target 被proxy代理的模板对象 handler 捕捉器(处理器对象) handler.get() 属性读取操作的捕", "is_english": 0, "is_original": 1, "user_index": 3.799204938088558, "original_type": 0, "original_author": "", "content": "", "ctime": "1628840023", "mtime": "1628844931", "rtime": "1628844931", "draft_id": "6995792126542168095", "view_count": 89, "collect_count": 0, "digg_count": 2, "comment_count": 0, "hot_index": 6, "is_hot": 0, "rank_index": 0.00169504, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2233811653625502", "user_name": "haozzzzzzz", "company": "", "job_title": "前端开发工程师", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/8e701582a429b95e09a3c410491278a9~300x300.image", "level": 1, "description": "吃饭睡觉写代码,coding...", "followee_count": 2, "follower_count": 4, "post_article_count": 4, "digg_article_count": 6, "got_digg_count": 14, "got_view_count": 508, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 19, "study_point": 0, 
"university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6995814565946589192, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151604080102121660771300574F"}], "cursor": "eyJ2IjoiNzAwNzI1MjQ2NDcyNjQ1ODM5OSIsImkiOjI5NjB9", "count": 11059, "has_more": true}
|
[
"[email protected]"
] | |
ed39fe3c9346697d2fd9e046484b54ce38a345b5
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/p4VQE/R1/benchmark/startQiskit142.py
|
bcf544e34e2f6180c27c19e7ab373d5da0db3295
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,447 |
py
|
# qubit number=4
# total number=12
import numpy as np
from qiskit import QuantumCircuit, QuantumRegister, execute, transpile, BasicAer
import networkx as nx
from qiskit.test.mock import FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n: int) -> QuantumCircuit:
# circuit begin
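    # note: E, V, gamma and beta are module-level globals defined under
    # __main__ below; this generated benchmark relies on them being set
    # before make_circuit is called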
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[0]) # number=9
prog.cz(input_qubit[3],input_qubit[0]) # number=10
prog.h(input_qubit[0]) # number=11
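    # the H-CZ-H sandwich above is equivalent to cx(input_qubit[3], input_qubit[0])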
prog.z(input_qubit[3]) # number=7
prog.cx(input_qubit[3],input_qubit[0]) # number=8
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
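    # QAOA-style cost layer: one controlled-phase per edge; the generator mixes
    # 0-based register indexing (k-1, l-1) for cp with raw endpoint indices for p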
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
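    # evaluate the analytic objective F1 over the (gamma, beta) grid and keep
    # the maximizing pair of angles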
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
    sample_shot = 5200
writefile = open("../data/startQiskit142.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('qasm_simulator')
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
[
"[email protected]"
] | |
d34bbc0300e2610300bf3f8097c18d4608516f1f
|
94bd78e63de94859eb076e52683f73f6ea91eae3
|
/199.py
|
93d67efac4b025be260275ea48441696363cd9de
|
[] |
no_license
|
MadSkittles/leetcode
|
70598c1c861a8ff5d2f7c921a311307d55770acc
|
817bbb73dfe095b9c9358dc459ba6605a2a9a256
|
refs/heads/master
| 2021-11-30T04:56:02.432749 | 2021-11-12T03:28:47 | 2021-11-12T03:28:47 | 123,558,601 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,026 |
py
|
class Solution:
def rightSideView(self, root):
if not root:
return []
from queue import Queue
q, m = Queue(), {}
q.put((root, 0))
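        # BFS: m[floor] is overwritten left to right, so after the loop it
        # holds the rightmost value seen on each level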
while not q.empty():
node, floor = q.get()
m[floor] = node.val
if node.left:
q.put((node.left, floor + 1))
if node.right:
q.put((node.right, floor + 1))
return [m[i] for i in range(len(m))]
def rightSideView1(self, root):
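        # DFS alternative: collect right-first root-to-leaf paths, then splice
        # each strictly deeper suffix onto the accumulated answer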
self.result = []
res = ()
if root:
self.f(root, (root.val,))
for p in self.result:
if len(p) > len(res):
res += p[len(res) - len(p) :]
        return list(res)  # LeetCode expects a list, not a tuple
def f(self, node, path):
if not node.left and not node.right:
self.result.append(path)
return
if node.right:
self.f(node.right, (*path, node.right.val))
if node.left:
self.f(node.left, (*path, node.left.val))
|
[
"[email protected]"
] | |
03005788a36d01937289261a7e288f6883d64b8a
|
bd8400dae9bf43922d043c22999dcfdea08b3797
|
/5 Matplotlib/51.scatter_plot.py
|
7feeea8712ca3fe366dd650f48d5411a11831a7a
|
[] |
no_license
|
srikeshnagoji/Python_Fundamental_DataScience
|
dfc3bd5003a3bc357f8b08f0084cb5b2fc766bda
|
3982cce2b69bed7128aeb7ce8adbd22f71890fcf
|
refs/heads/master
| 2020-07-04T20:12:38.779942 | 2019-08-14T04:36:53 | 2019-08-14T04:36:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 334 |
py
|
import matplotlib.pyplot as plt
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
y = [5, 3, 6, 1, 7, 9, 3, 3, 2]
plt.scatter(x, y, label='Test', color='k', marker='*', s=200)
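# s=200 sets the marker area in points^2; color='k' is matplotlib shorthand for black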
# plt.plot(x,y,'*')
# marker options include '*', 'x', 'o'; see the matplotlib docs
plt.title('Test Plotting Data\nby Lintang Wisesa')
plt.xlabel('x values')
plt.ylabel('y values')
# plt.legend()
plt.show()
|
[
"[email protected]"
] | |
b7ef1fb30f414e36f09eb45f57a68beaea974a31
|
18239524612cf572bfeaa3e001a3f5d1b872690c
|
/clients/keto/python/test/test_remove_ory_access_control_policy_role_members.py
|
6b90ab385d91bf24e7c7509df87b9ae9bddd4327
|
[
"Apache-2.0"
] |
permissive
|
simoneromano96/sdk
|
2d7af9425dabc30df830a09b26841fb2e8781bf8
|
a6113d0daefbbb803790297e4b242d4c7cbbcb22
|
refs/heads/master
| 2023-05-09T13:50:45.485951 | 2021-05-28T12:18:27 | 2021-05-28T12:18:27 | 371,689,133 | 0 | 0 |
Apache-2.0
| 2021-05-28T12:11:41 | 2021-05-28T12:11:40 | null |
UTF-8
|
Python
| false | false | 1,208 |
py
|
# coding: utf-8
"""
ORY Keto
A cloud native access control server providing best-practice patterns (RBAC, ABAC, ACL, AWS IAM Policies, Kubernetes Roles, ...) via REST APIs. # noqa: E501
The version of the OpenAPI document: v0.0.0-alpha.1
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import ory_keto_client
from ory_keto_client.models.remove_ory_access_control_policy_role_members import RemoveOryAccessControlPolicyRoleMembers # noqa: E501
from ory_keto_client.rest import ApiException
class TestRemoveOryAccessControlPolicyRoleMembers(unittest.TestCase):
"""RemoveOryAccessControlPolicyRoleMembers unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testRemoveOryAccessControlPolicyRoleMembers(self):
"""Test RemoveOryAccessControlPolicyRoleMembers"""
# FIXME: construct object with mandatory attributes with example values
# model = ory_keto_client.models.remove_ory_access_control_policy_role_members.RemoveOryAccessControlPolicyRoleMembers() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
0b17e6a5303ffe9a2c6a6cdd3cb22ae5d6303f11
|
8ffcf5ce3f3861300f5ca6ba355600c1c65a9ede
|
/release/scripts/mgear/shifter_classic_components/chain_FK_spline_02/guide.py
|
ad2ead23257d2a7401ca5a26401e2542bc140b80
|
[
"MIT"
] |
permissive
|
mottosso/mgear4
|
a5db6d712e07fcec607aa877576e7beee6b8b45e
|
e84362aa86e2049cf160dc516e023070e3071e53
|
refs/heads/master
| 2023-05-23T20:47:46.761469 | 2021-06-09T00:13:39 | 2021-06-09T00:13:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,973 |
py
|
"""Guide chain FK spline 01 module"""
from functools import partial
from mgear.shifter.component import guide
from mgear.core import pyqt
from mgear.vendor.Qt import QtWidgets, QtCore
from maya.app.general.mayaMixin import MayaQWidgetDockableMixin
from maya.app.general.mayaMixin import MayaQDockWidget
from . import settingsUI as sui
# guide info
AUTHOR = "Miquel Campos"
URL = "www.miquel-campos.com"
EMAIL = ""
VERSION = [1, 0, 0]
TYPE = "chain_FK_spline_02"
NAME = "chain"
DESCRIPTION = "FK chain with spline-driven joints and extra IK controls for \
each segment. Includes an option to add an extra tweak control per joint"
##########################################################
# CLASS
##########################################################
class Guide(guide.ComponentGuide):
"""Component Guide Class"""
compType = TYPE
compName = NAME
description = DESCRIPTION
author = AUTHOR
url = URL
email = EMAIL
version = VERSION
def postInit(self):
"""Initialize the position for the guide"""
self.save_transform = ["root", "#_loc"]
self.save_blade = ["blade"]
self.addMinMax("#_loc", 1, -1)
def addObjects(self):
"""Add the Guide Root, blade and locators"""
self.root = self.addRoot()
self.locs = self.addLocMulti("#_loc", self.root)
self.blade = self.addBlade("blade", self.root, self.locs[0])
centers = [self.root]
centers.extend(self.locs)
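        # draw the guide display curves through the root and every locator
        # (the reference curve uses degree 3)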
self.dispcrv = self.addDispCurve("crv", centers)
self.addDispCurve("crvRef", centers, 3)
def addParameters(self):
"""Add the configurations settings"""
self.pNeutralPose = self.addParam("neutralpose", "bool", False)
self.pOverrideNegate = self.addParam("overrideNegate", "bool", False)
self.pKeepLength = self.addParam("keepLength", "bool", False)
self.pOverrideJointNb = self.addParam("overrideJntNb", "bool", False)
self.pJntNb = self.addParam("jntNb", "long", 3, 1)
self.pExtraTweak = self.addParam("extraTweak", "bool", False)
self.pUseIndex = self.addParam("useIndex", "bool", False)
self.pParentJointIndex = self.addParam(
"parentJointIndex", "long", -1, None, None)
##########################################################
# Setting Page
##########################################################
class settingsTab(QtWidgets.QDialog, sui.Ui_Form):
def __init__(self, parent=None):
super(settingsTab, self).__init__(parent)
self.setupUi(self)
class componentSettings(MayaQWidgetDockableMixin, guide.componentMainSettings):
def __init__(self, parent=None):
self.toolName = TYPE
        # Delete old instances of the component settings window.
pyqt.deleteInstances(self, MayaQDockWidget)
        super(componentSettings, self).__init__(parent=parent)
self.settingsTab = settingsTab()
self.setup_componentSettingWindow()
self.create_componentControls()
self.populate_componentControls()
self.create_componentLayout()
self.create_componentConnections()
def setup_componentSettingWindow(self):
self.mayaMainWindow = pyqt.maya_main_window()
self.setObjectName(self.toolName)
self.setWindowFlags(QtCore.Qt.Window)
self.setWindowTitle(TYPE)
self.resize(280, 350)
def create_componentControls(self):
return
def populate_componentControls(self):
"""Populate Controls
Populate the controls values from the custom attributes of the
component.
"""
# populate tab
self.tabs.insertTab(1, self.settingsTab, "Component Settings")
# populate component settings
self.populateCheck(self.settingsTab.neutralPose_checkBox,
"neutralpose")
self.populateCheck(self.settingsTab.overrideNegate_checkBox,
"overrideNegate")
self.populateCheck(self.settingsTab.keepLength_checkBox,
"keepLength")
self.populateCheck(self.settingsTab.overrideJntNb_checkBox,
"overrideJntNb")
self.populateCheck(self.settingsTab.extraTweak_checkBox,
"extraTweak")
self.settingsTab.jntNb_spinBox.setValue(self.root.attr("jntNb").get())
def create_componentLayout(self):
self.settings_layout = QtWidgets.QVBoxLayout()
self.settings_layout.addWidget(self.tabs)
self.settings_layout.addWidget(self.close_button)
self.setLayout(self.settings_layout)
def create_componentConnections(self):
self.settingsTab.neutralPose_checkBox.stateChanged.connect(
partial(self.updateCheck,
self.settingsTab.neutralPose_checkBox,
"neutralpose"))
self.settingsTab.overrideNegate_checkBox.stateChanged.connect(
partial(self.updateCheck,
self.settingsTab.overrideNegate_checkBox,
"overrideNegate"))
self.settingsTab.keepLength_checkBox.stateChanged.connect(
partial(self.updateCheck,
self.settingsTab.keepLength_checkBox,
"keepLength"))
self.settingsTab.overrideJntNb_checkBox.stateChanged.connect(
partial(self.updateCheck,
self.settingsTab.overrideJntNb_checkBox,
"overrideJntNb"))
self.settingsTab.jntNb_spinBox.valueChanged.connect(
partial(self.updateSpinBox,
self.settingsTab.jntNb_spinBox,
"jntNb"))
self.settingsTab.extraTweak_checkBox.stateChanged.connect(
partial(self.updateCheck,
self.settingsTab.extraTweak_checkBox,
"extraTweak"))
def dockCloseEventTriggered(self):
pyqt.deleteInstances(self, MayaQDockWidget)
|
[
"[email protected]"
] | |
f1d583cd5a8a3a870e8664adc5fb87da3b4769f1
|
24cf6992d9c9b8523a0a7d3a7a45e701cd49fce3
|
/djangotest/base/migrations/0017_auto_20200901_1402.py
|
da6b531e64b9f8dfb91ac1b8f9d08a3dd8c75441
|
[] |
no_license
|
yannickkiki/stuffs
|
f4e150a61eb0426791753f5da558dba09940d240
|
d46e1ec56eb4f0f3486e72ffce5c7bba7f2a1796
|
refs/heads/master
| 2023-08-10T20:05:38.255427 | 2021-10-04T05:38:39 | 2021-10-04T05:38:39 | 353,716,213 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,185 |
py
|
# Generated by Django 2.2.11 on 2020-09-01 14:02
from django.db import migrations, models
import django.db.models.deletion
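# Removes the FoxxCard, FoxxDisplay and LianaCard models and re-links
# Product and TrellisCard to base.MetaCard via one-to-one fields.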
class Migration(migrations.Migration):
dependencies = [
('base', '0016_auto_20200901_1233'),
]
operations = [
migrations.RemoveField(
model_name='foxxdisplay',
name='card',
),
migrations.RemoveField(
model_name='lianacard',
name='metacard_ptr',
),
migrations.AlterField(
model_name='product',
name='metacard',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='product', to='base.MetaCard'),
),
migrations.AlterField(
model_name='trelliscard',
name='metacard',
field=models.OneToOneField(on_delete=django.db.models.deletion.PROTECT, related_name='trellis', to='base.MetaCard'),
),
migrations.DeleteModel(
name='FoxxCard',
),
migrations.DeleteModel(
name='FoxxDisplay',
),
migrations.DeleteModel(
name='LianaCard',
),
]
|
[
"[email protected]"
] | |
5dea6c310618ced92764a30c813fc80187d1ff6d
|
aee144770c8f4ec5987777aebe5b064e558fc474
|
/doc/integrations/pytorch/projects/wizard_of_wikipedia/wizard_transformer_ranker/wizard_dict.py
|
3765563494288bbf158bfd857c651ffd0d48e010
|
[
"CC-BY-SA-3.0",
"Apache-2.0",
"AGPL-3.0-only",
"MIT"
] |
permissive
|
adgang/cortx
|
1d8e6314643baae0e6ee93d4136013840ead9f3b
|
a73e1476833fa3b281124d2cb9231ee0ca89278d
|
refs/heads/main
| 2023-04-22T04:54:43.836690 | 2021-05-11T00:39:34 | 2021-05-11T00:39:34 | 361,394,462 | 1 | 0 |
Apache-2.0
| 2021-04-25T10:12:59 | 2021-04-25T10:12:59 | null |
UTF-8
|
Python
| false | false | 3,299 |
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.build_data import modelzoo_path
from parlai.core.dict import DictionaryAgent
from collections import defaultdict
import copy
import os
import re
RETOK = re.compile(r'\w+|[^\w\s]|\n', re.UNICODE)
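# RETOK matches runs of word characters, single punctuation marks, or newlines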
class WizardDictAgent(DictionaryAgent):
def __init__(self, opt, shared=None):
# initialize fields
self.opt = copy.deepcopy(opt)
self.minfreq = opt.get('dict_minfreq', DictionaryAgent.default_minfreq)
self.null_token = '__PAD__'
self.end_token = '__SOC__'
self.unk_token = '__UNK__'
self.start_token = '__SOC__'
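        # note that the start and end tokens both map to '__SOC__' here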
self.tokenizer = opt.get('dict_tokenizer', 'whitespace')
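        # expected values: 're', 'whitespace', or anything else to use the
        # punctuation-padding splitter in tokenize()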
self.lower = opt.get('dict_lower', DictionaryAgent.default_lower)
self.maxtokens = opt.get('dict_maxtokens', DictionaryAgent.default_maxtokens)
self.textfields = opt.get(
'dict_textfields', DictionaryAgent.default_textfields
).split(",")
if shared:
self.freq = shared.get('freq', {})
self.tok2ind = shared.get('tok2ind', {})
self.ind2tok = shared.get('ind2tok', {})
else:
self.freq = defaultdict(int)
self.tok2ind = {}
self.ind2tok = {}
if opt.get('dict_file') and os.path.isfile(opt['dict_file']):
# load pre-existing dictionary
self.load(opt['dict_file'])
elif opt.get('dict_initpath'):
# load seed dictionary
opt['dict_initpath'] = modelzoo_path(
opt.get('datapath'), opt['dict_initpath']
)
self.load(opt['dict_initpath'])
self.add_token(self.null_token)
self.add_token(self.start_token)
self.add_token(self.end_token)
self.add_token(self.unk_token)
if not shared:
if opt.get('dict_file'):
self.save_path = opt['dict_file']
# cache unk token for later
self._unk_token_idx = self.tok2ind.get(self.unk_token)
def tokenize(self, text, building=False):
"""
        Return a list of tokens from the input text.
"""
if self.lower:
text = text.lower()
if self.tokenizer == 're':
return self.re_tokenize(text)
elif self.tokenizer == 'whitespace':
return text.split(' ')
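        # any other tokenizer setting falls through to the punctuation-padding
        # split below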
word_tokens = (
text.replace('.', ' . ')
.replace('. . .', '...')
.replace(',', ' , ')
.replace(';', ' ; ')
.replace(':', ' : ')
.replace('!', ' ! ')
.replace('?', ' ? ')
            .replace('  ', ' ')
            .replace('  ', ' ')
.strip()
.split(" ")
)
return word_tokens
def re_tokenize(self, text):
"""
This splits along whitespace and punctuation and keeps the newline as a token in
the returned list.
"""
return RETOK.findall(text)
|
[
"[email protected]"
] |