blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2ebcefcec2c64fbf9f76368c9e52f2c4f6031297
|
1a80c38ea020a8b18bb2c61b55caff8a38f553b9
|
/SWEA/sol/5356.py
|
ad8db3bcc47a8c35ae33aab7759fffd9c7fb8cff
|
[] |
no_license
|
jiwookseo/problem-solving
|
775a47825dc73f8a29616ef7011e8ee7be346f80
|
eefbefb21608ae0a2b3c75c010ae14995b7fc646
|
refs/heads/master
| 2020-04-19T03:11:02.659816 | 2019-08-14T08:59:06 | 2019-08-14T08:59:06 | 167,926,883 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 275 |
py
|
# SWEA 5356: read 5 strings per test case and emit them column-by-column,
# skipping rows that are shorter than the current column index.
for case_no in range(1, int(input()) + 1):
    rows = [input() for _ in range(5)]
    widths = [len(row) for row in rows]
    collected = []
    for col in range(max(widths)):
        for row_idx in range(5):
            # Only rows long enough to have a character in this column contribute.
            if widths[row_idx] > col:
                collected.append(rows[row_idx][col])
    print("#{} {}".format(case_no, "".join(collected)))
|
[
"[email protected]"
] | |
952f0ccca47807b4540c47e2a8a72c32c763961a
|
a8a5772674e62beaa4f5b1f115d280103fd03749
|
/persistence.py
|
00a776c4f7564a2e7aae6994a1f3c1b497b94024
|
[] |
no_license
|
tahentx/pv_workbook
|
c6fb3309d9acde5302dd3ea06a34ad2aee0de4b7
|
08912b0ef36a5226d23fa0430216a3f277aca33b
|
refs/heads/master
| 2022-12-12T20:39:35.688510 | 2021-03-30T03:20:54 | 2021-03-30T03:20:54 | 172,827,250 | 0 | 1 | null | 2022-12-08T16:47:39 | 2019-02-27T02:25:24 |
Python
|
UTF-8
|
Python
| false | false | 188 |
py
|
# One step of a digit-product chain for n: multiply the first two digits,
# then the first two digits of that product, and print the result.
# NOTE(review): only the first two digits are used at each step; a true
# multiplicative-persistence computation would multiply *all* digits
# (math.prod over the digit list) — confirm the intended behavior before
# changing it further.
n = 38941
value = [int(x) for x in str(n)]
persist = value[0] * value[1]
next_value = [int(x) for x in str(persist)]
persist_again = next_value[0] * next_value[1]
# BUG FIX: original read `print(str(persist_again)` — the missing closing
# parenthesis was a SyntaxError that prevented the script from running.
print(str(persist_again))
|
[
"[email protected]"
] | |
05f6da044977d12f49574500ccb24d84c43ab32d
|
88ae8695987ada722184307301e221e1ba3cc2fa
|
/third_party/webdriver/pylib/selenium/webdriver/common/desired_capabilities.py
|
0f97e7273aeda07105d9a8c34258dad8554e9e60
|
[
"BSD-3-Clause",
"Apache-2.0",
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later"
] |
permissive
|
iridium-browser/iridium-browser
|
71d9c5ff76e014e6900b825f67389ab0ccd01329
|
5ee297f53dc7f8e70183031cff62f37b0f19d25f
|
refs/heads/master
| 2023-08-03T16:44:16.844552 | 2023-07-20T15:17:00 | 2023-07-23T16:09:30 | 220,016,632 | 341 | 40 |
BSD-3-Clause
| 2021-08-13T13:54:45 | 2019-11-06T14:32:31 | null |
UTF-8
|
Python
| false | false | 2,994 |
py
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The Desired Capabilities implementation.
"""
class DesiredCapabilities(object):
    """Canonical default desired-capability sets, one class attribute per browser.

    Each attribute is a plain dict meant as a starting point when requesting a
    remote WebDriver session from a Selenium server or Selenium grid::

        from selenium import webdriver
        selenium_grid_url = "http://198.0.0.1:4444/wd/hub"
        # Start from a copy of the shared defaults.
        capabilities = DesiredCapabilities.FIREFOX.copy()
        capabilities['platform'] = "WINDOWS"
        capabilities['version'] = "10"
        driver = webdriver.Remote(desired_capabilities=capabilities,
                                  command_executor=selenium_grid_url)

    Always call ``.copy()`` first: the dicts are shared class-level state, and
    mutating one in place would alter it for every other user of this class.
    """

    FIREFOX = {"browserName": "firefox", "acceptInsecureCerts": True, "moz:debuggerAddress": True}

    INTERNETEXPLORER = {"browserName": "internet explorer", "platformName": "windows"}

    EDGE = {"browserName": "MicrosoftEdge"}

    CHROME = {"browserName": "chrome"}

    OPERA = {"browserName": "opera"}

    SAFARI = {"browserName": "safari", "platformName": "mac"}

    HTMLUNIT = {"browserName": "htmlunit", "version": "", "platform": "ANY"}

    HTMLUNITWITHJS = {"browserName": "htmlunit", "version": "firefox", "platform": "ANY", "javascriptEnabled": True}

    IPHONE = {"browserName": "iPhone", "version": "", "platform": "mac"}

    IPAD = {"browserName": "iPad", "version": "", "platform": "mac"}

    WEBKITGTK = {"browserName": "MiniBrowser", "version": "", "platform": "ANY"}

    WPEWEBKIT = {"browserName": "MiniBrowser", "version": "", "platform": "ANY"}
|
[
"[email protected]"
] | |
7d4aaa5e2ea4a2279deba143f873f693f7394bc4
|
c5148bc364dac753c0872bd5676027a30b260486
|
/biosteam/_facility.py
|
be6ea781f47ebbbb09b743864d6630a54816bf2b
|
[
"MIT",
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ecoent/biosteam
|
86f47c713a2cae5d6261b6c2c7734ccf7a90fb4e
|
f1371386d089df3aa8ce041175f210c0318c1fe0
|
refs/heads/master
| 2021-02-24T14:10:23.158984 | 2020-03-05T03:43:17 | 2020-03-05T03:43:17 | 245,433,768 | 1 | 0 |
NOASSERTION
| 2020-03-06T13:59:27 | 2020-03-06T13:59:26 | null |
UTF-8
|
Python
| false | false | 270 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 13 02:24:35 2019
@author: yoelr
"""
from ._unit import Unit
__all__ = ('Facility',)
class Facility(Unit, isabstract=True, new_graphics=False):
    """Abstract Unit subclass representing a process facility.

    Registered as abstract (``isabstract=True``) and reusing Unit's graphics
    (``new_graphics=False``) via Unit's class-creation keywords.
    """
    @property
    def system(self):
        # The System this facility belongs to; presumably `_system` is
        # assigned externally by the owning system — TODO confirm where.
        return self._system
|
[
"[email protected]"
] | |
ef4ace6d77e93557af3874532ced9981d737fdd6
|
a5a4cee972e487512275c34f308251e6cc38c2fa
|
/examples/Ni__eam__born_exp_fs__postprocessing/CCA_param_clusters_in_qoi_space/configuration/configure_qoi_pca_transform.py
|
7c3b9ff7b710945bd9e4fc499cce2f5621fb418b
|
[
"MIT"
] |
permissive
|
eragasa/pypospack
|
4f54983b33dcd2dce5b602bc243ea8ef22fee86b
|
21cdecaf3b05c87acc532d992be2c04d85bfbc22
|
refs/heads/master
| 2021-06-16T09:24:11.633693 | 2019-12-06T16:54:02 | 2019-12-06T16:54:02 | 99,282,824 | 4 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,110 |
py
|
from collections import OrderedDict
from pypospack.pyposmat.data.pipeline import PyposmatPipeline
# Two-segment pipeline configuration for PyposmatPipeline.
# Integer keys give the step order; integer keys under 'function_calls'
# allow the same function to be invoked more than once per segment.
pipeline_configuration = OrderedDict()

# -- segment 0: standard-scaler normalization of the qoi columns -------------
_scaler_opts = OrderedDict([('with_mean', True), ('with_std', True)])
_norm_args = OrderedDict([
    ('cols', ['qoi']),
    ('clusters', None),
    ('kwargs', OrderedDict([('standard_scaler', _scaler_opts)])),
])
pipeline_configuration[0] = OrderedDict([
    ('segment_type', 'preprocess'),
    ('function_calls', OrderedDict([
        (0, OrderedDict([('function', 'normalize_standard_scaler'),
                         ('args', _norm_args)])),
    ])),
])

# -- segment 1: CCA transformation of the normalized qoi columns -------------
_cca_args = OrderedDict([
    ('cols', ['n_qoi']),
    ('clusters', None),
    ('kwargs', OrderedDict([('cca', OrderedDict())])),
])
pipeline_configuration[1] = OrderedDict([
    ('segment_type', 'pca'),
    ('function_calls', OrderedDict([
        (0, OrderedDict([('function', 'transform_cca'),
                         ('args', _cca_args)])),
    ])),
])

if __name__ == "__main__":
    # Serialize this configuration next to the script as a .in file.
    pipeline = PyposmatPipeline()
    fn = __file__.replace('.py', '.in')
    pipeline.write_configuration(filename=fn,
                                 d=pipeline_configuration)
|
[
"[email protected]"
] | |
777b2f147135a023870de3cce3193786a5c9b525
|
55f60b7ec448eb48b75118b01b3878c8345242bb
|
/tests/scripts/waf-tools/f_guidelines/__init__.py
|
de2b6d8f5507598e6072beabb216d9c336060fc1
|
[
"BSD-3-Clause",
"CC-BY-4.0"
] |
permissive
|
dd-rrc-ps/foxbms-2
|
cd8d272afa24187c85c6fa747226bebed4cefc5e
|
555704a9c4af3dd1c2213e6f0be9860f34e2b1b3
|
refs/heads/master
| 2023-08-18T13:49:42.503755 | 2021-10-21T12:15:09 | 2021-10-21T12:15:09 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,136 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010 - 2021, Fraunhofer-Gesellschaft zur Foerderung der angewandten Forschung e.V.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# We kindly request you to use one or more of the following phrases to refer to
# foxBMS in your hardware, software, documentation or advertising materials:
#
# - "This product uses parts of foxBMS®"
# - "This product includes parts of foxBMS®"
# - "This product is derived from foxBMS®"
# f_guidelines is not a proper python module name, but this is OK since we need
# it just for the unit test discovery
# pylint: disable-all
|
[
"[email protected]"
] | |
fe6aaab1a8339dd6dc8d16d83021eb02079bdd3c
|
cc352b04dc8eb5033399a8925274f23be51ae3bf
|
/leonardo/__init__.py
|
778f36dfa838719fd5e13576cde4c652cb4a8cd6
|
[
"BSD-2-Clause"
] |
permissive
|
lukaszle/django-leonardo
|
1dcb16f0155495d4ef0e52f667450ee53f2b58be
|
a54dd0822c3d8fbf4a52547d0ad3ae17c04b88b7
|
refs/heads/master
| 2021-01-18T09:36:08.203184 | 2016-02-01T20:25:37 | 2016-02-01T20:25:37 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 520 |
py
|
# AppConfig Django should use when this package is installed as an app.
default_app_config = 'leonardo.apps.LeonardoConfig'
# Declare `leonardo` as a pkg_resources-style namespace package so other
# distributions can contribute `leonardo.*` subpackages.
__import__('pkg_resources').declare_namespace(__name__)
try:
    from leonardo.base import leonardo  # noqa
except ImportError:
    # Dependencies may legitimately be missing while the package itself is
    # being installed; degrade to a warning instead of failing the import.
    import warnings
    def simple_warn(message, category, filename, lineno, file=None, line=None):
        # Compact one-line formatter replacing the default multi-line one.
        return '%s: %s' % (category.__name__, message)
    msg = ("Could not import Leonardo dependencies. "
           "This is normal during installation.\n")
    warnings.formatwarning = simple_warn
    warnings.warn(msg, Warning)
|
[
"[email protected]"
] | |
c86b19c4c30e2fabbe0d81972a65af9e5be88efe
|
de6dc75873bd8615d22dd25c51f2fe3bc82cd7f8
|
/069.猜数字游戏.py
|
07de0ac470912f9fd6bb2e924865ff59a1419c0a
|
[] |
no_license
|
cuimin07/LeetCode-test
|
b9e87b4e353b09dfa84f62c24c2950d57656fff2
|
8f02b78dcbdefa154bb52c14a271998361e92a86
|
refs/heads/master
| 2020-08-14T13:37:27.799071 | 2020-01-13T03:11:40 | 2020-01-13T03:11:40 | 215,178,295 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,670 |
py
|
'''
你正在和你的朋友玩 猜数字(Bulls and Cows)游戏:你写下一个数字让你的朋友猜。
每次他猜测后,你给他一个提示,告诉他有多少位数字和确切位置都猜对了(称为“Bulls”, 公牛),
有多少位数字猜对了但是位置不对(称为“Cows”, 奶牛)。你的朋友将会根据提示继续猜,直到猜出秘密数字。
请写出一个根据秘密数字和朋友的猜测数返回提示的函数,用 A 表示公牛,用 B 表示奶牛。
请注意秘密数字和朋友的猜测数都可能含有重复数字。
示例 1:
输入: secret = "1807", guess = "7810"
输出: "1A3B"
解释: 1 公牛和 3 奶牛。公牛是 8,奶牛是 0, 1 和 7。
示例 2:
输入: secret = "1123", guess = "0111"
输出: "1A1B"
解释: 朋友猜测数中的第一个 1 是公牛,第二个或第三个 1 可被视为奶牛。
说明: 你可以假设秘密数字和朋友的猜测数都只包含数字,并且它们的长度永远相等。
'''
#答:
class Solution:
    def getHint(self, secret: str, guess: str) -> str:
        """Return the Bulls-and-Cows hint "xAyB" for *guess* against *secret*.

        Bulls (A) are positions where the digit matches exactly; cows (B)
        are digits present in both strings but at different positions.
        Both strings are assumed to be digit-only and of equal length.
        """
        bulls = 0
        secret_unmatched = {}
        guess_unmatched = {}
        for s_ch, g_ch in zip(secret, guess):
            if s_ch == g_ch:
                bulls += 1
            else:
                # Tally digits that did not match in place, per side.
                secret_unmatched[s_ch] = secret_unmatched.get(s_ch, 0) + 1
                guess_unmatched[g_ch] = guess_unmatched.get(g_ch, 0) + 1
        # Each shared digit contributes min(count_secret, count_guess) cows.
        cows = sum(min(count, guess_unmatched.get(digit, 0))
                   for digit, count in secret_unmatched.items())
        return '{}A{}B'.format(bulls, cows)
|
[
"[email protected]"
] | |
e686b01403ab17049ad212cde428b766ca9b55f6
|
973b40c806bfcfdfbe4258b3decd9e52f8d4b574
|
/vmware_exporter/helpers.py
|
e6df9262e3710f638050d836a6405a66c56421ae
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
gitter-badger/vmware_exporter
|
467507e83551134f2e89b7fb3125bccb949113d4
|
d805dde7ff768d55e96719fcd727a6f4b5e81dc7
|
refs/heads/master
| 2020-04-13T17:06:59.370635 | 2018-12-24T05:19:48 | 2018-12-24T05:19:48 | 163,339,090 | 0 | 0 | null | 2018-12-27T21:53:31 | 2018-12-27T21:53:31 | null |
UTF-8
|
Python
| false | false | 1,375 |
py
|
from pyVmomi import vmodl
def batch_fetch_properties(content, obj_type, properties):
    """Fetch *properties* for every managed object of *obj_type* in one call.

    Uses the vSphere PropertyCollector so all objects are retrieved in a
    single round-trip instead of one query per object.

    Parameters:
        content: ServiceInstance content object (must provide viewManager,
            rootFolder and propertyCollector).
        obj_type: pyVmomi managed-object type to collect.
        properties: list of property path strings to fetch for each object.

    Returns:
        dict keyed by each object's ``_moId``; each value is a dict of the
        requested properties plus ``obj`` (the managed object reference)
        and ``id`` (its ``_moId``).
    """
    # Container view spanning the entire inventory under rootFolder.
    view_ref = content.viewManager.CreateContainerView(
        container=content.rootFolder,
        type=[obj_type],
        recursive=True
    )
    PropertyCollector = vmodl.query.PropertyCollector
    # Describe the list of properties we want to fetch for obj_type
    property_spec = PropertyCollector.PropertySpec()
    property_spec.type = obj_type
    property_spec.pathSet = properties
    # Describe where we want to look for obj_type
    traversal_spec = PropertyCollector.TraversalSpec()
    traversal_spec.name = 'traverseEntities'
    traversal_spec.path = 'view'
    traversal_spec.skip = False
    traversal_spec.type = view_ref.__class__
    obj_spec = PropertyCollector.ObjectSpec()
    obj_spec.obj = view_ref
    # skip=True: don't collect the view object itself, only what it contains.
    obj_spec.skip = True
    obj_spec.selectSet = [traversal_spec]
    filter_spec = PropertyCollector.FilterSpec()
    filter_spec.objectSet = [obj_spec]
    filter_spec.propSet = [property_spec]
    props = content.propertyCollector.RetrieveContents([filter_spec])
    results = {}
    for obj in props:
        # NOTE(review): this rebinding shadows the `properties` parameter,
        # which is no longer needed at this point.
        properties = {}
        properties['obj'] = obj.obj
        properties['id'] = obj.obj._moId
        for prop in obj.propSet:
            properties[prop.name] = prop.val
        results[obj.obj._moId] = properties
    return results
|
[
"[email protected]"
] | |
7fe4ba0f5ad62a80601a216373746ad51ac9e09f
|
2e00398c4b77ab6e1996dbbefa167e13a8ad40a9
|
/products/urls.py
|
fab0250699fb90757ba44b5592f3d12ac5e94b7e
|
[] |
no_license
|
cleliofavoccia/PurBeurre
|
d754b83ed28b1240447243f149080058a60ccdfb
|
e2b5a51fbd91412e68ddb1c3c785713c7988cc41
|
refs/heads/main
| 2023-03-20T11:06:32.466520 | 2021-03-12T16:02:22 | 2021-03-12T16:02:22 | 331,650,830 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 266 |
py
|
"""URLS of products app"""
from django.urls import path
from . import views
app_name = 'products'
urlpatterns = [
path('<int:pk>/', views.ProductDetailView.as_view(), name='product'),
path('results/', views.ResultsListView.as_view(), name='results')
]
|
[
"[email protected]"
] | |
d7347ded155d726c1280eaa8b4a1b75779976483
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_169/ch167_2020_06_19_15_17_32_858494.py
|
a7645291dc1cbd46804f0758f0845bbdbddb6e65
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 436 |
py
|
def bairro_mais_custoso(dicionario):
    """Return the neighborhood (key) with the highest total expenses.

    Parameters:
        dicionario: dict mapping a neighborhood name to a sequence of
            numbers; values from index 6 onward are the expenses to sum
            (the original code also skipped the first six entries).

    Returns:
        The key whose expense total (sum of elements [6:]) is largest.

    BUG FIX: the original built a cost->name reverse map and then compared
    numeric costs against the *name* stored for the last-iterated key, so
    the comparison could never match and the function simply returned
    whichever key dict iteration visited last. It now genuinely returns
    the maximum-cost neighborhood.
    """
    totais = {}
    for bairro, valores in dicionario.items():
        totais[bairro] = sum(valores[6:])
    return max(totais, key=totais.get)
|
[
"[email protected]"
] | |
5b04d15420f559f7c75a1cf772a31cb8aa898403
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2566/60627/239561.py
|
e69aa50e41e94e98b56e97cd11efa5bf495bf257
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 441 |
py
|
# 20
# Reads n rows of comma-separated integers, then prints the minimum
# top-left -> bottom-right path sum moving only down or right.
n = int(input())
s = '['
# Wrap the raw rows into a "[[...],[...]]" literal string and parse it.
for i in range(n):
    inp = input()
    s += ('[' + inp + '],')
s = s[:-1] + ']'
from ast import literal_eval
num = literal_eval(s)
# Collects the total of every complete path; appended to by f via `global`.
l = []
def f(num,i,j,t):
    # Depth-first enumeration of all down/right paths; t is the running sum.
    # NOTE(review): exponential in grid size — acceptable for small inputs only.
    lis = range(len(num))
    global l
    t += num[i][j]
    if i==len(num) - 1 and j==len(num) - 1:
        l.append(t)
        return
    # Bounds checks assume a square grid: j is tested against the row
    # count, not the row length — TODO confirm inputs are always square.
    if i+1 in lis:
        f(num,i+1,j,t)
    if j+1 in lis:
        f(num,i,j+1,t)
f(num,0,0,0)
print(min(l))
|
[
"[email protected]"
] | |
c1da5d4f2e2d43b82c977f498ea155098ae2e99d
|
c77a40408bc40dc88c466c99ab0f3522e6897b6a
|
/Programming_basics/Exercise_1/VacationBooksList.py
|
3ce9b5265267af70f22eb065be20cff43206264f
|
[] |
no_license
|
vbukovska/SoftUni
|
3fe566d8e9959d390a61a4845381831929f7d6a3
|
9efd0101ae496290313a7d3b9773fd5111c5c9df
|
refs/heads/main
| 2023-03-09T17:47:20.642393 | 2020-12-12T22:14:27 | 2021-02-16T22:14:37 | 328,805,705 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 171 |
py
|
# Reading-plan estimate: given a book's page count, the reader's speed in
# pages per hour, and the number of days available, print the hours of
# reading needed per day.
total_pages = int(input())
pages_per_hour = int(input())
days_available = int(input())
hours_needed = total_pages / pages_per_hour
print(hours_needed / days_available)
|
[
"[email protected]"
] | |
1189ee43148ae71e4e63174d6f48d775698a66d8
|
235c4b3aa630737b379050a420923efadd432da8
|
/1stRound/Easy/599 Minimum Index Sum of Two Lists/Heap.py
|
509359ed759a98a80c7b55d98f9e9ee6e90ae456
|
[
"MIT"
] |
permissive
|
ericchen12377/Leetcode-Algorithm-Python
|
4e5dc20062280ef46194da5480600b2459fd89f8
|
eb58cd4f01d9b8006b7d1a725fc48910aad7f192
|
refs/heads/master
| 2023-02-22T22:43:55.612650 | 2021-01-28T04:00:20 | 2021-01-28T04:00:20 | 258,058,468 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 971 |
py
|
import heapq
class Solution:
    def findRestaurant(self, list1, list2):
        """
        :type list1: List[str]
        :type list2: List[str]
        :rtype: List[str]

        Return every restaurant common to both lists whose index sum is
        minimal, popped from a min-heap so ties come out in (sum, name)
        order.
        """
        # Map each list1 restaurant to [index_in_list1, sentinel].
        positions = {name: [idx, 100000] for idx, name in enumerate(list1)}
        for idx, name in enumerate(list2):
            if name in positions:
                positions[name][1] = idx
        # Heap of (index sum, name); non-common entries keep the sentinel
        # and therefore sort last, never reaching the result.
        candidates = [(sum(pair), name) for name, pair in positions.items()]
        heapq.heapify(candidates)
        best = []
        target = -1
        while candidates:
            total, name = heapq.heappop(candidates)
            if target == -1:
                target = total
            if total == target:
                best.append(name)
            else:
                break
        return best
# Ad-hoc demo run: the only common restaurant is "Shogun".
sample_list1 = ["Shogun", "Tapioca Express", "Burger King", "KFC"]
sample_list2 = ["Piatti", "The Grill at Torrey Pines", "Hungry Hunter Steakhouse", "Shogun"]
solver = Solution()
print(solver.findRestaurant(sample_list1, sample_list2))
|
[
"[email protected]"
] | |
6d36d7e25b88308e58d0b8062d820079f9529fc8
|
8f8ac99fd3ed9ceb36778b404f6fdd0b6899d3f4
|
/pyobjc-framework-Metal/PyObjCTest/test_mtlaccelerationstructuretypes.py
|
c3bd2771327be119bf00faa1fd5e34797066345f
|
[
"MIT"
] |
permissive
|
strogo/pyobjc
|
ac4201c7742eb75348328eeecb7eedf4e3458de3
|
2579c5eaf44b0c5af77ee195c417d2c65e72dfda
|
refs/heads/master
| 2023-07-13T00:41:56.448005 | 2021-08-24T06:42:53 | 2021-08-24T06:42:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,162 |
py
|
import Metal
from PyObjCTools.TestSupport import TestCase
class TestMTLAccelerationStructureTypes(TestCase):
    """Verify the MTLPackedFloat*/acceleration-structure struct wrappers
    are absent from the Metal bindings (the commented checks below are the
    assertions to revive if they are ever exposed)."""
    def test_structs(self):
        # The struct types are currently not exported by the bindings.
        self.assertNotHasAttr(Metal, "MTLPackedFloat3")
        self.assertNotHasAttr(Metal, "MTLPackedFloat4x3")
        self.assertNotHasAttr(Metal, "MTLAccelerationStructureInstanceDescriptor")
        # v = Metal.MTLPackedFloat3()
        # self.assertIsInstance(v.x, float)
        # self.assertIsInstance(v.y, float)
        # self.assertIsInstance(v.z, float)
        # self.asssertNotHasattr(v, "elements")
        # v = Metal.MTLPackedFloat4x3()
        # self.assertHasattr(v, "columns")
        # v = Metal.MTLAccelerationStructureInstanceDescriptor()
        # self.assertIsInstance(v.transformationMatrix, Metal.MTLPackedFloat4x3)
        # self.assertIsInstance(v.flags, int)
        # self.assertIsInstance(v.mask, int)
        # self.assertIsInstance(v.intersectionFunctionTableOffset, int)
        # self.assertIsInstance(v.accelerationStructureIndex, int)
    def test_functions(self):
        # MTLPackedFloat3 is not available (See above)
        self.assertNotHasAttr(Metal, "MTLPackedFloat3Make")
|
[
"[email protected]"
] | |
04e5c29fd3536e5ffc4f03ada2434ad4101b1362
|
e9d7689655887232b652ef369c7eaf3a1ef06955
|
/old/ePhy/in vivo multi/convert v3.py
|
30b7047a231015b6843c8b5de6d3593611be8041
|
[] |
no_license
|
Gilles-D/main
|
81ac13cdb1614eb0c82afb3d0e847a30b78cad30
|
f3714d2cbe4aae22ab36f4f94c94067159270820
|
refs/heads/master
| 2023-08-31T06:20:48.554237 | 2023-08-30T20:33:27 | 2023-08-30T20:33:27 | 222,518,250 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,954 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 28 11:57:30 2019
@author: lspaeth (modified by flareno)
Created on Mon Nov 12 14:14:18 2018
This class loads HdF5 recordings from MCS acquisition system as matrices of shape ((channel,data))
Allows to load Raw signals
+ associated time vectors
+ associated sampling rates
All in Volts and Seconds
Hope it will work
Then all you have to do is to load HdF5IO from eletroPy package; init class with smthg = HdF5IO(filepath)
After that u can load every instance with associated function, they are all described bellow.
"""
import matplotlib.pyplot as plt
import numpy as np
class HdF5IO:
    """Reader for MCS HDF5 recordings.

    Exposes the raw analog stream as a ((channel, data)) matrix in volts,
    plus the matching time vector (seconds) and sampling rate (Hz).
    """
    def __init__(self,filepath):
        import h5py as h5
        file_ = h5.File(filepath,'r')
        self.file = file_['Data'] #Loads first node
    #----------RAW RECORDINGS---------------------------------------------------------------------------------------------
    def raw_record(self): #Gets Raw Records as matrix ((channel,data))
        """Return the raw recording as a (channel, sample) numpy matrix in volts."""
        raw = self.file['Recording_0']['AnalogStream']['Stream_0']['ChannelData']
        import numpy as np
        raw_record = np.zeros((raw.shape[0],raw.shape[1]))
        # InfoChannel field 10 carries the pV-per-unit scaling factor.
        raw_conv = float(self.file['Recording_0']['AnalogStream']['Stream_0']['InfoChannel'][0][10]) #Scaling Factor
        for i in range(raw.shape[0]): #Stores data in new matrix
            raw_record[i,:] = raw[i,:]/raw_conv #From pV to V
        return raw_record
    def raw_time(self): #Gets time vector for raw records
        """Return the time vector (seconds) aligned with raw_record() columns."""
        import numpy as np
        # InfoChannel field 9 is the sample tick in microseconds.
        raw_tick = int(self.file['Recording_0']['AnalogStream']['Stream_0']['InfoChannel'][0][9])/1000000.0 #exp6 to pass from us to s
        raw_length = len(self.file['Recording_0']['AnalogStream']['Stream_0']['ChannelData'][0])
        raw_time = np.arange(0,raw_length*raw_tick,raw_tick)
        return raw_time
    def raw_sampling_rate(self): #Gets sampling rate
        """Return the sampling rate in Hz (inverse of the tick period)."""
        raw_tick = float(self.file['Recording_0']['AnalogStream']['Stream_0']['InfoChannel'][0][9])/1000000.0
        return 1.0/raw_tick #In Hz
#---------CONVERT H5 to RAW BINARY-----------------------------------------------------------------------------------
def convert_folder(folderpath, newpath, data_type='raw'):
    """Load every MCS ``.h5`` recording in *folderpath* and collect its raw data.

    Each ``.h5`` file is opened through :class:`HdF5IO` and its raw-record
    matrix ((channel, data)) is appended to the result; other files are
    skipped with a message.

    Parameters
    ----------
    folderpath : str
        Directory scanned (non-recursively) for ``.h5`` files.
    newpath : str
        Unused by the current implementation (the binary .rbf export was
        disabled); kept for backward compatibility with existing callers.
    data_type : str
        Unused; kept for backward compatibility.

    Returns
    -------
    list
        One raw-record matrix per converted file, in ``os.listdir`` order.
    """
    import os
    concatenated_file = []
    for file in os.listdir(folderpath):
        if file.endswith('.h5'):
            print ('Converting ' + file + '...')
            data = HdF5IO('%s/%s' % (folderpath, file))
            concatenated_file.append(data.raw_record())
            print ('Conversion DONE')
        else:
            print (file + ' is not an h5 file, will not be converted')
    # BUG FIX: the original had statements *after* the return (a second
    # HdF5IO load on a stale path plus a commented-out .rbf export); they
    # were unreachable dead code and have been removed, along with the
    # unused `re` and numpy imports.
    return concatenated_file
# print ('Whole directory has been converted successfully')
if __name__ == '__main__':
    # Hard-coded session paths; edit before running on another machine.
    folderpath = r'C:/Users/Gilles.DELBECQ/Desktop/In vivo Février 2022/H5/15-02'
    newpath = r'C:\Users\Gilles.DELBECQ\Desktop\In vivo Février 2022\RBF/15-02'
    a = convert_folder(folderpath, newpath)
    array_final = np.array([])
    # Stack every per-file (channel, data) matrix along the channel axis.
    array_final = np.concatenate(a,axis=0)
    # Write the concatenated recording as a raw binary .rbf file.
    file_save = 'C:/Users/Gilles.DELBECQ/Desktop/In vivo Février 2022/H5/15-02/concatenated.rbf'
    with open(file_save, mode='wb') as file :
        array_final.tofile(file,sep='')
|
[
"[email protected]"
] | |
2c8834ff912fd0b52c11a67b58347b14e20a59c2
|
18310e7bb4e7c46d7d3fd51046a5bd92ca5f9c48
|
/gaping/parameters.py
|
6509d009f0bbb6100dfbd4420f7302283a6bba73
|
[] |
no_license
|
shawwn/gaping
|
c91b6b4b2e2ef2ab6b868403f02e0e237b7b2761
|
41d477c79814b37f8a09715433c0c489a56c92d2
|
refs/heads/master
| 2023-03-15T05:42:37.086420 | 2021-03-16T21:21:01 | 2021-03-16T21:21:01 | 323,994,300 | 11 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 163 |
py
|
import gin
import gin.tf.external_configurables
import tensorflow as tf
from .util import EasyDict
@gin.configurable
def options(**kws):
    """Return the keyword arguments (gin-injectable) bundled as an EasyDict."""
    return EasyDict(kws)
|
[
"[email protected]"
] | |
48c663aa2a5710c161b3eb746a960ff8252ec051
|
709b1549033c9a547c67ee507fdc10b7e5d234ad
|
/test/test_worker_pools_api.py
|
a9689158dab784197bf9245cf0d64ca7dd1eb230
|
[
"Apache-2.0"
] |
permissive
|
cvent/octopus-deploy-api-client
|
d622417286b348c0be29678a86005a809c77c005
|
0e03e842e1beb29b132776aee077df570b88366a
|
refs/heads/master
| 2020-12-05T14:17:46.229979 | 2020-01-07T05:06:58 | 2020-01-07T05:06:58 | 232,135,963 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,971 |
py
|
# coding: utf-8
"""
Octopus Server API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2019.6.7+Branch.tags-2019.6.7.Sha.aa18dc6809953218c66f57eff7d26481d9b23d6a
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import octopus_deploy_swagger_client
from octopus_deploy_client.worker_pools_api import WorkerPoolsApi # noqa: E501
from octopus_deploy_swagger_client.rest import ApiException
class TestWorkerPoolsApi(unittest.TestCase):
    """WorkerPoolsApi unit test stubs"""
    def setUp(self):
        # BUG FIX: the original instantiated
        # `octopus_deploy_client.worker_pools_api.WorkerPoolsApi()`, but no
        # module named `octopus_deploy_client` is ever imported (the package
        # is imported as `octopus_deploy_swagger_client`), so every test
        # failed with NameError in setUp before running.
        self.api = octopus_deploy_swagger_client.worker_pools_api.WorkerPoolsApi()  # noqa: E501
    def tearDown(self):
        pass
    def test_create_response_descriptor_worker_pools_worker_pool_worker_pool_resource(self):
        """Test case for create_response_descriptor_worker_pools_worker_pool_worker_pool_resource
        Create a WorkerPoolResource  # noqa: E501
        """
        pass
    def test_create_response_descriptor_worker_pools_worker_pool_worker_pool_resource_spaces(self):
        """Test case for create_response_descriptor_worker_pools_worker_pool_worker_pool_resource_spaces
        Create a WorkerPoolResource  # noqa: E501
        """
        pass
    def test_custom_action_response_descriptor_octopus_server_web_api_actions_sort_worker_pools_responder(self):
        """Test case for custom_action_response_descriptor_octopus_server_web_api_actions_sort_worker_pools_responder
        """
        pass
    def test_custom_action_response_descriptor_octopus_server_web_api_actions_sort_worker_pools_responder_spaces(self):
        """Test case for custom_action_response_descriptor_octopus_server_web_api_actions_sort_worker_pools_responder_spaces
        """
        pass
    def test_custom_query_response_descriptor_octopus_server_web_api_actions_infrastructure_summary_worker_pools_summary_responder(self):
        """Test case for custom_query_response_descriptor_octopus_server_web_api_actions_infrastructure_summary_worker_pools_summary_responder
        """
        pass
    def test_custom_query_response_descriptor_octopus_server_web_api_actions_infrastructure_summary_worker_pools_summary_responder_spaces(self):
        """Test case for custom_query_response_descriptor_octopus_server_web_api_actions_infrastructure_summary_worker_pools_summary_responder_spaces
        """
        pass
    def test_custom_query_response_descriptor_octopus_server_web_api_actions_worker_pools_workers_responder(self):
        """Test case for custom_query_response_descriptor_octopus_server_web_api_actions_worker_pools_workers_responder
        """
        pass
    def test_custom_query_response_descriptor_octopus_server_web_api_actions_worker_pools_workers_responder_spaces(self):
        """Test case for custom_query_response_descriptor_octopus_server_web_api_actions_worker_pools_workers_responder_spaces
        """
        pass
    def test_delete_on_background_response_descriptor_worker_pools_worker_pool_worker_pool_resource(self):
        """Test case for delete_on_background_response_descriptor_worker_pools_worker_pool_worker_pool_resource
        Delete a WorkerPoolResource by ID  # noqa: E501
        """
        pass
    def test_delete_on_background_response_descriptor_worker_pools_worker_pool_worker_pool_resource_spaces(self):
        """Test case for delete_on_background_response_descriptor_worker_pools_worker_pool_worker_pool_resource_spaces
        Delete a WorkerPoolResource by ID  # noqa: E501
        """
        pass
    def test_index_response_descriptor_worker_pools_worker_pool_worker_pool_resource(self):
        """Test case for index_response_descriptor_worker_pools_worker_pool_worker_pool_resource
        Get a list of WorkerPoolResources  # noqa: E501
        """
        pass
    def test_index_response_descriptor_worker_pools_worker_pool_worker_pool_resource_spaces(self):
        """Test case for index_response_descriptor_worker_pools_worker_pool_worker_pool_resource_spaces
        Get a list of WorkerPoolResources  # noqa: E501
        """
        pass
    def test_list_all_response_descriptor_worker_pools_worker_pool_worker_pool_resource(self):
        """Test case for list_all_response_descriptor_worker_pools_worker_pool_worker_pool_resource
        Get a list of WorkerPoolResources  # noqa: E501
        """
        pass
    def test_list_all_response_descriptor_worker_pools_worker_pool_worker_pool_resource_spaces(self):
        """Test case for list_all_response_descriptor_worker_pools_worker_pool_worker_pool_resource_spaces
        Get a list of WorkerPoolResources  # noqa: E501
        """
        pass
    def test_load_response_descriptor_worker_pools_worker_pool_worker_pool_resource(self):
        """Test case for load_response_descriptor_worker_pools_worker_pool_worker_pool_resource
        Get a WorkerPoolResource by ID  # noqa: E501
        """
        pass
    def test_load_response_descriptor_worker_pools_worker_pool_worker_pool_resource_spaces(self):
        """Test case for load_response_descriptor_worker_pools_worker_pool_worker_pool_resource_spaces
        Get a WorkerPoolResource by ID  # noqa: E501
        """
        pass
    def test_modify_response_descriptor_worker_pools_worker_pool_worker_pool_resource(self):
        """Test case for modify_response_descriptor_worker_pools_worker_pool_worker_pool_resource
        Modify a WorkerPoolResource by ID  # noqa: E501
        """
        pass
    def test_modify_response_descriptor_worker_pools_worker_pool_worker_pool_resource_spaces(self):
        """Test case for modify_response_descriptor_worker_pools_worker_pool_worker_pool_resource_spaces
        Modify a WorkerPoolResource by ID  # noqa: E501
        """
        pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
cfb080d14c05e5ba70f3611fba5c7802c11373c9
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02882/s256412363.py
|
88b565ca18c9c84c582fb7237d25bd5927bd6b85
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 166 |
py
|
from math import atan, pi

# Tank with a square a*a base and height b holds water volume x.
# Tilt it until the water is about to spill and print the tilt angle
# in degrees (AtCoder-style stdin/stdout).
a, b, x = map(int, input().split())

water_level = x / a ** 2  # water height in the upright tank
if b - water_level <= water_level:
    # Water reaches at least half the tank: the tilted surface meets
    # both opposite walls.
    angle = atan((b - water_level) / (a / 2))
else:
    # Shallow water: it pools as a triangular prism against one wall.
    base_length = x / a * 2 / b
    angle = atan(b / base_length)
print(angle * (180 / pi))
|
[
"[email protected]"
] | |
14de914eafa10449b77e6e446ba593c4617271a1
|
12d007b50d20030c4a0d8ecceaeb532b3de4f966
|
/setup.py
|
1ccbc3c4e37c98f182e6b3eedb9ea81800bdaf3a
|
[
"MIT"
] |
permissive
|
Tygs/ayo
|
8be03cf1854d122b763272ba256e3fa87135e776
|
27b2225770581e19f3abdb8db0721776f0cfb195
|
refs/heads/master
| 2021-11-08T02:09:37.979755 | 2021-11-01T10:44:35 | 2021-11-01T10:44:35 | 136,607,852 | 32 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 203 |
py
|
"""
Install: python setup.py install
Dev mode: python setup.py develop
Test: pip install pytest && pytest tests
All the config is in setup.cfg
"""
import setuptools
setuptools.setup()
|
[
"[email protected]"
] | |
a9b33f6c6c2f40ad46017f0a75775c17579f1e0a
|
0b98732dcd3dd94a97555a8f3e8dd3524bb8ec86
|
/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x.py
|
a6b720332cf33263295dcfeeae0d85b793e5166d
|
[
"Apache-2.0"
] |
permissive
|
hasanirtiza/Pedestron
|
e89fea2ec676f150a7266f6b65963dd6c4ec35c9
|
8ab23ec38982cfaf0ae82c77c30f10b2fff62d12
|
refs/heads/master
| 2023-08-06T02:53:06.368937 | 2023-04-06T13:46:27 | 2023-04-06T13:46:27 | 247,410,025 | 723 | 161 |
Apache-2.0
| 2022-10-02T10:17:44 | 2020-03-15T05:52:52 |
Python
|
UTF-8
|
Python
| false | false | 5,628 |
py
|
# mmdetection (v1.x style) config: Mask Scoring R-CNN with a ResNeXt-101
# 64x4d backbone + FPN, trained on COCO for the standard 1x (12-epoch) schedule.
# model settings
model = dict(
    type='MaskScoringRCNN',
    pretrained='open-mmlab://resnext101_64x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,  # freeze stem + stage 1, fine-tune the rest
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_scales=[8],
        anchor_ratios=[0.5, 1.0, 2.0],
        anchor_strides=[4, 8, 16, 32, 64],
        target_means=[.0, .0, .0, .0],
        target_stds=[1.0, 1.0, 1.0, 1.0],
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
    bbox_roi_extractor=dict(
        type='SingleRoIExtractor',
        roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
        out_channels=256,
        featmap_strides=[4, 8, 16, 32]),
    bbox_head=dict(
        type='SharedFCBBoxHead',
        num_fcs=2,
        in_channels=256,
        fc_out_channels=1024,
        roi_feat_size=7,
        # 81 = 80 COCO categories + background (mmdetection v1 convention)
        num_classes=81,
        target_means=[0., 0., 0., 0.],
        target_stds=[0.1, 0.1, 0.2, 0.2],
        reg_class_agnostic=False,
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
    mask_roi_extractor=dict(
        type='SingleRoIExtractor',
        roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2),
        out_channels=256,
        featmap_strides=[4, 8, 16, 32]),
    mask_head=dict(
        type='FCNMaskHead',
        num_convs=4,
        in_channels=256,
        conv_out_channels=256,
        num_classes=81,
        loss_mask=dict(
            type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
    # MaskIoU head is what distinguishes Mask Scoring R-CNN from Mask R-CNN.
    mask_iou_head=dict(
        type='MaskIoUHead',
        num_convs=4,
        num_fcs=2,
        roi_feat_size=14,
        in_channels=256,
        conv_out_channels=256,
        fc_out_channels=1024,
        num_classes=81))
# model training and testing settings
train_cfg = dict(
    rpn=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.7,
            neg_iou_thr=0.3,
            min_pos_iou=0.3,
            ignore_iof_thr=-1),
        sampler=dict(
            type='RandomSampler',
            num=256,
            pos_fraction=0.5,
            neg_pos_ub=-1,
            add_gt_as_proposals=False),
        allowed_border=0,
        pos_weight=-1,
        debug=False),
    rpn_proposal=dict(
        nms_across_levels=False,
        nms_pre=2000,
        nms_post=2000,
        max_num=2000,
        nms_thr=0.7,
        min_bbox_size=0),
    rcnn=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.5,
            min_pos_iou=0.5,
            ignore_iof_thr=-1),
        sampler=dict(
            type='RandomSampler',
            num=512,
            pos_fraction=0.25,
            neg_pos_ub=-1,
            add_gt_as_proposals=True),
        mask_size=28,
        pos_weight=-1,
        mask_thr_binary=0.5,
        debug=False))
test_cfg = dict(
    rpn=dict(
        nms_across_levels=False,
        nms_pre=1000,
        nms_post=1000,
        max_num=1000,
        nms_thr=0.7,
        min_bbox_size=0),
    rcnn=dict(
        score_thr=0.05,
        nms=dict(type='nms', iou_thr=0.5),
        max_per_img=100,
        mask_thr_binary=0.5))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
    imgs_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        img_scale=(1333, 800),
        img_norm_cfg=img_norm_cfg,
        size_divisor=32,
        flip_ratio=0.5,
        with_mask=True,
        with_crowd=True,
        with_label=True),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        img_scale=(1333, 800),
        img_norm_cfg=img_norm_cfg,
        size_divisor=32,
        flip_ratio=0,
        with_mask=True,
        with_crowd=True,
        with_label=True),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        img_scale=(1333, 800),
        img_norm_cfg=img_norm_cfg,
        size_divisor=32,
        flip_ratio=0,
        with_mask=False,
        with_label=False,
        test_mode=True))
# optimizer
# lr=0.02 is the standard rate for 8 GPUs x 2 imgs/GPU; scale linearly
# if the effective batch size changes.
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/ms_rcnn_x101_64x4d_fpn_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
|
[
"[email protected]"
] | |
2b09af06835e7474ad61e8d98f0c2a72f6f3ed6b
|
dc37f36199b107933e33486761125cef2f492ae2
|
/export_contacts.py
|
9eb70ffd28bd589f83971c6a335fa94871265327
|
[] |
no_license
|
spookylukey/christchurch_django
|
ca3acd67df1695a1cd7cb462b729ad72a37e43b7
|
d489e400b201b8ac56ee4065b3d6bc0f861f92f2
|
refs/heads/master
| 2022-12-20T03:27:26.081809 | 2015-10-15T18:36:20 | 2015-10-15T18:36:20 | 300,521,884 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,339 |
py
|
#!/usr/bin/env python
from __future__ import unicode_literals
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'christchurch.settings'
import csv
writer = csv.writer(open("contact-list.csv", "w"))
writer.writerow(["First Name", "Last Name", "Gender (M/F)", "Student (Y/N)", "Address", "Email Address", "Phone Number", "Mobile", "Photo File Name", "Home Group", "Username", "Password", "Admin User (Y/N)", "Church member", "Include on email lists"])
from django.contrib.auth.models import User
from contacts.models import Contact
admins = {u.email: u for u in User.objects.all().filter(is_staff=True)}
for contact in Contact.objects.all():
try:
first_name, last_name = contact.name.split(' ', 2)
except ValueError:
first_name, last_name = contact.name, ""
writer.writerow([
first_name,
last_name,
"",
"N",
contact.address.strip() + "\n" + contact.post_code,
contact.email,
contact.phone_number,
contact.mobile_number,
"",
contact.home_group.name if contact.home_group else "",
admins[contact.email].username if contact.email in admins else "",
"",
"Y" if contact.email in admins else "N",
"Y" if contact.church_member else "N",
"Y" if contact.include_on_email_lists else "N",
])
|
[
"[email protected]"
] | |
6a5d15682bbaa458fe83a7acb7339950b92acdcb
|
795caca6c497891e2fcd2b0253a209500744c56d
|
/src/models/continuous_encoder_decoder_models/encoder_decoder_variants/enc_dec_out.py
|
fe36585660ddbd55eae5ad88b1e6f06abb913378
|
[] |
no_license
|
RitaRamo/remote-sensing-images-caption
|
29c0e0a6b5352b9b3d62c7315cd4d7ac6b0b7076
|
426d97b5d3688f6c52c51ef6e33872554d55751a
|
refs/heads/master
| 2021-11-24T03:02:00.238003 | 2021-11-04T09:23:20 | 2021-11-04T09:23:20 | 244,619,672 | 3 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,534 |
py
|
import torchvision
from torch import nn
import torch
from torch.nn.utils.rnn import pack_padded_sequence
from models.basic_encoder_decoder_models.encoder_decoder import Encoder, Decoder
from models.abtract_model import AbstractEncoderDecoderModel
import torch.nn.functional as F
from embeddings.embeddings import get_embedding_layer
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
from data_preprocessing.preprocess_tokens import OOV_TOKEN
from embeddings.embeddings import EmbeddingsType
from models.continuous_encoder_decoder_models.encoder_decoder import ContinuousEncoderDecoderModel
from embeddings.embeddings import EmbeddingsType
class VocabAttention(nn.Module):
    """
    Attention over the vocabulary embedding matrix.

    Scores every vocabulary embedding against the decoder hidden state
    (additive attention) and returns the attention-weighted average
    embedding together with the attention distribution over the vocabulary.
    """

    def __init__(self, vocab_dim, decoder_dim, embedding_vocab):
        """
        :param vocab_dim: dimensionality of the vocabulary embeddings
        :param decoder_dim: size of the decoder's RNN hidden state
        :param embedding_vocab: (1, vocab_size, vocab_dim) tensor holding the
            embedding of every vocabulary word
        """
        super(VocabAttention, self).__init__()
        # projects the decoder state into the embedding space
        self.decoder_att = nn.Linear(decoder_dim, vocab_dim)
        self.full_att = nn.Linear(vocab_dim, 1)
        self.relu = nn.ReLU()
        self.softmax = nn.Softmax(dim=1)  # weights over the vocabulary axis
        self.embedding_vocab = embedding_vocab

    def forward(self, decoder_hidden):
        """
        Forward propagation.

        :param decoder_hidden: previous decoder output,
            a tensor of dimension (batch_size, decoder_dim)
        :return: (attention-weighted embedding of shape (batch_size, vocab_dim),
                  attention weights of shape (batch_size, vocab_size))
        """
        # (batch_size, vocab_size, vocab_dim)
        vocab = self.embedding_vocab.repeat(decoder_hidden.size()[0], 1, 1)
        query = self.decoder_att(decoder_hidden)  # (batch_size, vocab_dim)
        # FIX: removed `att2 = self.decoder_att(decoder_hidden)` -- it was an
        # unused duplicate of `query` that ran the projection a second time.
        # (batch_size, vocab_size): one additive score per vocabulary word
        att = self.full_att(self.relu(vocab + query.unsqueeze(1))).squeeze(2)
        alpha = self.softmax(att)  # (batch_size, vocab_size)
        attention_weighted_encoding = (
            vocab * alpha.unsqueeze(2)).sum(dim=1)  # (batch_size, vocab_dim)

        return attention_weighted_encoding, alpha
class ContinuousDecoderWithOut(Decoder):
    """Decoder whose output layer is an attention over the vocabulary
    embeddings rather than a softmax over word indices."""

    def __init__(self, decoder_dim, embed_dim, embedding_type, vocab_size, token_to_id, post_processing, device,
                 encoder_dim=2048, dropout=0.5):
        super(ContinuousDecoderWithOut, self).__init__(
            decoder_dim, embed_dim, embedding_type, vocab_size, token_to_id,
            post_processing, encoder_dim, dropout)

        # Projection into embedding space replaces the usual softmax layer.
        self.fc = nn.Linear(decoder_dim, embed_dim)

        # Row vector with every word id, shape (1, vocab_size); its embeddings
        # (special tokens included) feed the vocabulary-attention module.
        all_word_ids = torch.arange(vocab_size).unsqueeze(0)
        vocab_embeddings = self.embedding(all_word_ids).to(device)
        self.attention_out = VocabAttention(embed_dim, decoder_dim, vocab_embeddings)

    def forward(self, word, encoder_out, decoder_hidden_state, decoder_cell_state):
        """One decoding step: returns (scores, new h, new c, vocab attention)."""
        word_embeddings = self.embedding(word)
        new_hidden, new_cell = self.decode_step(
            word_embeddings, (decoder_hidden_state, decoder_cell_state)
        )
        scores, vocab_alpha = self.attention_out(self.dropout(new_hidden))
        return scores, new_hidden, new_cell, vocab_alpha
class ContinuousEncoderDecoderOutModel(ContinuousEncoderDecoderModel):
    """Continuous encoder-decoder whose decoder (ContinuousDecoderWithOut)
    produces embedding-space predictions plus a vocabulary-attention map."""

    def __init__(self,
                 args,
                 vocab_size,
                 token_to_id,
                 id_to_token,
                 max_len,
                 device
                 ):
        super().__init__(args, vocab_size, token_to_id, id_to_token, max_len, device)

    def _initialize_encoder_and_decoder(self):
        # This variant only makes sense with pretrained (continuous) embeddings.
        if (self.args.embedding_type not in [embedding.value for embedding in EmbeddingsType]):
            raise ValueError(
                "Continuous model should use pretrained embeddings...")

        self.encoder = Encoder(self.args.image_model_type,
                               enable_fine_tuning=self.args.fine_tune_encoder)

        self.decoder = ContinuousDecoderWithOut(
            encoder_dim=self.encoder.encoder_dim,
            decoder_dim=self.args.decoder_dim,
            embedding_type=self.args.embedding_type,
            embed_dim=self.args.embed_dim,
            vocab_size=self.vocab_size,
            token_to_id=self.token_to_id,
            post_processing=self.args.post_processing,
            device=self.device,
            dropout=self.args.dropout
        )

        self.decoder.normalize_embeddings(self.args.no_normalization)

        self.encoder = self.encoder.to(self.device)
        self.decoder = self.decoder.to(self.device)

    def _predict(self, encoder_out, caps, caption_lengths):
        """Teacher-forced decoding over a batch.

        NOTE(review): the `[:batch_size_t]` slicing assumes captions are
        sorted by decreasing length -- confirm against the base class.
        Returns a dict with embedding predictions and vocab attention maps.
        """
        batch_size = encoder_out.size(0)
        num_pixels = encoder_out.size(1)  # NOTE(review): unused; kept as-is

        # Holds one embedding-space prediction and one vocabulary-attention
        # distribution per (caption, time step).
        all_predictions = torch.zeros(batch_size, max(
            caption_lengths), self.decoder.embed_dim).to(self.device)
        all_alphas_out = torch.zeros(batch_size, max(
            caption_lengths), self.vocab_size).to(self.device)

        h, c = self.decoder.init_hidden_state(encoder_out)

        # Predict step by step (teacher forcing: feed the gold token).
        for t in range(max(
                caption_lengths)):
            # Only captions longer than t are still being decoded at step t.
            batch_size_t = sum([l > t for l in caption_lengths])

            predictions, h, c, alpha_out = self.decoder(
                caps[:batch_size_t, t], encoder_out[:batch_size_t], h[:batch_size_t], c[:batch_size_t])

            all_predictions[:batch_size_t, t, :] = predictions
            all_alphas_out[:batch_size_t, t, :] = alpha_out

        return {"predictions": all_predictions, "alpha_out": all_alphas_out}

    def generate_output_index(self, input_word, encoder_out, h, c):
        """Single inference step: returns (output index, new h, new c)."""
        predictions, h, c, _ = self.decoder(
            input_word, encoder_out, h, c)

        current_output_index = self._convert_prediction_to_output(predictions)

        return current_output_index, h, c
|
[
"[email protected]"
] | |
4a2fb9f16742d3718a5490b53140ab00b8c65f5a
|
f6ed7bc808f5536bc77166fe5c3571e5c028f308
|
/neptune/internal/common/utils/files.py
|
c694ca7a2144941196bdd9a68e8df828c7b73206
|
[
"Apache-2.0"
] |
permissive
|
jiji-online/neptune-cli
|
d086bb59725b7545f3e0f80bd89e8f99ff3851a0
|
50cf680a80d141497f9331ab7cdaee49fcb90b0c
|
refs/heads/main
| 2023-07-18T17:56:10.671562 | 2021-09-14T07:54:13 | 2021-09-14T07:54:13 | 406,275,162 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 858 |
py
|
#
# Copyright (c) 2016, deepsense.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import errno
import os
import io
def create_empty_file(path):
    """Create *path* as an empty file, truncating any existing content."""
    # Opening in 'w' mode creates/truncates; the context manager closes it.
    with io.open(path, 'w'):
        pass
def create_dir_if_nonexistent(dir_path):
    """Create *dir_path* (with intermediate dirs); a pre-existing dir is fine."""
    try:
        os.makedirs(dir_path)
    except OSError as error:
        # Tolerate only "already exists"; propagate real failures
        # (permissions, a file with the same name, ...).
        already_exists = error.errno == errno.EEXIST
        if not already_exists:
            raise
|
[
"[email protected]"
] | |
3034b0e2dc2f6fae511f9a14f1f4e669ce99bf43
|
b8e6b9ac7d92941e3b8ee2f97952ff8048d9fed6
|
/django_app/config/celery.py
|
5f2ee85922eaca996254f9a6b2d7da3b932d1cf8
|
[] |
no_license
|
LeeHanYeong/Elasticbeanstalk-Celery-Redis-Elasticache
|
00e571d90141ecf987ed5d86a90797f3de7ccae1
|
63c0b8f519a2c90f82d796affa884d3b1a440732
|
refs/heads/master
| 2020-06-26T10:00:30.498364 | 2017-11-16T19:59:16 | 2017-11-16T19:59:16 | 97,014,281 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 319 |
py
|
import os

from celery import Celery

# Django settings must be importable before Celery configures itself below.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.debug')

app = Celery('config')
# Pull all CELERY_*-prefixed settings from the Django settings module.
app.config_from_object('django.conf:settings', namespace='CELERY')
# Find tasks.py modules in every installed Django app.
app.autodiscover_tasks()


@app.task(bind=True)
def debug_task(self):
    """Diagnostic task: print the task request context it was called with."""
    # bind=True makes `self` the task instance, exposing self.request.
    print('Request: {0!r}'.format(self.request))
|
[
"[email protected]"
] | |
21bc6ecaca89a962b6c47a14a1809fc53cb6ae5e
|
ed90fcbfd1112545fa742e07131159bb3a68246a
|
/smry/server-auth/ls/google-cloud-sdk/lib/googlecloudsdk/compute/subcommands/firewall_rules/list.py
|
8d62e0bafc6eecf56466830a10565be1b2193749
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
wemanuel/smry
|
2588f2a2a7b7639ebb6f60b9dc2833f1b4dee563
|
b7f676ab7bd494d71dbb5bda1d6a9094dfaedc0a
|
refs/heads/master
| 2021-01-10T21:56:55.226753 | 2015-08-01T13:37:06 | 2015-08-01T13:37:06 | 40,047,329 | 0 | 1 |
Apache-2.0
| 2020-07-24T18:32:40 | 2015-08-01T13:26:17 |
Python
|
UTF-8
|
Python
| false | false | 441 |
py
|
# Copyright 2014 Google Inc. All Rights Reserved.
"""Command for listing firewall rules."""
from googlecloudsdk.compute.lib import base_classes
class List(base_classes.GlobalLister):
  """List Google Compute Engine firewall rules."""

  @property
  def service(self):
    # API client collection this lister queries.
    return self.compute.firewalls

  @property
  def resource_type(self):
    # Resource name used by the base lister for URIs/display.
    return 'firewalls'


# Attach the standard global-lister help text for this resource.
List.detailed_help = base_classes.GetGlobalListerHelp('firewall rules')
|
[
"[email protected]"
] | |
ef8050413a53ba46fbf7838ae42ee7b94417348b
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03304/s635103583.py
|
df29bcaa3b5769da632eaa3ea1863d89e01068ee
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 851 |
py
|
# Competitive-programming solution (AtCoder p03304); boilerplate helpers
# plus a constant-time expected-value formula.
import sys
import math
import copy
from heapq import heappush, heappop, heapify
from functools import cmp_to_key
from bisect import bisect_left, bisect_right
from collections import defaultdict, deque, Counter

# sys.setrecursionlimit(1000000)

# input aliases
input = sys.stdin.readline
getS = lambda: input().strip()
getN = lambda: int(input())
getList = lambda: list(map(int, input().split()))
getZList = lambda: [int(x) - 1 for x in input().split()]

INF = float("inf")
MOD = 10**9 + 7
divide = lambda x: pow(x, MOD-2, MOD)  # modular inverse via Fermat


def solve():
    """Read n, m, d and print the expected answer.

    NOTE(review): presumably the expected number of adjacent pairs with
    absolute difference d in a random length-m sequence over 1..n -- each
    pair position contributes (#valid pairs)/n^2 -- confirm against the
    problem statement.
    """
    n, m, d = getList()
    if d == 0:
        each = n  # pairs (v, v): n of them
    else:
        each = (n - d) * 2  # pairs (v, v+d) and (v+d, v)
    # igai = pow(n, m-2)
    # NOTE(review): `all` shadows the builtin; left unchanged here.
    all = each * (m-1) / (n * n)
    ans = all
    print(ans)


def main():
    # Unused multi-test-case driver; the guard below calls solve() directly.
    n = getN()
    for _ in range(n):
        solve()

    return


if __name__ == "__main__":
    # main()
    solve()
|
[
"[email protected]"
] | |
6470e5104a790f16c984bcde668a934317ac2e95
|
1e8142725aa06844713d18fa38c6779aff8f8171
|
/tndata_backend/notifications/migrations/0018_gcmmessage_queue_id.py
|
64dd06bd40b6ed39edc8bd2ae0a208bec73ed197
|
[
"MIT"
] |
permissive
|
tndatacommons/tndata_backend
|
8f4db3e5cf5272901c9087a85e21d7560240bb3b
|
3d22179c581ab3da18900483930d5ecc0a5fca73
|
refs/heads/master
| 2020-12-03T07:53:17.339769 | 2017-03-27T06:18:58 | 2017-03-27T06:18:58 | 68,407,220 | 1 | 2 | null | 2017-03-27T06:18:59 | 2016-09-16T18:59:16 |
Python
|
UTF-8
|
Python
| false | false | 437 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    """Add the optional ``queue_id`` CharField to GCMMessage."""

    dependencies = [
        ('notifications', '0017_auto_20151217_2000'),
    ]

    operations = [
        migrations.AddField(
            model_name='gcmmessage',
            name='queue_id',
            # blank=True with default '' keeps existing rows valid.
            field=models.CharField(max_length=128, default='', blank=True),
        ),
    ]
|
[
"[email protected]"
] | |
7b122931a2d1665b2d483991ac0a54efe644b77e
|
612325535126eaddebc230d8c27af095c8e5cc2f
|
/src/net/log/stitch_net_log_files.py
|
aea6d7b0f58ca282bcb4daf53c9837ae3b963544
|
[
"BSD-3-Clause"
] |
permissive
|
TrellixVulnTeam/proto-quic_1V94
|
1a3a03ac7a08a494b3d4e9857b24bb8f2c2cd673
|
feee14d96ee95313f236e0f0e3ff7719246c84f7
|
refs/heads/master
| 2023-04-01T14:36:53.888576 | 2019-10-17T02:23:04 | 2019-10-17T02:23:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,998 |
py
|
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''
This script stitches the NetLog files in a specified directory.
The complete NetLog will be written to net-internals-log.json in the directory
passed as argument to --path.
'''
import argparse, os
def main():
  """Stitch constants.json + event_file_<i>.json + end_netlog.json into a
  single net-internals-log.json, then delete the input pieces.

  Python 2 script (print statements).  On any missing input file the partial
  output is removed and the script reports which file was absent.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('--path', action='store',
      help="Specifies the complete filepath of the directory where the log "
      "files are located.")
  # TODO(dconnol): Automatically pull all event files matching the format
  # event_file_<num>.json and remove the num_files argument.
  parser.add_argument('--num_files', action='store',
      help="Specifies the number of event files (not including the constants "
      "file or the end_netlog file) that need need to be stitched together. "
      "The number of event files passed to the script must not be greater "
      "than the number of event files in the directory.")
  args = parser.parse_args()
  num_files = int(args.num_files)
  filepath = args.path
  if filepath[-1:] != "/":
    filepath += "/"
  os.chdir(filepath)

  with open("net-internals-log.json", "w") as stitched_file:
    # The constants file opens the JSON document.
    try:
      file = open("constants.json")
      with file:
        for line in file:
          stitched_file.write(line)
    except IOError:
      os.remove("net-internals-log.json")
      print "File \"constants.json\" not found."
      return

    events_written = False;
    for i in range(num_files):
      try:
        file = open("event_file_%d.json" % i)
        with file:
          # Skip the first line of the first event file only (it repeats
          # the opening written by constants.json).
          if not events_written:
            line = file.readline();
            events_written = True
          # Buffer one line behind so the final (possibly comma-terminated)
          # event line can be fixed up after the loop; blank lines are
          # folded into the pending line.
          for next_line in file:
            if next_line.strip() == "":
              line += next_line
            else:
              stitched_file.write(line)
              line = next_line
      except IOError:
        os.remove("net-internals-log.json")
        print "File \"event_file_%d.json\" not found." % i
        return

    # Remove hanging comma from last event
    # TODO(dconnol): Check if the last line is a valid JSON object. If not,
    # do not write the line to file. This handles incomplete logs.
    line = line.strip()
    if line[-1:] == ",":
      stitched_file.write(line[:-1])
    elif line:
      raise ValueError('Last event is not properly formed')

    # end_netlog.json closes the JSON document.
    try:
      file = open("end_netlog.json")
      with file:
        for line in file:
          stitched_file.write(line)
    except IOError:
      os.remove("net-internals-log.json")
      print "File \"end_netlog\" not found."
      return

  # Delete old NetLog files
  for i in range (num_files):
    os.remove("event_file_%d.json" % i)
  os.remove("constants.json")
  os.remove("end_netlog.json")


if __name__ == "__main__":
  main()
|
[
"[email protected]"
] | |
c279470529493caf6dca7d09df9d96753ca09dc2
|
d4280eca1a9badb0a4ad2aa22598616eedece373
|
/Automate The Boring Stuff With Python/03/04-sameName.py
|
c723f7c075712db216aaaf5d638a7face06363b8
|
[] |
no_license
|
Little-Captain/py
|
77ec12bb2aaafe9f709a70831266335b03f63663
|
74ba3c3449e7b234a77500a17433e141e68169f7
|
refs/heads/master
| 2021-06-09T11:33:23.205388 | 2019-11-22T01:17:44 | 2019-11-22T01:17:44 | 131,844,918 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 193 |
py
|
#!/usr/bin/env python
# Scope demonstration (Automate the Boring Stuff, ch. 3): each function's
# local `eggs` shadows the global one; the global is never modified.
# NOTE(review): indentation reconstructed from the canonical example --
# bacon() calls spam() between its two prints; confirm against upstream.


def spam():
    eggs = 'spam local'
    print(eggs)      # -> 'spam local'


def bacon():
    eggs = 'bacon local'
    print(eggs)      # -> 'bacon local'
    spam()           # spam's local eggs does not touch bacon's
    print(eggs)      # still 'bacon local'


eggs = 'global'
bacon()
print(eggs)          # -> 'global': the functions only shadowed it
|
[
"[email protected]"
] | |
269e0ffaa05096b410f812324e38587094ee38df
|
24a52b2b363417a8bdfeb8f669ee53b7ee19f4d6
|
/playa/conf.py
|
7579c8aef6242a240ea812a489b5517cb84d0ca7
|
[
"Apache-2.0"
] |
permissive
|
isabella232/playa
|
e203997e2660babe333d4915f294530cde57ccb0
|
a93335e592aa596645a60497a7c030a36ae7fec2
|
refs/heads/master
| 2023-03-18T23:51:35.577746 | 2011-07-15T01:07:53 | 2011-07-15T01:07:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 588 |
py
|
"""
playa.conf
~~~~~~~~~~
Represents the default values for all settings.
:copyright: (c) 2011 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
import os
import os.path
class PlayaConfig(object):
    """Default values for all playa settings."""

    # Directory containing this module; anchors every relative path below.
    ROOT = os.path.normpath(os.path.dirname(__file__))

    DEBUG = True

    # Directories scanned for audio files.
    AUDIO_PATHS = []

    WEB_HOST = '0.0.0.0'
    WEB_PORT = 9000
    WEB_LOG_FILE = os.path.join(ROOT, 'playa.log')
    WEB_PID_FILE = os.path.join(ROOT, 'playa.pid')

    DATA_PATH = os.path.join(ROOT, 'data')

    # NOTE(review): hard-coded secret committed to source control -- acceptable
    # for a dev default, but deployments should override it.
    SECRET_KEY = '_#(wkvb#@%%!x-dd!xt&i-1g5rylz4q&t6%m5u@3&7hyuqd437'
|
[
"[email protected]"
] | |
2013df2811af303bf28b622389c22251a0e40bff
|
99cd943ad5deed305608a516c0596cf3e1b552e5
|
/src/py/VendingMachine/vending_machine1.py
|
c9767d0aef06e0102daeaf59a770b9d458689ecd
|
[] |
no_license
|
koukyo1994/algorithms
|
da8beebafe95768890a88babdba5951b01a3f975
|
6cb3350f89ddbc244071c1bc3e1a10ec9e0760ed
|
refs/heads/master
| 2021-06-23T19:04:22.618607 | 2021-04-24T08:33:01 | 2021-04-24T08:33:01 | 200,551,709 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 639 |
py
|
# Simple vending-machine change calculator: read the inserted amount and the
# product price, then print a greedy breakdown of the change per denomination.
import sys

if __name__ == "__main__":
    insert_price = input("insert: ")
    if not insert_price.isdecimal():
        # Non-numeric input; message says "please enter an integer".
        print("整数を入力してください")
        sys.exit()
    product_price = input("product: ")
    if not product_price.isdecimal():
        print("整数を入力してください")
        sys.exit()
    change = int(insert_price) - int(product_price)
    if change < 0:
        # Inserted amount is below the price ("insufficient amount").
        print("金額が不足しています")
        sys.exit()
    # Greedy change-making over Japanese bill/coin denominations,
    # largest first (greedy is optimal for this coin system).
    coins = [5000, 1000, 500, 100, 50, 10, 5, 1]
    for coin in coins:
        n_coin = change // coin
        change = change % coin
        print(f"{coin}: {n_coin}")
|
[
"[email protected]"
] | |
96b751bafee5bfec57c1900b3f0737d33f666c7b
|
729ee5bcb31708a82b08509775786597dac02263
|
/coding-challenges/week09/day05/ccQ1.py
|
01507bc127c3a7c3790250ee8b5756ef255aa621
|
[] |
no_license
|
pandey-ankur-au17/Python
|
67c2478316df30c2ac8ceffa6704cf5701161c27
|
287007646a694a0dd6221d02b47923935a66fcf4
|
refs/heads/master
| 2023-08-30T05:29:24.440447 | 2021-09-25T16:07:23 | 2021-09-25T16:07:23 | 358,367,687 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 907 |
py
|
"""
Q-1 ) Squares of a Sorted Array:(5 marks) (easy)
https://leetcode.com/problems/squares-of-a-sorted-array/
Given an integer array nums sorted in non-decreasing order, return an array of the
squares of each number sorted in non-decreasing order.
Example 1:
Input: nums = [-4,-1,0,3,10]
Output: [0,1,9,16,100]
Explanation: After squaring, the array becomes [16,1,0,9,100].
After sorting, it becomes [0,1,9,16,100].
"""
def SortedArray(nums):
    """Return the squares of sorted array *nums*, in non-decreasing order.

    Two-pointer scan from both ends: the largest remaining square is always
    at one of the two extremes, so the output is filled back-to-front.
    O(n) time, O(n) extra space.
    """
    size = len(nums)
    out = [0] * size
    lo, hi = 0, size - 1
    # One output slot is filled per iteration, from the last slot down.
    for slot in range(size - 1, -1, -1):
        left_sq = nums[lo] * nums[lo]
        right_sq = nums[hi] * nums[hi]
        if left_sq > right_sq:
            out[slot] = left_sq
            lo += 1
        else:
            out[slot] = right_sq
            hi -= 1
    return out


if __name__ == "__main__":
    sample = [-4, -1, 0, 3, 10]
    print(SortedArray(sample))
|
[
"[email protected]"
] | |
feed39e1f437c4d336656b405b1148f3b07bb364
|
cfc7eed97d4987dbe80026205b7a127f89974d51
|
/ebcli/controllers/codesource.py
|
6fc3968ac2ad924babbabd2783fc67143c6b4fbd
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
stefansundin/awsebcli
|
bf71872328c4d94f073d5d0ae0740a0316d56fcf
|
8e17c8ad3d24e3c4cef9a4c5dfc6cae61bd7066d
|
refs/heads/main
| 2022-12-06T06:34:52.601029 | 2022-02-04T05:40:53 | 2022-11-20T01:38:26 | 230,182,128 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,387 |
py
|
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from ebcli.lib import utils
from ebcli.core import io
from ebcli.core.abstractcontroller import AbstractBaseController
from ebcli.resources.strings import strings, flag_text, prompts
from ebcli.operations import gitops
class CodeSourceController(AbstractBaseController):
    """`eb codesource` command: select where EB deployments take source from
    (the local workspace or an AWS CodeCommit repository)."""

    class Meta(AbstractBaseController.Meta):
        label = 'codesource'
        description = strings['codesource.info']
        arguments = [
            (
                ['sourcename'],
                dict(
                    action='store',
                    nargs='?',
                    help=flag_text['codesource.sourcename'],
                    choices=['codecommit', 'local'],
                    # normalize user input so e.g. 'CodeCommit' matches
                    type=str.lower
                )
            ),
        ]
        usage = 'eb codesource <sourcename> [options ...]'

    def do_command(self):
        """Apply the given source name, or prompt interactively when omitted."""
        sourcename = self.app.pargs.sourcename
        if sourcename is not None:
            if sourcename == 'local':
                # Show current CodeCommit settings before switching away.
                gitops.print_current_codecommit_settings()
                self.set_local()
            # NOTE(review): plain `if` rather than `elif`; harmless since the
            # two choices are mutually exclusive.
            if sourcename == 'codecommit':
                self.set_codecommit()
        else:
            self.prompt_for_codesource()

    def prompt_for_codesource(self):
        """Interactive chooser between CodeCommit and local source."""
        gitops.print_current_codecommit_settings()
        io.echo(prompts['codesource.codesourceprompt'])

        setup_choices = ['CodeCommit', 'Local']
        # Default selection is item 2 ('Local').
        choice = utils.prompt_for_item_in_list(setup_choices, 2)

        if choice == setup_choices[0]:
            self.set_codecommit()
        elif choice == setup_choices[1]:
            self.set_local()

    def set_local(self):
        # Turn off CodeCommit integration for this workspace.
        gitops.disable_codecommit()
        io.echo(strings['codesource.localmsg'])

    def set_codecommit(self):
        # Walk the user through CodeCommit repository/branch setup.
        gitops.initialize_codecommit()
|
[
"[email protected]"
] | |
c7fbb95fa05343cc561f50c34178cda5f263255f
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_363/ch18_2020_09_16_12_12_05_478212.py
|
d5e7f259a6b779b713536a1cdce9be08e76ba7cf
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 298 |
py
|
def testa_maioridade(idade):
if idade >= 21:
return 'Liberado EUA e BRASIL'
else:
if idade >= 18:
return 'Liberado BRASIL'
else:
return 'Não está liberado'
print(testa_maioridade(17))
print(testa_maioridade(20))
print(testa_maioridade(21))
|
[
"[email protected]"
] | |
85dd60d1a0c3316bda5a5dcf3306e7bf740b7417
|
b07c4f4b99a46689a650d52bf1bd1d32160f06d3
|
/tests/test_cps324.py
|
f14fcdd50b208eaae6ee51e93dfb35fd723dfb9a
|
[] |
no_license
|
nuxeo-cps/products--CPSUpgradeTests
|
2d67652c26fc212c9ec9864a76b0a7b1f819e2c9
|
e3b1f94eaf78278b529561b2384ea3a3479123b3
|
refs/heads/main
| 2023-01-22T00:46:51.434789 | 2006-09-02T08:22:30 | 2006-09-02T08:22:30 | 317,994,831 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,371 |
py
|
# Upgrade from CPS 3.2.4
DB_NAME = 'cps324'
import os
import unittest
# Warning, nifty tapdance ahead:
# When you import testing, it sets testing home to
# $SOFTWARE_HOME/lib/python/Testing
import Testing
# But we want it to be in a directory with our custom_zodb.py, so we set it,
# but only after importing Testing (or it will be reset later).
import App.config
cfg = App.config.getConfiguration()
cfg.testinghome = os.path.join(os.path.dirname(__file__), DB_NAME)
# During the import of the ZopeLite module, the Zope Application will be
# started, and it will now use our testinghome, find our custom_zodb.py and
# use our custom ZODB.
# Actually, we import upgradetestcase, which in turn imports ZopeTestCase,
# which in turn imports ZopeLite, which in turns starts Zope.
from upgradetestcase import PreGenericSetupTestCase
# Tapdance ends.
class TestUpgrade(PreGenericSetupTestCase):
db_dir = DB_NAME
def test_upgrade(self):
self._upgrade()
self._verifyDocument()
self._verifyPublishing()
self._verifyCalendaring()
self._verifyNewsItem()
self._checkSubGroupSupport()
self._verifyFolderDestruction()
def test_suite():
    """Collect this module's upgrade tests for the Zope test runner."""
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(TestUpgrade))
    return suite
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
|
[
"devnull@localhost"
] |
devnull@localhost
|
8c09c475eebebeba17d6965c5c16882309111a9f
|
b8441dc1987be9e64fa3081d456b2a3060ec44d1
|
/mars/core/graph/builder/tileable.py
|
ddfbf93711c35982d8d457f21204d791adbbb977
|
[
"BSD-3-Clause",
"MIT",
"ISC",
"Apache-2.0",
"CC0-1.0",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mars-project/mars
|
f99fefbce999d58a9249bc72046787a9731c9c73
|
c36c53fa22e10ef9477d9c454401a2f281375f31
|
refs/heads/master
| 2023-07-23T00:23:55.133015 | 2023-07-03T11:44:54 | 2023-07-03T11:44:54 | 160,543,708 | 2,704 | 362 |
Apache-2.0
| 2023-09-11T07:57:35 | 2018-12-05T16:04:03 |
Python
|
UTF-8
|
Python
| false | false | 1,230 |
py
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union, Generator
from ...mode import enter_mode
from ..entity import TileableGraph, ChunkGraph
from .base import AbstractGraphBuilder
class TileableGraphBuilder(AbstractGraphBuilder):
    """Builder that materializes a :class:`TileableGraph` from its result tileables."""
    # The graph being built; narrowed from the base class for type checkers.
    _graph: TileableGraph
    def __init__(self, graph: TileableGraph):
        super().__init__(graph=graph)
    @enter_mode(build=True, kernel=True)
    def _build(self) -> Union[TileableGraph, ChunkGraph]:
        # Seed the traversal with every result tileable; _add_nodes (from the
        # base class) walks in predecessors as well -- see AbstractGraphBuilder.
        self._add_nodes(self._graph, list(self._graph.result_tileables), set())
        return self._graph
    def build(self) -> Generator[Union[TileableGraph, ChunkGraph], None, None]:
        """Yield the fully built tileable graph (single-element generator)."""
        yield self._build()
|
[
"[email protected]"
] | |
e38060a8c7d9bb18f3deb109b85e49558db91fda
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/59/usersdata/147/61122/submittedfiles/testes.py
|
06774e42c5ec729f01f08e760f84be3690f8d627
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 132 |
py
|
# -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
n = int(input('digite n:'))
# First (thousands) digit of the four-digit number n.
x1 = n // 1000
# Second (hundreds) digit. The original computed (n//1000) % 100 // 100,
# which is always 0 because n//1000 is a single digit; the intended value
# is (n % 1000) // 100.
x2 = (n % 1000) // 100
print(x1)
print(x2)
|
[
"[email protected]"
] | |
48fd13cd46e26454f058944a362e8996ca192344
|
2edf3a0d21117c65dffe87c3da81365c77d66679
|
/dfirtrack_main/tests/system/test_system_importer_file_csv_config_based_forms.py
|
baa1cddf83741025adb6aacefe2ee628c2689cb3
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
fxcebx/dfirtrack
|
003748305aa412aa9ec043faa98dac45d3053b5c
|
20acf4e508aeef9faf2ed1d2195918b6640c1307
|
refs/heads/master
| 2022-12-10T02:25:47.676855 | 2020-09-24T23:15:42 | 2020-09-24T23:15:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,548 |
py
|
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase
from dfirtrack_main.importer.file.csv_importer_forms import SystemImporterFileCsvConfigbasedForm
class SystemImporterFileCsvConfigbasedFormTestCase(TestCase):
""" system importer file CSV config-based form tests """
def test_system_importer_file_csv_config_based_systemcsv_form_label(self):
""" test form label """
# get object
form = SystemImporterFileCsvConfigbasedForm()
# compare
self.assertEqual(form.fields['systemcsv'].label, 'CSV with systems (*)')
def test_system_importer_file_csv_config_based_form_empty(self):
""" test minimum form requirements / INVALID """
# get object
form = SystemImporterFileCsvConfigbasedForm(data = {})
# compare
self.assertFalse(form.is_valid())
def test_system_importer_file_csv_config_based_systemcsv_form_filled(self):
""" test minimum form requirements / VALID """
# get file
upload_csv = open('example_data/dfirtrack_main_importer_file_csv_system__valid.csv', 'rb')
# create dictionaries
data_dict = {}
file_dict = {
'systemcsv': SimpleUploadedFile(upload_csv.name, upload_csv.read()),
}
# get object
form = SystemImporterFileCsvConfigbasedForm(
data = data_dict,
files = file_dict,
)
# close file
upload_csv.close()
# compare
self.assertTrue(form.is_valid())
|
[
"[email protected]"
] | |
7c34356fc7693cae881d92047c8d025ff83373d7
|
41f548fc3052d4cd3a94e3171a0e2120705ed760
|
/Gomine_DOC_Unicode/Old_crawl/shiye/shiye/items.py
|
ecb978c4f13f93ff5406aee5a8d1ec921ae69426
|
[] |
no_license
|
SuperShen9/Scrapy
|
806f972bcd05d85bf02349c5ee7711af550c8568
|
cbe141f697596d5a384bb968d7343194236a541f
|
refs/heads/master
| 2021-01-19T13:04:19.957911 | 2018-06-27T23:47:21 | 2018-06-27T23:47:21 | 88,060,453 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 329 |
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ShiyeItem(scrapy.Item):
    """Scraped record: an entity name, its code, and the source URL."""
    name = scrapy.Field()
    code = scrapy.Field()
    url = scrapy.Field()
|
[
"[email protected]"
] | |
e984ed448f3a0a8dc1424728498e0d9e98beb857
|
0032c98333ffc0efdb920ecca31ab224378880e5
|
/rpi-tutorial/Servo2.py
|
a25b57ffaefc303c79cc41c4e84ef8fd55d8d646
|
[] |
no_license
|
raspibrick/install
|
bd1c6f9a8cb524f2ab5a2c17ad8c5463b768dffa
|
96288d6ca21abd8fb993cc376e37c16473b54dd5
|
refs/heads/master
| 2021-01-10T05:00:39.159879 | 2019-07-25T09:46:04 | 2019-07-25T09:46:04 | 40,703,681 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 759 |
py
|
# Servo2.py
# Two servo motors driven by PCA9685 chip
# NOTE: Python 2 source (uses print statements).
from smbus import SMBus
from PCA9685 import PWM
import time
# I2C address of the PCA9685 PWM controller.
i2c_address = 0x40
# PWM frequency in Hz (50 Hz is the usual servo frame rate).
fPWM = 50
channel = 1
# Linear angle-to-duty mapping: duty = a / 180 * angle + b.
# Calibration constants; presumably tuned for these servos -- verify on hardware.
a = 8.5
b = 3
def setup():
    # Open I2C bus 1 and create the PWM driver used by setDirection().
    global pwm
    bus = SMBus(1) # Raspberry Pi revision 2
    pwm = PWM(bus, i2c_address)
    pwm.setFreq(fPWM)
def setDirection(direction):
    # Convert the requested angle (degrees) to a duty cycle and drive the
    # servo on the currently selected global `channel`.
    duty = a / 180 * direction + b
    pwm.setDuty(channel, duty)
    print "direction =", direction, "-> duty =", duty
    time.sleep(0.5) # allow to settle
print "starting"
setup()
# Sweep the servo on channel 0 from 0 to 90 degrees in 10-degree steps.
channel = 0
for direction in range(0, 91, 10):
    setDirection(direction)
direction = 0
setDirection(0)
# Repeat the same sweep for the servo on channel 1.
channel = 1
for direction in range(0, 91, 10):
    setDirection(direction)
direction = 0
setDirection(0)
print "done"
|
[
"[email protected]"
] | |
d0585631be5a98578e7397c70df0b3441eda5577
|
72d6b3ab3fc2c7014967a156de082d1c617cbf0f
|
/操作数据库/使用Flask连接MySQL_将话务数据入库.py
|
04fe27cd42ae98af04094148fdc468a3a171760e
|
[] |
no_license
|
fengmingshan/python
|
19a1732591ad061a8291c7c84e6f00200c106f38
|
b35dbad091c9feb47d1f0edd82e568c066f3c6e9
|
refs/heads/master
| 2021-06-03T08:35:50.019745 | 2021-01-19T15:12:01 | 2021-01-19T15:12:01 | 117,310,092 | 4 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,285 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 1 10:16:45 2020
@author: Administrator
"""
from flask import Flask, render_template
from flask_sqlalchemy import SQLAlchemy
import pandas as pd
import os
work_path = 'd:/_python/python/操作数据库/'
os.chdir(work_path)
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "mysql+pymysql://root:a123456@localhost:3306/eric_traffic?charset=utf8"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config['SQLALCHEMY_COMMMIT_ON_TEARDOWN'] = True
# 建立数据库对象
db = SQLAlchemy(app)
#db = SQLAlchemy(app, use_native_unicode='utf8')
title = ['key',
'week',
'eNodeB',
'EUTRANCELLFDD',
'Acc_WirelessConnSucRate',
'Acc_ERAB_droppingrate',
'AirInterface_Traffic_Volume_UL_MBytes',
'AirInterface_Traffic_Volume_DL_MBytes',
'Int_DownlinkLatency',
'MaxnumberofUEinRRc',
'pmCellDowntimeAuto1',
'pmCellDowntimeMan1',
'Data_Coverage',
'Ava_CellAvail',
'NumofLTERedirectto3G',
'AvgNumberofULActiveUsers',
'AvgNumberofDLActiveUsers',
'DL_Util_of_PRB',
'DLactiveuesum',
'CellPDCPDLbit',
'AvgUserFellThroughput_Mbps'
]
df_eric = pd.read_csv('./爱立信0224-0301_mini.csv', header=None, names=title)
df_eric = df_eric[['key',
'week',
'eNodeB',
'EUTRANCELLFDD',
'Acc_WirelessConnSucRate',
'Acc_ERAB_droppingrate',
'AirInterface_Traffic_Volume_UL_MBytes',
'AirInterface_Traffic_Volume_DL_MBytes',
'Int_DownlinkLatency',
'MaxnumberofUEinRRc',
'AvgNumberofULActiveUsers',
'AvgNumberofDLActiveUsers',
'DL_Util_of_PRB',
'AvgUserFellThroughput_Mbps'
]]
# 建立数据库类,用来映射到数据库中的表。
class Eric_day(db.Model):
    """ORM model for one day's Ericsson LTE cell traffic KPIs."""
    # Declare the table name.
    __tablename__ = 'eric_day'
    # Column definitions (one row per cell per day; `key` is the primary key).
    key = db.Column(db.String(200), primary_key=True)
    week = db.Column(db.Integer)
    eNodeB = db.Column(db.String(200))
    EUTRANCELLFDD = db.Column(db.String(200))
    Acc_WirelessConnSucRate = db.Column(db.Float)
    Acc_ERAB_droppingrate = db.Column(db.Float)
    AirInterface_Traffic_Volume_UL_MBytes = db.Column(db.Float)
    AirInterface_Traffic_Volume_DL_MBytes = db.Column(db.Float)
    Int_DownlinkLatency = db.Column(db.Float)
    MaxnumberofUEinRRc = db.Column(db.Integer)
    AvgNumberofULActiveUsers = db.Column(db.Float)
    AvgNumberofDLActiveUsers = db.Column(db.Float)
    DL_Util_of_PRB = db.Column(db.Float)
    AvgUserFellThroughput_Mbps = db.Column(db.Float)
    def __repr__(self):
        # Compact debug representation showing the identifying fields only.
        return '<User key: {}, week: {}, eNodeB: {}, EUTRANCELLFDD: {}, Acc_WirelessConnSucRate: {}, Acc_ERAB_droppingrate: {}>'.format(
            self.key, self.week, self.eNodeB, self.EUTRANCELLFDD, self.Acc_WirelessConnSucRate, self.Acc_ERAB_droppingrate)
#db.drop_all()
db.create_all()
# =============================================================================
# 导入数据
# =============================================================================
traffic_data = [Eric_day(
key=key,
week=wk,
eNodeB=enb,
EUTRANCELLFDD=cell,
Acc_WirelessConnSucRate=accrate,
Acc_ERAB_droppingrate=drop,
AirInterface_Traffic_Volume_UL_MBytes=uth,
AirInterface_Traffic_Volume_DL_MBytes=dth,
Int_DownlinkLatency=lat,
MaxnumberofUEinRRc=mrrc,
AvgNumberofULActiveUsers=uact,
AvgNumberofDLActiveUsers=dact,
DL_Util_of_PRB=prb,
AvgUserFellThroughput_Mbps=fell
) for key,wk, enb, cell, accrate, drop, uth, dth, lat, mrrc, uact, dact, prb, fell in zip(
df_eric['key'],
df_eric['week'],
df_eric['eNodeB'],
df_eric['EUTRANCELLFDD'],
df_eric['Acc_WirelessConnSucRate'],
df_eric['Acc_ERAB_droppingrate'],
df_eric['AirInterface_Traffic_Volume_UL_MBytes'],
df_eric['AirInterface_Traffic_Volume_DL_MBytes'],
df_eric['Int_DownlinkLatency'],
df_eric['MaxnumberofUEinRRc'],
df_eric['AvgNumberofULActiveUsers'],
df_eric['AvgNumberofDLActiveUsers'],
df_eric['DL_Util_of_PRB'],
df_eric['AvgUserFellThroughput_Mbps']
)]
for item in traffic_data:
db.session.add(item)
db.session.commit()
# 原生SQL语句方式
#db.session.execute(r'insert into user values (8, "wjz", "test123")')
#db.session.execute(r'insert into user values (9, "wjz", "admin123")')
#
#db.session.commit()
# =============================================================================
# 查表
# =============================================================================
# ORM方式
btslist = Eric_day.query.order_by('eNodeB').all()
# 使用class User定义好的格式进行print
for bts in btslist:
print(bts)
# 自定义格式print
for bts in btslist:
print(bts.week, ' ', bts.eNodeB, ' ', bts.EUTRANCELLFDD, ' ', bts.Acc_WirelessConnSucRate, ' ', bts.Acc_ERAB_droppingrate)
# 原生数据库语句_推荐
item = db.session.execute('select * from user order by id asc')
# #将结果集强转为list
item = list(item)
for i in item:
print(i)
# =============================================================================
# 删除内容
# =============================================================================
# ORM方式
# User.query.filter_by(id=6).delete()
# User.query.filter_by(id=7).delete()
# User.query.filter_by(id=8).delete()
# User.query.filter_by(id=9).delete()
# db.session.commit()
#
# 原生SQL语句方式
#db.session.execute(r'delete from user where id = 7')
# db.session.commit()
# =============================================================================
# 修改内容
# =============================================================================
# ORM方式
# User.query.filter_by(id=3).update({'name':'张三'})
# User.query.filter_by(id=4).update({'name':'李四'})
# db.session.commit()
#
# 原生SQL语句方式
#db.session.execute(r'update user set name="李四" where id= 4')
#db.session.execute(r'update user set name="王二" where id= 5')
# db.session.commit()
#
#userlist1 = User.query.order_by('id').all()
|
[
"[email protected]"
] | |
341214ce0a249bddd010f09c10ca7e03d99e3426
|
b76615ff745c6d66803506251c3d4109faf50802
|
/pyobjc-core/Examples/Scripts/wmEnable.py
|
df5e11a37bc3d66f12b37c05d46dbc5fd132e7d2
|
[
"MIT"
] |
permissive
|
danchr/pyobjc-git
|
6ef17e472f54251e283a0801ce29e9eff9c20ac0
|
62b787fddeb381184043c7ff136f1c480755ab69
|
refs/heads/master
| 2021-01-04T12:24:31.581750 | 2020-02-02T20:43:02 | 2020-02-02T20:43:02 | 240,537,392 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,513 |
py
|
#!/usr/bin/pythonw
"""
This is an evil undocumented SPI hack that shows how to enable GUI operation
from a console application.
BUNDLES ARE RECOMMENDED, USE THIS AT YOUR OWN RISK!!
"""
import os
import sys
import objc
from Foundation import *
def S(*args):
    """Concatenate byte-string fragments into one signature bytes object."""
    return bytes().join(args)
OSErr = objc._C_SHT
OUTPSN = b"o^{ProcessSerialNumber=LL}"
INPSN = b"n^{ProcessSerialNumber=LL}"
FUNCTIONS = [
# These two are public API
("GetCurrentProcess", S(OSErr, OUTPSN)),
("SetFrontProcess", S(OSErr, INPSN)),
# This is undocumented SPI
("CPSSetProcessName", S(OSErr, INPSN, objc._C_CHARPTR)),
("CPSEnableForegroundOperation", S(OSErr, INPSN)),
]
def WMEnable(name="Python"):
if not isinstance(name, bytes):
name = name.encode("utf8")
mainBundle = NSBundle.mainBundle()
bPath = os.path.split(os.path.split(os.path.split(sys.executable)[0])[0])[0]
if mainBundle.bundlePath() == bPath:
return True
bndl = NSBundle.bundleWithPath_(
objc.pathForFramework(
"/System/Library/Frameworks/ApplicationServices.framework"
)
)
if bndl is None:
print >>sys.stderr, "ApplicationServices missing"
return False
d = {}
objc.loadBundleFunctions(bndl, d, FUNCTIONS)
for (fn, sig) in FUNCTIONS:
if fn not in d:
print >>sys.stderr, "Missing", fn
return False
err, psn = d["GetCurrentProcess"](None)
if err:
print >>sys.stderr, "GetCurrentProcess", (err, psn)
return False
err = d["CPSSetProcessName"](psn, name)
if err:
print >>sys.stderr, "CPSSetProcessName", (err, psn)
return False
err = d["CPSEnableForegroundOperation"](psn)
if err:
print >>sys.stderr, "CPSEnableForegroundOperation", (err, psn)
return False
err = d["SetFrontProcess"](psn)
if err:
print >>sys.stderr, "SetFrontProcess", (err, psn)
return False
return True
class AppDelegate(NSObject):
def applicationDidFinishLaunching_(self, sender):
rval = AppKit.NSRunAlertPanel("WM Enabled", "WM was enabled!", None, None, None)
AppKit.NSApp().terminate_(self)
if __name__ == "__main__":
import sys
if WMEnable(os.path.basename(os.path.splitext(sys.argv[0])[0])):
import AppKit
app = AppKit.NSApplication.sharedApplication()
delegate = AppDelegate.alloc().init()
app.setDelegate_(delegate)
app.run()
else:
print("WM was not enabled")
|
[
"[email protected]"
] | |
23431939ada901e854bbd6ac06687c0c52e512f9
|
23a3c76882589d302b614da5f4be0fc626b4f3cd
|
/python_modules/dagster/dagster/core/definitions/trigger.py
|
ac2193f821092157e9e91f5367bb6b2bc68ba5d4
|
[
"Apache-2.0"
] |
permissive
|
DavidKatz-il/dagster
|
3641d04d387cdbe5535ae4f9726ce7dc1981a8c3
|
7c6d16eb8b3610a21020ecb479101db622d1535f
|
refs/heads/master
| 2022-12-20T13:08:36.462058 | 2020-09-14T18:12:12 | 2020-09-14T22:43:26 | 264,703,873 | 0 | 0 |
Apache-2.0
| 2020-06-16T09:49:00 | 2020-05-17T15:56:57 |
Python
|
UTF-8
|
Python
| false | false | 4,312 |
py
|
from collections import namedtuple
from dagster import check
from dagster.core.instance import DagsterInstance
from dagster.utils.backcompat import experimental_class_warning
from .mode import DEFAULT_MODE_NAME
class TriggeredExecutionContext(namedtuple("TriggeredExecutionContext", "instance")):
    """Trigger-specific execution context.
    An instance of this class is made available as the first argument to the
    TriggeredExecutionDefinition execution_params_fn
    Attributes:
        instance (DagsterInstance): The instance configured to run the triggered execution
    """
    def __new__(
        cls, instance,
    ):
        # Warn callers that this API is experimental and may change.
        experimental_class_warning("TriggeredExecutionContext")
        # The namedtuple base keeps the context immutable; validate the
        # instance type eagerly before construction.
        return super(TriggeredExecutionContext, cls).__new__(
            cls, check.inst_param(instance, "instance", DagsterInstance),
        )
class TriggeredExecutionDefinition(object):
"""Define a pipeline execution that responds to a trigger
Args:
name (str): The name of this triggered execution to create.
pipeline_name (str): The name of the pipeline to execute when the trigger fires.
run_config_fn (Callable[[TriggeredExecutionContext], [Dict]]): A function that takes a
TriggeredExecutionContext object and returns the environment configuration that
parameterizes this execution, as a dict.
tags_fn (Optional[Callable[[TriggeredExecutionContext], Optional[Dict[str, str]]]]): A
function that generates tags to attach to the triggered execution. Takes a
:py:class:`~dagster.TriggeredExecutionContext` and returns a dictionary of tags (string
key-value pairs).
should_execute_fn (Optional[Callable[[TriggeredExecutionContext], bool]]): A function that
runs at trigger time to determine whether a pipeline execution should be initiated or
skipped. Takes a :py:class:`~dagster.TriggeredExecutionContext` and returns a boolean
(``True`` if a pipeline run should be execute). Defaults to a function that always
returns ``True``.
mode (Optional[str]): The mode to apply when executing this pipeline. (default: 'default')
solid_selection (Optional[List[str]]): A list of solid subselection (including single
solid names) to execute when the trigger fires. e.g. ``['*some_solid+', 'other_solid']``
"""
__slots__ = [
"_name",
"_pipeline_name",
"_tags_fn",
"_run_config_fn",
"_should_execute_fn",
"_mode",
"_solid_selection",
]
def __init__(
self,
name,
pipeline_name,
run_config_fn=None,
tags_fn=None,
should_execute_fn=None,
mode="default",
solid_selection=None,
):
experimental_class_warning("TriggeredExecutionDefinition")
self._name = check.str_param(name, "name")
self._pipeline_name = check.str_param(pipeline_name, "pipeline_name")
self._run_config_fn = check.opt_callable_param(
run_config_fn, "run_config_fn", lambda _context: {}
)
self._tags_fn = check.opt_callable_param(tags_fn, "tags_fn", lambda _context: {})
self._should_execute_fn = check.opt_callable_param(
should_execute_fn, "should_execute_fn", lambda _context: True
)
self._mode = check.opt_str_param(mode, "mode", DEFAULT_MODE_NAME)
self._solid_selection = check.opt_nullable_list_param(
solid_selection, "solid_selection", of_type=str
)
@property
def pipeline_name(self):
return self._pipeline_name
@property
def solid_selection(self):
return self._solid_selection
@property
def name(self):
return self._name
@property
def mode(self):
return self._mode
def get_run_config(self, context):
check.inst_param(context, "context", TriggeredExecutionContext)
return self._run_config_fn(context)
def get_tags(self, context):
check.inst_param(context, "context", TriggeredExecutionContext)
return self._tags_fn(context)
def should_execute(self, context):
check.inst_param(context, "context", TriggeredExecutionContext)
return self._should_execute_fn(context)
|
[
"[email protected]"
] | |
8344d1a9044d83a7d0867f09887cb1d7af8c0729
|
3259ffe73a1b2f1a17f0cf0512452d47f47f441d
|
/Leet Code/268_missing_number.py
|
f3cae5afa7eb9cf57fddd9e5bb12c9d9ea15054c
|
[
"MIT"
] |
permissive
|
aayushmaru18/Competitive-Programming
|
a9160509afe32ee3eced0b7d830c33d62ba6f146
|
0ef237a140901005371a792eea4676b5386c7c50
|
refs/heads/master
| 2023-05-04T22:03:01.224426 | 2021-06-01T08:37:19 | 2021-06-01T08:37:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 183 |
py
|
class Solution:
    def missingNumber(self, nums: List[int]) -> int:
        """Return the one value in [0, len(nums)] that is absent from nums.

        Uses the Gauss sum of 0..n minus the actual sum of the elements,
        replacing the original manual subtraction loop with the built-in
        ``sum`` (same O(n) time, O(1) space, clearer intent).
        """
        n = len(nums)
        return n * (n + 1) // 2 - sum(nums)
|
[
"[email protected]"
] | |
ffc3e0a708efdb334677d8fcea0d1a1dc4ef2f87
|
81c8beba79c93c50df57ae9654ed23a6b5a1546f
|
/more/highrest/model.py
|
539ab0b21c09af56c5c1161765d2bf3524b4d785
|
[] |
no_license
|
morepath/more.highrest
|
d80a0f3813b246ce636e63b3bf62954ac899ee2f
|
c15b700b647cd59f4a4dc8bb422e8eb7f9574c4d
|
refs/heads/master
| 2021-01-23T03:21:27.649821 | 2017-03-24T16:41:56 | 2017-03-24T16:41:56 | 86,072,014 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 918 |
py
|
class CollectionBase(object):
    """Abstract base for offset/limit-paged collections.

    Subclasses supply storage (`clone`, `add`, `query`, `load`, `count`);
    this base derives the neighbouring pages from `offset` and `limit`.
    """
    def clone(self, offset, limit, *args, **kw):
        # Return a copy of this collection positioned at (offset, limit).
        raise NotImplementedError()
    def add(self, data):
        raise NotImplementedError()
    def query(self):
        raise NotImplementedError()
    @staticmethod
    def load(request):
        raise NotImplementedError()
    def count(self):
        raise NotImplementedError()
    def previous(self):
        """Return the previous page, or None when already at the start."""
        if self.offset == 0:
            return None
        return self.clone(max(self.offset - self.limit, 0), self.limit)
    def next(self):
        """Return the next page, or None when past the last item."""
        upcoming = self.offset + self.limit
        if upcoming >= self.count():
            return None
        return self.clone(upcoming, self.limit)
class ItemBase(object):
    """Abstract base for a single collection item; subclasses implement persistence."""
    def update(self, data):
        # Apply the given field values to this item.
        raise NotImplementedError()
    def remove(self):
        # Delete this item from its collection.
        raise NotImplementedError()
|
[
"[email protected]"
] | |
27f37a64fd56abf5c84a6de0d251780d79d6574c
|
f889bc01147869459c0a516382e7b95221295a7b
|
/swagger_client/models/body_19.py
|
c3a0a50364961c0b3731d25640fcee6a6b0617c9
|
[] |
no_license
|
wildatheart/magento2-api-client
|
249a86f5c0289743f8df5b0324ccabd76f326512
|
e6a707f85b37c6c3e4ef3ff78507a7deb8f71427
|
refs/heads/master
| 2021-07-14T16:01:17.644472 | 2017-10-18T13:33:08 | 2017-10-18T13:33:08 | 107,412,121 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,764 |
py
|
# coding: utf-8
"""
Magento Community
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Body19(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'product': 'CatalogDataProductInterface',
'save_options': 'bool'
}
attribute_map = {
'product': 'product',
'save_options': 'saveOptions'
}
def __init__(self, product=None, save_options=None):
"""
Body19 - a model defined in Swagger
"""
self._product = None
self._save_options = None
self.product = product
if save_options is not None:
self.save_options = save_options
@property
def product(self):
"""
Gets the product of this Body19.
:return: The product of this Body19.
:rtype: CatalogDataProductInterface
"""
return self._product
@product.setter
def product(self, product):
"""
Sets the product of this Body19.
:param product: The product of this Body19.
:type: CatalogDataProductInterface
"""
if product is None:
raise ValueError("Invalid value for `product`, must not be `None`")
self._product = product
@property
def save_options(self):
"""
Gets the save_options of this Body19.
:return: The save_options of this Body19.
:rtype: bool
"""
return self._save_options
@save_options.setter
def save_options(self, save_options):
"""
Sets the save_options of this Body19.
:param save_options: The save_options of this Body19.
:type: bool
"""
self._save_options = save_options
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, Body19):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
[
"[email protected]"
] | |
f4bac242f4ebcee19ff5f95406e835f40256a054
|
6118f2fa2be32a1b1d50a0965f7fa3e137b408bc
|
/examples/increment.py
|
63228df71029b9ba4cb0701f57bd88b8a8616fee
|
[
"MIT"
] |
permissive
|
eerimoq/mqttools
|
66f296c3c76b4909c86d5d287e4a96b6b755bd44
|
a28c86e89af0852249a5d6f33f9e67036c3eb8fe
|
refs/heads/master
| 2021-12-27T04:35:33.868653 | 2021-12-24T12:15:01 | 2021-12-24T12:15:01 | 184,444,451 | 58 | 13 |
MIT
| 2021-11-30T19:04:53 | 2019-05-01T16:15:41 |
Python
|
UTF-8
|
Python
| false | false | 799 |
py
|
import asyncio
import mqttools
# Broker location; presumably a locally running MQTT broker -- adjust as needed.
HOST = 'localhost'
PORT = 1883
async def main():
    # Connect to the broker, then answer every message on the request topic
    # with its integer value incremented by one.
    client = mqttools.Client(HOST, PORT)
    await client.start()
    print(f'Connected to {HOST}:{PORT}.')
    await client.subscribe('/mqttools/incrementer/value/request')
    print('Subscribed to topic /mqttools/incrementer/value/request.')
    while True:
        # messages.get() yields None when the broker connection is lost.
        message = await client.messages.get()
        if message is None:
            print('Broker connection lost!')
            break
        count = int(message.message)
        print(f'Request count: {count}')
        count += 1
        print(f'Response count: {count}')
        client.publish(mqttools.Message('/mqttools/counter-client/value/response',
                                        str(count).encode('ascii')))
asyncio.run(main())
|
[
"[email protected]"
] | |
5c41edc54e8e9283a6870f5b3623e2c2ac088296
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/AlipayOpenPublicTopicModifyResponse.py
|
6966bb50727abcc86c30bc3aab8df6044201e7f2
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 |
Apache-2.0
| 2023-04-25T04:54:02 | 2018-05-14T09:40:54 |
Python
|
UTF-8
|
Python
| false | false | 446 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayOpenPublicTopicModifyResponse(AlipayResponse):
    """Response model for alipay.open.public.topic.modify (no extra payload fields)."""
    def __init__(self):
        super(AlipayOpenPublicTopicModifyResponse, self).__init__()
    def parse_response_content(self, response_content):
        # NOTE(review): the parsed result is bound but never returned or stored;
        # sibling generated responses typically assign parsed fields here --
        # verify this truncation against the code generator's template.
        response = super(AlipayOpenPublicTopicModifyResponse, self).parse_response_content(response_content)
|
[
"[email protected]"
] | |
c0501cb9929bcf12f787d370a8a1d9c9a0509d34
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/4/usersdata/133/2972/submittedfiles/swamee.py
|
8bc3612585f4c89c2b305dc9bb6a96027440b5e3
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 456 |
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import math
#COMECE SEU CÓDIGO AQUI
f=input('Digite o valor de f:')
l=input('Digite o valor de l:')
q=input('Digite o valor de q:')
deltah=input('Digite o valor de deltah:')
v=input('Digite o valor de v:')
D = ((8*f*l*(q*q))/(((math.pi)**2)*9.81*deltah))**0.2
Rey = (4*q)/((math.pi)*D*v)
k = (0.25)/((math.log10((0.000002)/(3.7*D) + (5.74)/((Rey)**0.9)))**0.5)
print('%.4f, %.4f, %.4f' %D %Rey %k)
|
[
"[email protected]"
] | |
9d5268559b9f20871c0835f6a0a9edd415c007c8
|
280019d1106e6dd887f1c0fe020bcd433790d8e1
|
/capture_tag/templatetags/capture_tags.py
|
0b5e7b45174cd019e0a95bd44634aada96d9f16d
|
[
"Apache-2.0"
] |
permissive
|
edoburu/django-capture-tag
|
41af5dea34ec791791e03a95e2e52b88dd8c3ea8
|
f63533dd1a5ce3926c36e5795a3767ab4d7eb6fc
|
refs/heads/master
| 2023-08-14T05:05:00.023501 | 2021-11-16T22:04:50 | 2021-11-16T22:04:50 | 56,684,352 | 19 | 4 |
Apache-2.0
| 2023-07-11T08:20:29 | 2016-04-20T12:19:46 |
Python
|
UTF-8
|
Python
| false | false | 2,445 |
py
|
from django.template import Library, Node, TemplateSyntaxError
register = Library()
@register.tag(name="capture")
def do_capture(parser, token):
"""
Capture the contents of a tag output.
Usage:
.. code-block:: html+django
{% capture %}..{% endcapture %} # output in {{ capture }}
{% capture silent %}..{% endcapture %} # output in {{ capture }} only
{% capture as varname %}..{% endcapture %} # output in {{ varname }}
{% capture as varname silent %}..{% endcapture %} # output in {{ varname }} only
For example:
.. code-block:: html+django
{# Allow templates to override the page title/description #}
<meta name="description" content="{% capture as meta_description %}{% block meta-description %}{% endblock %}{% endcapture %}" />
<title>{% capture as meta_title %}{% block meta-title %}Untitled{% endblock %}{% endcapture %}</title>
{# copy the values to the Social Media meta tags #}
<meta property="og:description" content="{% block og-description %}{{ meta_description }}{% endblock %}" />
<meta name="twitter:title" content="{% block twitter-title %}{{ meta_title }}{% endblock %}" />
"""
bits = token.split_contents()
# tokens
t_as = "as"
t_silent = "silent"
var = "capture"
silent = False
num_bits = len(bits)
if len(bits) > 4:
raise TemplateSyntaxError("'capture' node supports '[as variable] [silent]' parameters.")
elif num_bits == 4:
t_name, t_as, var, t_silent = bits
silent = True
elif num_bits == 3:
t_name, t_as, var = bits
elif num_bits == 2:
t_name, t_silent = bits
silent = True
else:
var = "capture"
silent = False
if t_silent != "silent" or t_as != "as":
raise TemplateSyntaxError("'capture' node expects 'as variable' or 'silent' syntax.")
nodelist = parser.parse(("endcapture",))
parser.delete_first_token()
return CaptureNode(nodelist, var, silent)
class CaptureNode(Node):
    """Render the child nodelist, store the output in the template context
    under ``varname``, and optionally suppress the inline output (``silent``).
    """
    def __init__(self, nodelist, varname, silent):
        self.nodelist = nodelist
        self.varname = varname
        self.silent = silent
    def render(self, context):
        rendered = self.nodelist.render(context)
        context[self.varname] = rendered
        return "" if self.silent else rendered
|
[
"[email protected]"
] | |
07c86867c6a6240b881b7799c91f53d202d3a79c
|
e5e2b7da41fda915cb849f031a0223e2ac354066
|
/sdk/python/pulumi_azure_native/media/v20200201preview/_enums.py
|
60f247261361ebfece18ad62df164c9d945509e3
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
johnbirdau/pulumi-azure-native
|
b7d3bdddeb7c4b319a7e43a892ddc6e25e3bfb25
|
d676cc331caa0694d8be99cb90b93fa231e3c705
|
refs/heads/master
| 2023-05-06T06:48:05.040357 | 2021-06-01T20:42:38 | 2021-06-01T20:42:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 416 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'MediaGraphRtspTransport',
]
class MediaGraphRtspTransport(str, Enum):
    """
    Underlying RTSP transport. This can be used to enable or disable HTTP tunneling.
    """
    # Tunnel RTSP over HTTP (useful through restrictive firewalls/proxies).
    HTTP = "Http"
    # Plain RTSP over TCP.
    TCP = "Tcp"
|
[
"[email protected]"
] | |
872530536f3f6682b50288fc140a34b61bc5ebd4
|
566754f63c0d665af01bdad8814873468f8be888
|
/python/learn/numpy/boolean.py
|
c40817a9576b6a15190233b89e8ef2a9284a34e1
|
[
"MIT"
] |
permissive
|
qrsforever/workspace
|
7f7b0363649b73e96526745f85a22e70b1c749c9
|
53c7ce7ca7da62c9fbb3d991ae9e4e34d07ece5f
|
refs/heads/master
| 2022-05-04T18:58:41.562544 | 2020-05-25T04:07:00 | 2020-05-25T04:07:00 | 82,469,335 | 2 | 0 |
MIT
| 2022-04-12T21:54:15 | 2017-02-19T15:36:43 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,015 |
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import numpy as np
import numpy.random as rand
def test1():
    """Demonstrate combining boolean masks with the & and | operators."""
    # Concentric squares: background 3, middle band 6, centre 9.
    grid = np.zeros((16, 16)) + 3
    # Rows/columns 4 .. -4 form the middle band.
    grid[4:-4, 4:-4] = 6
    grid[7:-7, 7:-7] = 9
    # print(grid)
    mask_gt2 = grid > 2
    mask_lt6 = grid < 6
    combined = mask_gt2 & mask_lt6
    combined = (grid > 3) & (grid < 9)

    copy_a = np.copy(grid, order='K')
    copy_a[combined] = 0
    print(copy_a)

    combined = (grid == 9) | (mask_gt2 & mask_lt6)
    copy_b = np.copy(grid)
    copy_b[combined] = 0
    print(copy_b)
def test2():
    """Filter random samples with a boolean mask, transform, write back."""
    # 100 draws from the standard normal distribution.
    samples = rand.randn(100)
    print(samples)
    # Select entries above the 0.2 threshold.
    mask = samples > 0.2
    selected = samples[mask]
    # Square and shift the selected subset ...
    selected = selected ** 2 - 2
    # ... then write it back in place.
    samples[mask] = selected
    print(samples)
def main():
    """Run both boolean-indexing demos in order."""
    test1()
    test2()
# Execute the demos only when run as a script, not on import.
if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
340d4560fceeb7f8f5ce6df9db6f28fa1f292720
|
2d4240a03bfa47386677a78250df220c55a7bf6c
|
/PythonCookbookLearning/chapter8/8.7.3.py
|
52c8d9a319d86872ad3ba20726dd89ad240dfb57
|
[] |
no_license
|
Falonie/Notes
|
c7976e9e7514e5d7cddf918c3c54442a89532aab
|
38e980cb5170a696626085b72795a096679e972b
|
refs/heads/master
| 2022-02-13T11:20:39.613115 | 2019-09-02T01:07:27 | 2019-09-02T01:07:27 | 99,218,947 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 443 |
py
|
class Base(object):
    """Root of the diamond hierarchy."""

    def __init__(self):
        print('Base.__init__')


class A(Base):
    """Left branch of the diamond."""

    def __init__(self):
        super().__init__()
        print('A.__init__')


class B(Base):
    """Right branch of the diamond."""

    def __init__(self):
        super().__init__()
        print('B.__init__')


class C(A, B):
    """Diamond tip; cooperative super() runs each __init__ exactly once,
    in MRO order C -> A -> B -> Base."""

    def __init__(self):
        super().__init__()
        print('C.__init__')


if __name__ == '__main__':
    instance = C()
    print(C.mro())
    print(C.__mro__)
|
[
"[email protected]"
] | |
98a4f5409336a01c4f7a38567ca2dfcdf5371cbc
|
c28783b279c89ea98967064304eb3d883940b193
|
/src/di_replication/repl_read_top_row/repl_read_top_row.py
|
b024ae54582d3a03c023813d070c989e91a29ca5
|
[
"MIT"
] |
permissive
|
thhapke/di_replication
|
c7784f7c60dee527c5498e99f66d390e94db8645
|
f23e48d60c0d76603eec5071ea57d0646a44389e
|
refs/heads/master
| 2023-02-14T14:22:54.876677 | 2021-01-14T11:40:55 | 2021-01-14T11:40:55 | 277,468,734 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,994 |
py
|
import sdi_utils.gensolution as gs
import sdi_utils.set_logging as slog
import sdi_utils.textfield_parser as tfp
import sdi_utils.tprogress as tp
import subprocess
import logging
import os
import random
from datetime import datetime, timezone
import pandas as pd
try:
api
except NameError:
class api:
queue = list()
class Message:
def __init__(self, body=None, attributes=""):
self.body = body
self.attributes = attributes
def send(port, msg):
if port == outports[1]['name']:
api.queue.append(msg)
class config:
## Meta data
config_params = dict()
version = '0.0.1'
tags = {'sdi_utils': ''}
operator_name = 'repl_read_top_row'
operator_description = "Read Top Row"
operator_description_long = "Read top row without constraint."
add_readme = dict()
add_readme["References"] = ""
debug_mode = True
config_params['debug_mode'] = {'title': 'Debug mode',
'description': 'Sending debug level information to log port',
'type': 'boolean'}
def process(msg):
    """Build a TOP-1 SELECT for the replication table named in *msg* and
    emit it on the 'msg' outport; any captured log text goes to 'log'.

    :param msg: inbound api.Message whose attributes must contain
        'replication_table' (remaining attributes are passed through).
    """
    # Copy the attributes so the inbound message is not mutated.
    att = dict(msg.attributes)
    att['operator'] = 'repl_read_top_row'
    logger, log_stream = slog.set_logging(att['operator'], loglevel=api.config.debug_mode)
    # TOP 1 without any constraint -- fetches an arbitrary single row.
    sql = 'SELECT TOP 1 * FROM {table}'.format(table=att['replication_table'])
    logger.info('SQL statement: {}'.format(sql))
    att['sql'] = sql
    api.send(outports[1]['name'], api.Message(attributes=att,body=sql))
    # Forward captured log output only when something was actually logged.
    log = log_stream.getvalue()
    if len(log) > 0 :
        api.send(outports[0]['name'], log )
inports = [{'name': 'data', 'type': 'message', "description": "Input data"}]
outports = [{'name': 'log', 'type': 'string', "description": "Logging data"}, \
{'name': 'msg', 'type': 'message', "description": "msg with sql statement"}]
#api.set_port_callback(inports[0]['name'], process)
def test_operator():
msg = api.Message(attributes={'packageid':4711,'replication_table':'repl_table','base_table':'repl_table','latency':30,\
'append_mode' : 'I', 'data_outcome':True},body='')
process(msg)
for msg in api.queue :
print(msg.attributes)
print(msg.body)
if __name__ == '__main__':
test_operator()
if True:
subprocess.run(["rm", '-r','../../../solution/operators/sdi_replication_' + api.config.version])
gs.gensolution(os.path.realpath(__file__), api.config, inports, outports)
solution_name = api.config.operator_name + '_' + api.config.version
subprocess.run(["vctl", "solution", "bundle",'../../../solution/operators/sdi_replication_' + api.config.version, \
"-t", solution_name])
subprocess.run(["mv", solution_name + '.zip', '../../../solution/operators'])
|
[
"[email protected]"
] | |
e7cb9d3d626b68c0e92c3fbeda2d26d8d2812576
|
78d35bb7876a3460d4398e1cb3554b06e36c720a
|
/sdk/recoveryservices/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/operations/_protected_item_operation_results_operations.py
|
686c9dc622990d21ec56f8dc1e20569d14958b9b
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
catchsrinivas/azure-sdk-for-python
|
e35f59b60318a31b3c940a7a3a07b61b28118aa5
|
596227a7738a5342274486e30489239d539b11d1
|
refs/heads/main
| 2023-08-27T09:08:07.986249 | 2021-11-11T11:13:35 | 2021-11-11T11:13:35 | 427,045,896 | 0 | 0 |
MIT
| 2021-11-11T15:14:31 | 2021-11-11T15:14:31 | null |
UTF-8
|
Python
| false | false | 6,058 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ProtectedItemOperationResultsOperations(object):
"""ProtectedItemOperationResultsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.recoveryservicesbackup.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
vault_name, # type: str
resource_group_name, # type: str
fabric_name, # type: str
container_name, # type: str
protected_item_name, # type: str
operation_id, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ProtectedItemResource"]
"""Fetches the result of any operation on the backup item.
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:param resource_group_name: The name of the resource group where the recovery services vault is
present.
:type resource_group_name: str
:param fabric_name: Fabric name associated with the backup item.
:type fabric_name: str
:param container_name: Container name associated with the backup item.
:type container_name: str
:param protected_item_name: Backup item name whose details are to be fetched.
:type protected_item_name: str
:param operation_id: OperationID which represents the operation whose result needs to be
fetched.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ProtectedItemResource, or the result of cls(response)
:rtype: ~azure.mgmt.recoveryservicesbackup.models.ProtectedItemResource or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ProtectedItemResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'containerName': self._serialize.url("container_name", container_name, 'str'),
'protectedItemName': self._serialize.url("protected_item_name", protected_item_name, 'str'),
'operationId': self._serialize.url("operation_id", operation_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ProtectedItemResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}/protectedItems/{protectedItemName}/operationResults/{operationId}'} # type: ignore
|
[
"[email protected]"
] | |
fccdf002d3624704682080cfcfad0a8da497660d
|
2be8a9f06d4003d12c0a727fb83d284c31a53050
|
/HoudiniHotBox17.0/lib/mantrasurface.py
|
14fd36488357f5ba0316020a601a4c3c65b4e2c7
|
[] |
no_license
|
LiuLiangFx/SmileHotBOX
|
7551d9578b2defe612950cb8e3bffdb85024cede
|
8bd8eac69b3c2a9824b9aa4488ca77789bea8d85
|
refs/heads/master
| 2021-01-01T10:22:26.959731 | 2020-02-09T03:16:32 | 2020-02-09T03:16:32 | 239,236,801 | 0 | 0 | null | 2020-02-09T02:47:18 | 2020-02-09T02:47:18 | null |
UTF-8
|
Python
| false | false | 138 |
py
|
import hou
class mantrasurface:
    """Houdini HotBox action: create a mantrasurface shader under /shop."""

    def run(self):
        # Look up the SHOP network and create the shader node inside it.
        hou.node("/shop").createNode("mantrasurface")
|
[
"[email protected]"
] | |
086a8df0c8339b236cf7ca37ad68644942a570e2
|
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
|
/python/python_2203.py
|
d82b9273211c6488b6c4ed00ac417bea5f7bc2fb
|
[] |
no_license
|
AK-1121/code_extraction
|
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
|
5297a4a3aab3bb37efa24a89636935da04a1f8b6
|
refs/heads/master
| 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 134 |
py
|
# I want the actual file name that is returned by a PHP script
urllib.urlretrieve(URL, directory + "\\" + filename + "." + extension)
|
[
"[email protected]"
] | |
2a5b864a3ebcf588854fa0df2b2e9e32ddbee910
|
3fd6e85c36a7e9e4f9ddec163a55f3602ccfb98c
|
/hw/gimbal/firmware/site_scons/site_tools/arm_none_eabi.py
|
1ed9fbfb9debd2019a680a3816a74982f6a83789
|
[
"Apache-2.0"
] |
permissive
|
SiChiTong/mjmech
|
acc5da4ac6edd9f1446cc13e471aedeea3e1c419
|
a71f35e6ad6bc9c1530a0a33d68c45d073390b79
|
refs/heads/master
| 2020-03-20T03:44:13.276650 | 2018-05-06T02:59:55 | 2018-05-06T03:04:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,177 |
py
|
# Copyright 2015 Josh Pieper, [email protected]. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Map of generic SCons tool roles to the GNU program names; generate()
# prefixes each value with 'arm-none-eabi-' to select the cross toolchain.
TOOLS = {
    'CC' : 'gcc',
    'CXX' : 'g++',
    'LD' : 'gcc',
    'AR' : 'ar',
    'AS' : 'gcc',
    'OBJCOPY' : 'objcopy',
    'OBJDUMP' : 'objdump',
}

def generate(env, **kwargs):
    """Configure *env* to cross-compile with the arm-none-eabi GNU toolchain.

    Loads the host GNU tools, then rewrites each tool variable (CC, CXX,
    LD, ...) to its 'arm-none-eabi-' prefixed counterpart.
    """
    # Let's assume that the host version of the compiler is here and
    # available.
    gnu_tools = ['gcc', 'g++', 'gnulink', 'ar', 'gas']
    for tool in gnu_tools:
        env.Tool(tool)

    # .items() (not the Python-2-only .iteritems()) keeps this working
    # under both Python 2 and Python 3 SCons.
    for key, value in TOOLS.items():
        env[key] = 'arm-none-eabi-' + value

    env.Append(ASFLAGS=['-c'])
    env['PROGSUFFIX'] = '.elf'
env['PROGSUFFIX'] = '.elf'
def exists(env):
    """SCons hook: report that this tool is always available."""
    return 1
|
[
"[email protected]"
] | |
e62bb40c823f97a4d88b9ee4884e3edb00c40a0d
|
f4b60f5e49baf60976987946c20a8ebca4880602
|
/lib64/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/dhcp/clientaddr.py
|
134e0b93623bfb9ff57a9db85dbee07038399b49
|
[] |
no_license
|
cqbomb/qytang_aci
|
12e508d54d9f774b537c33563762e694783d6ba8
|
a7fab9d6cda7fadcc995672e55c0ef7e7187696e
|
refs/heads/master
| 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 |
Python
|
UTF-8
|
Python
| false | false | 3,567 |
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class ClientAddr(Mo):
"""
The DHCP address received for the client.
"""
meta = ClassMeta("cobra.model.dhcp.ClientAddr")
meta.moClassName = "dhcpClientAddr"
meta.rnFormat = "addr-[%(address)s]"
meta.category = MoCategory.REGULAR
meta.label = "Client Address"
meta.writeAccessMask = 0x8008020040001
meta.readAccessMask = 0x8008020040001
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = True
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.dhcp.ClientIf")
meta.superClasses.add("cobra.model.dhcp.Addr")
meta.rnPrefixes = [
('addr-', True),
]
prop = PropMeta("str", "address", "address", 6133, PropCategory.REGULAR)
prop.label = "Address"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("address", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "address"))
getattr(meta.props, "address").needDelimiter = True
def __init__(self, parentMoOrDn, address, markDirty=True, **creationProps):
namingVals = [address]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"[email protected]"
] | |
b8d9f5ee64570bdf0c15ab55e124cd7e677cb144
|
be429a1e5e4903616a4532c1bf238df20fea75c0
|
/6.14/127.单词接龙.py
|
3f21e478f21ed4a7dfac01e5e07586b2c193a049
|
[] |
no_license
|
pythonnewbird/LeetCodeSolution
|
ccc8cc17df4cea3109d84b0c347ae91c1bc33a28
|
2447f760f08fb3879c5f03d8650e30ff74115d3d
|
refs/heads/master
| 2020-03-19T05:06:06.681429 | 2018-07-01T12:39:09 | 2018-07-01T12:39:09 | 135,899,944 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,296 |
py
|
class Solution(object):
    def ladderLength(self, beginWord, endWord, wordList):
        """
        Length of the shortest word ladder from beginWord to endWord (BFS).

        :type beginWord: str
        :type endWord: str
        :type wordList: List[str]
        :rtype: int  -- number of words on the shortest ladder, 0 if none
        """
        from collections import deque

        wdset = set(wordList)
        if endWord not in wdset:
            return 0

        lts = "abcdefghijklmnopqrstuvwxyz"
        dist = float("inf")
        # deque gives O(1) popleft; list.pop(0) was O(n) per dequeue.
        q = deque([beginWord])
        seen = {beginWord: 0}
        while q:
            cur = q.popleft()
            d = seen[cur]
            # BFS explores by increasing depth, so once d reaches the best
            # distance found no shorter ladder can appear.
            if d >= dist:
                break
            for i in range(len(cur)):
                for lt in lts:
                    if lt != cur[i]:
                        new = cur[:i] + lt + cur[i + 1:]
                        # Only first discovery matters; the old predecessor
                        # graph was built but never read, so it is dropped.
                        if new in wdset and new not in seen:
                            if new == endWord:
                                dist = d + 1
                            seen[new] = d + 1
                            q.append(new)
        # +1 converts the edge count into a word count.
        return dist + 1 if dist != float('inf') else 0
|
[
"[email protected]"
] | |
512a43263d45f6d4fbf19a27ad961a1de09eba30
|
fa5cb3cb27132a330673650afa1d68dd35f15251
|
/newrelic/core/thread_utilization.py
|
fd57ba9f86fc98a03d51ad739747f385a68950b0
|
[
"Apache-2.0"
] |
permissive
|
jbeveland27/newrelic-python-agent
|
95b4fdf253915100bc62bbd143066f589efc3ab9
|
86c78370ace1eba18e05de5e37aadb880f5f3ac4
|
refs/heads/main
| 2023-07-12T06:40:58.741312 | 2021-08-19T23:37:14 | 2021-08-19T23:37:14 | 398,122,410 | 1 | 0 |
Apache-2.0
| 2021-08-20T01:38:35 | 2021-08-20T01:38:33 | null |
UTF-8
|
Python
| false | false | 4,451 |
py
|
# Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from newrelic.samplers.decorators import data_source_factory
try:
from newrelic.core._thread_utilization import ThreadUtilization
except ImportError:
ThreadUtilization = None
_utilization_trackers = {}
def utilization_tracker(application):
return _utilization_trackers.get(application)
class ThreadUtilizationDataSource(object):
def __init__(self, application):
self._consumer_name = application
self._utilization_tracker = None
self._last_timestamp = None
self._utilization = None
def start(self):
if ThreadUtilization:
utilization_tracker = ThreadUtilization()
_utilization_trackers[self._consumer_name] = utilization_tracker
self._utilization_tracker = utilization_tracker
self._last_timestamp = time.time()
self._utilization = self._utilization_tracker.utilization_count()
def stop(self):
try:
self._utilization_tracker = None
self._last_timestamp = None
self._utilization = None
del _utilization_trackers[self.source_name]
except Exception:
pass
def __call__(self):
if self._utilization_tracker is None:
return
now = time.time()
# TODO This needs to be pushed down into _thread_utilization.c.
# In doing that, need to fix up UtilizationClass count so the
# reset is optional because in this case a read only variant is
# needed for getting a per request custom metric of the
# utilization during period of the request.
#
# TODO This currently doesn't take into consideration coroutines
# and instance bust percentage is percentage of a single thread
# and not of total available coroutines. Not sure whether can
# generate something meaningful for coroutines. Also doesn't
# work for asynchronous systems such as Twisted.
new_utilization = self._utilization_tracker.utilization_count()
elapsed_time = now - self._last_timestamp
utilization = new_utilization - self._utilization
utilization = utilization / elapsed_time
self._last_timestamp = now
self._utilization = new_utilization
total_threads = None
try:
# Recent mod_wsgi versions publish the number of actual
# threads so we can use this real value instead of the
# calculated value. This is important in order to get the
# correct utilization value for mod_wsgi daemon mode as the
# way it manages the thread pool it may not actually
# activate all available threads if the requirement isn't
# there for them. Thus the utilization figure will be too
# high as would only be calculated relative to the activated
# threads and not the total of what is actually available.
import mod_wsgi
total_threads = mod_wsgi.threads_per_process
except Exception:
pass
if total_threads is None:
total_threads = self._utilization_tracker.total_threads()
if total_threads:
# Don't report any metrics if don't detect any threads
# available and in use for handling web transactions,
# otherwise we end up report zero metrics for task systems
# such as Celery which skews the results wrongly.
yield ('Instance/Available', total_threads)
yield ('Instance/Used', utilization)
busy = total_threads and utilization/total_threads or 0.0
yield ('Instance/Busy', busy)
@data_source_factory(name='Thread Utilization')
def thread_utilization_data_source(settings, environ):
return ThreadUtilizationDataSource(environ['consumer.name'])
|
[
"[email protected]"
] | |
bf84a23ac25841aaf18ddc5f2a8785a878f6e123
|
3313419e883041b04bd09f7e905dc9fb24cd8ec8
|
/multi_kmeans_group_line_chart.py
|
7559a28604b94a194b5308ec440890374719a7d0
|
[] |
no_license
|
xiaosean/preprocess_py
|
d6d46a91be0d31c3ac082c4dc21587b27b34bf11
|
fa480a0f8401c4ccff61ea8215bcf40802b2ba36
|
refs/heads/master
| 2023-06-23T17:30:19.296637 | 2017-11-06T23:12:02 | 2017-11-06T23:12:04 | 90,627,403 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,789 |
py
|
import numpy as np
import pandas as pd
from time import time
from sklearn.cluster import KMeans
from pandas.computation import expressions as expr
from bokeh.charts import Line, show, output_file, save
import pprint as pp
import sys
from bokeh.palettes import Spectral11, Category10
# set configure
# path = "./CDR_NORMALIZE_CONCAT/"
path = "./CDR_FINAL/"
filename = "0703normalize_65_cols.csv"
# path = "./CDR_ANALYZE/"
# path = "./CDR_CONCAT/"
# filename = sys.argv[1]
# filename = "CDR_CONCAT_TABLE_4_max_min.csv"
# filename = "CDR_CONCAT_TABLE_4.csv"
relative_filename = path + filename #+ ".csv"
# out_filename = "mds_mly_minus20160901"
# after the numeric_st_idx's number will be tranform to int64
# numeric_st_idx = 1
# K = 8
label_path = "./kmean_label/"
output_path = "./CDR_CONCAT_ANALYZE_GRAPH/"
# output_path = "./CDR_CONCAT_ANALYZE_GRAPH_MINI/"
# read revise csv file and print cost time
# just load 5 data
t0 = time()
df_src = pd.read_csv(relative_filename, error_bad_lines=False)
print("time for read csv: %.2f" % (time()-t0))
# -------------------------
for c in df_src.columns[1:]:
if not "CNT" in c:
df_src = df_src.drop(c, 1)
# ----------------------------
# df = df.drop('MINING_DW_SUBSCR_NO', 1)
df_group = pd.read_csv('DNA_KMEANS_RESULT_ID_NEW.csv', error_bad_lines = False)
groups_name = ['1', '2', '3', '4', '5', '6', '7', '8', 'seldom', 'None']
# groups_name = ['1', '2', '3', '4', '5', '6', '7', '8']
# Ks = [8, 7, 6, 7, 5, 6, 6, 8, 7, 7]
Ks = [6, 4, 6, 7, 7, 6, 8, 7, 7, 7]
evening = "Evening user"
moring = "Morning user"
midnight = "Midnight user"
lunch = "Lunch time user"
All = "All day user"
dinner = "Dinner time user"
afternoon = "Afternoon user"
cluster_name = [
{107141:moring, 121153:midnight, 17176:lunch, 59992:afternoon, 32089:evening, 70046:dinner},
{25449:evening, 30950:dinner, 5441:lunch, 59944:midnight, 62860:All},
{20553:afternoon, 20809:dinner, 26798:moring, 32848:midnight, 4801:lunch},
{17959:evening, 24518:dinner, 33790:moring, 35510:midnight, 5181:lunch},
{17238:evening, 25183:dinner, 32834:moring, 31327:midnight, 3892:lunch},
{14298:midnight, 21404:"Late midnight user", 35439:moring, 35802:dinner, 39104:"Office time user"},
{19744:evening, 24966:afternoon, 33129:"Night user", 41770:moring, 44540:midnight},
{106596:dinner, 124046:moring, 146613:midnight, 21343:lunch, 91568:afternoon}
]
norm = "0704"
df_src['Groups'] = df_group['Groups']
for j in range(8):
K = Ks[j]
group = groups_name[j]
df = df_src[df_src['Groups'] == group]
label_path = "./kmean_label/"
# label_name = "label_K" + str(K) + "_de_with_kid_" + group + "_" + norm + ".npy"
label_name = "label_K" + str(K) + "__" + group + "_" + norm + ".npy"
labels_ = np.load(label_path + label_name)
# df.loc['label',list(map(str, df.index))] = labels_
df['label'] = labels_
grouped = df.groupby('label')
print(group)
df['label'] = labels_
grouped = df.drop(['MINING_DW_SUBSCR_NO', 'Groups'], 1).groupby('label')
# grouped = df.groupby('label')
# get count
group_count = grouped[df.columns[1]].count().values
# df = df.drop('MINING_DW_SUBSCR_NO', 1)
# get mean
group_mean = grouped.mean()
# cluster_name = {1012:'每通通話量長', 1470990:'幾乎不用', 23626:'高度使用', 283083:'有在使用', 48456:'夜貓族', 3601:'超高度使用', 68665:'中度使用', 697364:'稍微使用'}
# aggregate display data
data = {}
for i in range(K):
# data[str(i)] = grouped.mean().values[i]
# if "HOUR" in filename:
# # data[cluster_name[cluster_result[i]] + "(" + str(cluster_result[i]) + ")"] = list(map(lambda x: x/30,grouped.mean().values[i]))
# # data["(" + str(group_count[i]) + ")"] = list(map(lambda x: x/30, group_mean.values[i][1:]))
# else:
# # data[cluster_name[cluster_result[i]] + "(" + str(cluster_result[i]) + ")"] = list(map(lambda x: x/4,grouped.mean().values[i]))
# data["(" + str(group_count[i]) + ")"] = list(map(lambda x: x/4, group_mean.values[i][1:]))
# data[cluster_name[j][group_count[i]] + "(" + str(group_count[i]) + ")"] = group_mean.values[i]
data["(" + str(group_count[i]) + ")"] = group_mean.values[i]
# data[str(cluster_name[i])] = grouped.mean().values[i]
pp.pprint(df.columns[1:-2])
# select label
# xl = str(df.columns)
# xl = "MO_0_24 MT_0_24 MO_SUN_SAT_w_h MT_SUN_SAT_w_h"
xl = "hour"
# if filename.find("WORK") != -1:
# xl = str(df.columns[1:])
# elif filename.find("hours") == -1:
# xl = "SUN ~ SAT"
# yl = "time"
# if filename.find("TIME") == -1:
# yl = "count"
yl = "percentage"
# draw
# # set line colors
# mycolors = []
# # if K > 5:
# # mycolors = Spectral11[0:5] + Spectral11[6:K + 1]
# # else:
# # mycolors = Spectral11[0:K]
# for i in range(K):
# mycolors.append(Spectral11[i * 2])
title = "Group " + group
line = Line(data, ylabel = yl, xlabel = xl, color = Category10[10], title = title, legend = "top_center")
# line = Line(data, ylabel = 'mean ' + sys.argv[2], xlabel = xl)
# line.legend.orientation = 'horizontal'
legend = line.legend
legend.plot = None
legend.location = (0 , 300)
line.add_layout(legend[0], "right")
line.xaxis.axis_label_text_font_size = '20px'
line.yaxis.axis_label_text_font_size = '20px'
line.title.text_font_size = '30px'
# save file
# output_file("test_K" + str(i + 1) + ".html")
output_filename = ("%s_K%d_G%s_%s_line.html" % (filename[:-4], K, group, norm))
output_file(output_path + output_filename)
# output_file(output_path + filename[:-4] + "_K" + str(K) + "_NAME_distribution.html")
save(line)
# show(line)
# # save file
# # output_file("test_K" + str(i + 1) + ".html")
# line.title.text = title + " DETAIL"
# output_file(output_path + filename[:-4] + "_K" + str(K) + "_NAME_LARGE_distribution.html")
# save(line)
# # show(line)
|
[
"[email protected]"
] | |
a47d8415c94513aab2c7019425699a484a4715b5
|
9b64f0f04707a3a18968fd8f8a3ace718cd597bc
|
/huaweicloud-sdk-projectman/huaweicloudsdkprojectman/v4/model/list_child_issues_v4_response.py
|
3c616232bd4fb42bfe7dfe5c4fc8ea0de1d33c81
|
[
"Apache-2.0"
] |
permissive
|
jaminGH/huaweicloud-sdk-python-v3
|
eeecb3fb0f3396a475995df36d17095038615fba
|
83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b
|
refs/heads/master
| 2023-06-18T11:49:13.958677 | 2021-07-16T07:57:47 | 2021-07-16T07:57:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,537 |
py
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ListChildIssuesV4Response(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'issues': 'list[IssueResponseV4]',
'total': 'int'
}
attribute_map = {
'issues': 'issues',
'total': 'total'
}
def __init__(self, issues=None, total=None):
"""ListChildIssuesV4Response - a model defined in huaweicloud sdk"""
super(ListChildIssuesV4Response, self).__init__()
self._issues = None
self._total = None
self.discriminator = None
if issues is not None:
self.issues = issues
if total is not None:
self.total = total
@property
def issues(self):
"""Gets the issues of this ListChildIssuesV4Response.
工作项列表
:return: The issues of this ListChildIssuesV4Response.
:rtype: list[IssueResponseV4]
"""
return self._issues
@issues.setter
def issues(self, issues):
"""Sets the issues of this ListChildIssuesV4Response.
工作项列表
:param issues: The issues of this ListChildIssuesV4Response.
:type: list[IssueResponseV4]
"""
self._issues = issues
@property
def total(self):
"""Gets the total of this ListChildIssuesV4Response.
总数
:return: The total of this ListChildIssuesV4Response.
:rtype: int
"""
return self._total
@total.setter
def total(self, total):
"""Sets the total of this ListChildIssuesV4Response.
总数
:param total: The total of this ListChildIssuesV4Response.
:type: int
"""
self._total = total
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
import simplejson as json
return json.dumps(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListChildIssuesV4Response):
return False
return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        # Defined explicitly for Python 2 compatibility; delegates to __eq__.
        return not self == other
|
[
"[email protected]"
] | |
7b25d3a0b38ecf71f28ab8ade8d455c4f755784e
|
32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd
|
/benchmark/redreader/testcase/firstcases/testcase8_000.py
|
487ccbe5525b05345cc0e3f99e97b78ace629218
|
[] |
no_license
|
Prefest2018/Prefest
|
c374d0441d714fb90fca40226fe2875b41cf37fc
|
ac236987512889e822ea6686c5d2e5b66b295648
|
refs/heads/master
| 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,361 |
py
|
#coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
# Appium desired capabilities: drive the RedReader app on an Android 4.4
# emulator, with JaCoCo coverage instrumentation enabled via androidCoverage.
desired_caps = {
    'platformName' : 'Android',
    'deviceName' : 'Android Emulator',
    'platformVersion' : '4.4',
    'appPackage' : 'org.quantumbadger.redreader',
    'appActivity' : 'org.quantumbadger.redreader.activities.MainActivity',
    'resetKeyboard' : True,
    'androidCoverage' : 'org.quantumbadger.redreader/org.quantumbadger.redreader.JacocoInstrumentation',
    'noReset' : True
    }
def command(cmd, timeout=5):
    """Run *cmd* in a shell, wait *timeout* seconds, then terminate it.

    NOTE(review): this always sleeps the full *timeout* even if the command
    finishes earlier, and the piped stdout/stderr is never read.
    """
    p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
    time.sleep(timeout)
    p.terminate()
    return
def getElememt(driver, str) :
    """Find a UI element by UiAutomator selector, retrying up to 5 times.

    Each miss sleeps 1 second.  If the element never appears, tap the
    screen once to wake the UI and perform one final (raising) lookup.
    """
    attempts = 5
    while attempts > 0:
        try:
            found = driver.find_element_by_android_uiautomator(str)
        except NoSuchElementException:
            time.sleep(1)
            attempts -= 1
        else:
            return found
    # Last resort: poke the screen, then let the final lookup raise if absent.
    os.popen("adb shell input tap 50 50")
    return driver.find_element_by_android_uiautomator(str)
def getElememtBack(driver, str1, str2) :
    """Find an element by selector *str1* (2 attempts) or *str2* (5 attempts).

    Each miss sleeps 1 second.  As a last resort, tap the screen once and
    perform a final (raising) lookup with *str2*.
    """
    for selector, attempts in ((str1, 2), (str2, 5)):
        for _ in range(attempts):
            try:
                found = driver.find_element_by_android_uiautomator(selector)
            except NoSuchElementException:
                time.sleep(1)
            else:
                return found
    # Neither selector matched: poke the screen and try str2 one last time.
    os.popen("adb shell input tap 50 50")
    return driver.find_element_by_android_uiautomator(str2)
def swipe(driver, startxper, startyper, endxper, endyper) :
    """Swipe across the screen using fractional start/end coordinates.

    Coordinates are fractions of the window size.  If the first swipe
    raises WebDriverException, wait 1 second and retry once.
    """
    size = driver.get_window_size()
    x0 = int(size["width"] * startxper)
    y0 = int(size["height"] * startyper)
    x1 = int(size["width"] * endxper)
    y1 = int(size["height"] * endyper)
    try:
        driver.swipe(start_x=x0, start_y=y0, end_x=x1, end_y=y1, duration=2000)
    except WebDriverException:
        time.sleep(1)
        driver.swipe(start_x=x0, start_y=y0, end_x=x1, end_y=y1, duration=2000)
    return
# testcase000
# NOTE(review): this block uses Python 2 syntax ("except Exception, e" and
# print statements) and cannot run under Python 3.
try :
    starttime = time.time()
    # Connect to the local Appium server and launch RedReader.
    driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
    element = getElememtBack(driver, "new UiSelector().text(\"All Subreddits\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().className(\"android.widget.TextView\").description(\"Sort Posts\")")
    TouchAction(driver).long_press(element).release().perform()
    # Keycode 4 = Android BACK button.
    driver.press_keycode(4)
    element = getElememt(driver, "new UiSelector().className(\"android.widget.TextView\").description(\"Sort Posts\")")
    TouchAction(driver).long_press(element).release().perform()
except Exception, e:
    print 'FAIL'
    print 'str(e):\t\t', str(e)
    print 'repr(e):\t', repr(e)
    print traceback.format_exc()
else:
    print 'OK'
finally:
    cpackage = driver.current_package
    endtime = time.time()
    print 'consumed time:', str(endtime - starttime), 's'
    # Ask the instrumented app to dump its JaCoCo coverage data.
    command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"8_000\"")
    jacocotime = time.time()
    print 'jacoco time:', str(jacocotime - endtime), 's'
    driver.quit()
    # Force-stop whatever foreground package the test left behind.
    if (cpackage != 'org.quantumbadger.redreader'):
        cpackage = "adb shell am force-stop " + cpackage
        os.popen(cpackage)
|
[
"[email protected]"
] | |
c74462826047dc31ce78bc75478dd40b6448f120
|
ef9cb55b02ababca58ce6880b575120c4f28fdb9
|
/blog/models.py
|
acf0d8f124a21c803016a1554c92811cf188da95
|
[] |
no_license
|
mikohan/portfolio
|
dc6226f3aee73b5af181a0ecc9a13668dde2fe4e
|
51fda71a4fecec77ff207eb94e514f6924eaf44e
|
refs/heads/master
| 2020-05-30T09:11:51.866902 | 2019-06-04T05:29:27 | 2019-06-04T05:29:27 | 189,637,532 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 463 |
py
|
from django.db import models
class Blog(models.Model):
    """A blog post: title, publication date, body text and an image."""
    title = models.CharField(max_length=255)
    pub_date = models.DateTimeField()
    body = models.TextField()
    image = models.ImageField(upload_to='images/')
    def __str__(self):
        # Human-readable representation (used e.g. by the Django admin).
        return self.title
    def cut(self):
        """Return a teaser: the first 40 whitespace-separated words of body."""
        split = self.body.split()
        desc = ' '.join(split[:40])
        return desc
    def pub_date_pretty(self):
        # NOTE(review): '%e' is a glibc strftime extension; it fails on
        # Windows. Confirm the deployment platform before relying on it.
        return self.pub_date.strftime('%b %e %Y')
|
[
"[email protected]"
] | |
6a6cb216fa9aee4db2182b77261d6d65dfd2fed7
|
d7d524d1c0ba1cf62cdbc2f9bf5b9c66fa56726b
|
/armstrong interval.py
|
626c90c1598da113f54258b1c5f678f4dccdac20
|
[] |
no_license
|
ramyasutraye/pythonproject
|
d997ca5ada024e211b6bf087d0d56684daf9df8b
|
38975a99eb3ee1ad9e79a9efd538cc992d249fc3
|
refs/heads/master
| 2020-04-23T19:30:10.128774 | 2018-05-25T06:18:53 | 2018-05-25T06:18:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 268 |
py
|
a = int(input("Enter lower range: "))
b = int(input("Enter upper range: "))
# Print every Armstrong (narcissistic) number in [a, b]: a number equal to
# the sum of its own digits, each raised to the number-of-digits power.
for num in range(a, b + 1):
    order = len(str(num))
    digit_sum = 0  # renamed from 'sum' to stop shadowing the builtin
    temp = num
    while temp > 0:
        digit = temp % 10
        digit_sum += digit ** order
        temp //= 10
    if num == digit_sum:
        print(num)
|
[
"[email protected]"
] | |
6b5991808844bf4bf53bb9ef1f2ba289ed0cbe2d
|
6846a0469efc79b89edc8f856944d5a8005d7244
|
/id_0123.py
|
8f711263e8087edcc8d3178a22f25e1d21fd0249
|
[] |
no_license
|
CGenie/project_euler
|
42cb966e13645339490046eb44a729660ae0c092
|
cc90edd061b0f4d9e076d5a684b842c202a6812a
|
refs/heads/master
| 2020-06-05T00:41:49.266961 | 2014-01-13T19:11:31 | 2014-01-13T19:11:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 992 |
py
|
#!/usr/bin/python
# #####################################################################
# id_0123.py
#
# Przemyslaw Kaminski <[email protected]>
# Time-stamp: <>
######################################################################
from helper_py3 import memoize
def gen_primes():
    """Yield the prime numbers in increasing order, indefinitely.

    Keeps every prime found so far and trial-divides each odd candidate.
    Division now stops once the divisor's square exceeds the candidate
    (sufficient for primality), instead of scanning the whole prime list
    for every candidate as the original did.  The yielded sequence is
    unchanged: 2, 3, 5, 7, 11, ...
    """
    lst_primes = [2]
    yield 2
    p = 3
    while True:
        prime = True
        for x in lst_primes:
            if x * x > p:
                # No divisor <= sqrt(p) exists, so p is prime.
                break
            if p % x == 0:
                prime = False
                break
        if prime:
            lst_primes.append(p)
            yield p
        p += 2
if __name__ == '__main__':
    # Project Euler 123: find the least n such that the remainder of
    # (p_n - 1)^n + (p_n + 1)^n divided by p_n^2 first exceeds 10^10.
    gp = gen_primes()
    M = 10**10
    n = 0
    while True:
        pn = next(gp)
        n += 1
        # The remainder can only exceed M once pn^2 is at least M.
        if pn**2 >= M:
            # Closed form of ((pn-1)^n + (pn+1)^n) mod pn^2:
            # evaluates to 2 for even n and to 2*n*pn for odd n.
            ret = ((-1)**n + 1 + ((-1)**(n - 1) + 1)*n*pn) % pn**2
            if (n + 1) % 100 == 0:
                # Periodic progress trace.
                print("pn = " + str(pn) + ", n = " + str(n) + ", ret = " + str(ret))
            if ret > M:
                print("sol = " + str(n))
                break
|
[
"[email protected]"
] | |
6a22e8f4dffd272e12fba138916e4c7de47b0cfc
|
80d50ea48e10674b1b7d3f583a1c4b7d0b01200f
|
/src/datadog_api_client/v1/model/geomap_widget_definition.py
|
a47dba11739a217d6ebe6cc92133fe5fc63bbc9e
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"MPL-2.0"
] |
permissive
|
DataDog/datadog-api-client-python
|
3e01fa630278ad0b5c7005f08b7f61d07aa87345
|
392de360e7de659ee25e4a6753706820ca7c6a92
|
refs/heads/master
| 2023-09-01T20:32:37.718187 | 2023-09-01T14:42:04 | 2023-09-01T14:42:04 | 193,793,657 | 82 | 36 |
Apache-2.0
| 2023-09-14T18:22:39 | 2019-06-25T22:52:04 |
Python
|
UTF-8
|
Python
| false | false | 5,014 |
py
|
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from __future__ import annotations
from typing import List, Union, TYPE_CHECKING
from datadog_api_client.model_utils import (
ModelNormal,
cached_property,
unset,
UnsetType,
)
if TYPE_CHECKING:
from datadog_api_client.v1.model.widget_custom_link import WidgetCustomLink
from datadog_api_client.v1.model.geomap_widget_request import GeomapWidgetRequest
from datadog_api_client.v1.model.geomap_widget_definition_style import GeomapWidgetDefinitionStyle
from datadog_api_client.v1.model.widget_time import WidgetTime
from datadog_api_client.v1.model.widget_text_align import WidgetTextAlign
from datadog_api_client.v1.model.geomap_widget_definition_type import GeomapWidgetDefinitionType
from datadog_api_client.v1.model.geomap_widget_definition_view import GeomapWidgetDefinitionView
class GeomapWidgetDefinition(ModelNormal):
    """Dashboard widget that displays a series of values by country on a world map."""
    # Exactly one request object is allowed per geomap widget.
    validations = {
        "requests": {
            "max_items": 1,
            "min_items": 1,
        },
    }

    @cached_property
    def openapi_types(_):
        # Imports are local to avoid circular imports between model modules.
        from datadog_api_client.v1.model.widget_custom_link import WidgetCustomLink
        from datadog_api_client.v1.model.geomap_widget_request import GeomapWidgetRequest
        from datadog_api_client.v1.model.geomap_widget_definition_style import GeomapWidgetDefinitionStyle
        from datadog_api_client.v1.model.widget_time import WidgetTime
        from datadog_api_client.v1.model.widget_text_align import WidgetTextAlign
        from datadog_api_client.v1.model.geomap_widget_definition_type import GeomapWidgetDefinitionType
        from datadog_api_client.v1.model.geomap_widget_definition_view import GeomapWidgetDefinitionView

        return {
            "custom_links": ([WidgetCustomLink],),
            "requests": ([GeomapWidgetRequest],),
            "style": (GeomapWidgetDefinitionStyle,),
            "time": (WidgetTime,),
            "title": (str,),
            "title_align": (WidgetTextAlign,),
            "title_size": (str,),
            "type": (GeomapWidgetDefinitionType,),
            "view": (GeomapWidgetDefinitionView,),
        }

    # Python attribute name -> JSON field name.
    attribute_map = {
        "custom_links": "custom_links",
        "requests": "requests",
        "style": "style",
        "time": "time",
        "title": "title",
        "title_align": "title_align",
        "title_size": "title_size",
        "type": "type",
        "view": "view",
    }

    def __init__(
        self_,
        requests: List[GeomapWidgetRequest],
        style: GeomapWidgetDefinitionStyle,
        type: GeomapWidgetDefinitionType,
        view: GeomapWidgetDefinitionView,
        custom_links: Union[List[WidgetCustomLink], UnsetType] = unset,
        time: Union[WidgetTime, UnsetType] = unset,
        title: Union[str, UnsetType] = unset,
        title_align: Union[WidgetTextAlign, UnsetType] = unset,
        title_size: Union[str, UnsetType] = unset,
        **kwargs,
    ):
        """
        This visualization displays a series of values by country on a world map.

        :param custom_links: A list of custom links.
        :type custom_links: [WidgetCustomLink], optional

        :param requests: Array of one request object to display in the widget. The request must contain a ``group-by`` tag whose value is a country ISO code.

            See the `Request JSON schema documentation <https://docs.datadoghq.com/dashboards/graphing_json/request_json>`_
            for information about building the ``REQUEST_SCHEMA``.
        :type requests: [GeomapWidgetRequest]

        :param style: The style to apply to the widget.
        :type style: GeomapWidgetDefinitionStyle

        :param time: Time setting for the widget.
        :type time: WidgetTime, optional

        :param title: The title of your widget.
        :type title: str, optional

        :param title_align: How to align the text on the widget.
        :type title_align: WidgetTextAlign, optional

        :param title_size: The size of the title.
        :type title_size: str, optional

        :param type: Type of the geomap widget.
        :type type: GeomapWidgetDefinitionType

        :param view: The view of the world that the map should render.
        :type view: GeomapWidgetDefinitionView
        """
        # Optional attributes are only forwarded when explicitly supplied.
        if custom_links is not unset:
            kwargs["custom_links"] = custom_links
        if time is not unset:
            kwargs["time"] = time
        if title is not unset:
            kwargs["title"] = title
        if title_align is not unset:
            kwargs["title_align"] = title_align
        if title_size is not unset:
            kwargs["title_size"] = title_size
        super().__init__(kwargs)

        self_.requests = requests
        self_.style = style
        self_.type = type
        self_.view = view
|
[
"[email protected]"
] | |
09b0bfd7ba89b753adde15365a1674b25fb38d71
|
1488955228c48cbaff586e2a8d86249a47645d0d
|
/app/main/views.py
|
ec63c98a0f7b5aab3d87e02ab569d663a4452b22
|
[] |
no_license
|
vincentouma/watchlist
|
a286c9d09bb06b18edfa4bc8883e9ec7f302bd01
|
329f90c23e373e14a29f1764cb8958adbbb02279
|
refs/heads/master
| 2020-06-28T11:00:17.353435 | 2019-08-02T10:19:16 | 2019-08-02T10:19:16 | 198,234,792 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,956 |
py
|
from flask import render_template,request,redirect,url_for,abort
from . import main
from ..requests import get_movies,get_movie,search_movie
from ..models import Review, User
from .forms import ReviewForm,UpdateProfile
from flask_login import login_required,current_user
from .. import db,photos
import markdown2
#views
@main.route('/')
def index():
    '''
    View function for the root page: renders the landing page with popular,
    upcoming and now-playing movies, or redirects to the search view when a
    'movie_query' query parameter is present.
    '''
    popular_movies = get_movies('popular')
    upcoming_movie = get_movies('upcoming')
    now_showing_movie = get_movies('now_playing')
    title = 'Home - Welcome to The Best best Movie Review Website Online'
    # Renamed from 'search_movie': that name shadowed the helper imported
    # from ..requests (used by the search() view below).
    movie_query = request.args.get('movie_query')
    if movie_query:
        return redirect(url_for('.search', movie_name=movie_query))
    else:
        return render_template('index.html', title=title, popular=popular_movies, upcoming=upcoming_movie, now_playing=now_showing_movie)
@main.route('/movie/<int:id>')
def movie(id):
    '''
    Render the detail page for a single movie, including its reviews.

    id: numeric movie id taken from the URL.
    '''
    movie = get_movie(id)
    title = f'{movie.title}'
    reviews = Review.get_reviews(movie.id)
    return render_template('movie.html', title = title, movie = movie, reviews = reviews)
@main.route('/search/<movie_name>')
def search(movie_name):
    '''
    Render the search results page for *movie_name*.
    '''
    # Replace spaces with '+' so the name can be used in the API query string.
    movie_name_list = movie_name.split(" ")
    movie_name_format = "+".join(movie_name_list)
    searched_movies = search_movie(movie_name_format)
    # Fixed user-visible typo: 'search resultd' -> 'search results'.
    title = f'search results for {movie_name}'
    return render_template('search.html', title = title, movies = searched_movies)
@main.route('/movie/review/new/<int:id>', methods = ['GET','POST'])
@login_required
def new_review(id):
    '''
    Show the review form for movie *id* and, on a valid POST, persist the
    review for the current user and redirect back to the movie page.
    '''
    form = ReviewForm()
    movie = get_movie(id)
    if form.validate_on_submit():
        title = form.title.data
        review = form.review.data
        # Build the new review, attributed to the logged-in user.
        new_review = Review(movie_id=movie.id,movie_title=title,image_path=movie.poster,movie_review=review,user=current_user)
        # Persist it via the model's save helper.
        new_review.save_review()
        return redirect(url_for('.movie',id = movie.id ))
    title = f'{movie.title} review'
    return render_template('new_review.html',title = title, review_form=form, movie=movie)
@main.route('/user/<uname>')
def profile(uname):
    """Show the profile page for user *uname*; 404 if no such user exists."""
    found = User.query.filter_by(username=uname).first()
    if found is None:
        abort(404)
    return render_template("profile/profile.html", user=found)
@main.route('/user/<uname>/update',methods = ['GET','POST'])
@login_required
def update_profile(uname):
    '''
    Show the profile-edit form for user *uname* and, on a valid POST,
    save the updated bio and redirect back to the profile page.
    '''
    user = User.query.filter_by(username = uname).first()
    if user is None:
        abort(404)
    form = UpdateProfile()
    if form.validate_on_submit():
        user.bio = form.bio.data
        db.session.add(user)
        db.session.commit()
        return redirect(url_for('.profile',uname=user.username))
    return render_template('profile/update.html',form =form)
@main.route('/user/<uname>/update/pic',methods= ['POST'])
@login_required
def update_pic(uname):
    '''
    Store an uploaded profile picture for user *uname* and record its path.
    '''
    user = User.query.filter_by(username = uname).first()
    if 'photo' in request.files:
        filename = photos.save(request.files['photo'])
        # Fixed: the path literal had lost its placeholder and stored the
        # same string 'photos/(unknown)' for every user instead of the
        # saved file's name returned by photos.save().
        path = f'photos/{filename}'
        user.profile_pic_path = path
        db.session.commit()
    return redirect(url_for('main.profile',uname=uname))
@main.route('/review/<int:id>')
def single_review(id):
    """Render one review, with its body converted from Markdown to HTML."""
    review = Review.query.get(id)
    if review is None:
        abort(404)
    rendered = markdown2.markdown(review.movie_review, extras=["code-friendly", "fenced-code-blocks"])
    return render_template('review.html', review=review, format_review=rendered)
|
[
"[email protected]"
] | |
da6cc4d0465295d7dfc8e71959ada0bb8de28a93
|
87706e10023b027bf6b4ef9146242a99c0ebbea2
|
/docs/conf.py
|
1fe4bd3668497bf382483451a6823a64d9af1fb9
|
[
"Unlicense"
] |
permissive
|
Kadantte/anime-downloader
|
206dc7b9850d6494135ee143c4069df024e500d0
|
24de83d4ef392e17f39710cc9054ff90e3602533
|
refs/heads/master
| 2022-09-24T02:16:30.770196 | 2022-09-12T11:12:28 | 2022-09-12T11:12:28 | 168,595,085 | 8 | 0 |
Unlicense
| 2022-09-12T15:01:57 | 2019-01-31T20:54:19 |
Python
|
UTF-8
|
Python
| false | false | 5,643 |
py
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Make the package importable for autodoc.
sys.path.insert(0, os.path.abspath('../anime_downloader'))

# -- Project information -----------------------------------------------------

project = 'anime-downloader'
copyright = '2018, Vishnunarayan K I'
author = 'Vishnunarayan K I'

# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '3.5.0'

# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
    'sphinx.ext.napoleon',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): newer Sphinx releases warn on None; use a string like 'en'.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}

# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'anime-downloaderdoc'

# -- Options for LaTeX output ------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'anime-downloader.tex', 'anime-downloader Documentation',
     'Vishnunarayan K I', 'manual'),
]

# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'anime-downloader', 'anime-downloader Documentation',
     [author], 1)
]

# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'anime-downloader', 'anime-downloader Documentation',
     author, 'anime-downloader', 'One line description of project.',
     'Miscellaneous'),
]

# -- Options for Epub output -------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''

# A unique identification for the text.
#
# epub_uid = ''

# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']

# -- Extension configuration -------------------------------------------------

# -- Options for todo extension ----------------------------------------------

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
|
[
"[email protected]"
] | |
afd81f81f1f1b883587446ae90c0eef7fe9119b6
|
7d02813987b49c2a69d92b9b2fdf5148af37274f
|
/case/Recommend/testAccountBind.py
|
bfa36a597587e27610642247da283901f0f4eb06
|
[] |
no_license
|
xgh321324/api_test
|
29e01cbe5f0b7c2df25fb7e781cedf8031140c72
|
2575495baac3ab90adab7a7a85904c38a78dd4b7
|
refs/heads/master
| 2022-07-23T19:54:39.320828 | 2022-07-02T09:13:35 | 2022-07-02T09:13:35 | 129,185,513 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,803 |
py
|
#coding:utf-8
from common.login_lanting import auto_login_by_UID
import requests,unittest,time,json
from common.logger import Log
from common.Hash import get_digit,get_sign
from common.Excel import Excel_util
class Account(unittest.TestCase):
    """API tests for binding/unbinding a withdrawal (payout) account."""
    @classmethod
    def setUpClass(cls):
        # One shared HTTP session, auth token and request headers for all tests.
        cls.s = requests.session()
        cls.to = auto_login_by_UID()
        cls.header = {'User-Agent': 'PelvicFloorPersonal/4.1.1 (iPad; iOS 10.1.1; Scale/2.00)',
                      'Accept-Encoding': 'gzip, deflate',
                      'Accept-Language': 'zh-Hans-CN;q=1',
                      'Content-Type': 'application/json',
                      'requestApp': '2',
                      'requestclient': '2',
                      'versionForApp': '4.4.0',
                      'Authorization': 'Basic YXBpTGFudGluZ0BtZWRsYW5kZXIuY29tOkFwaVRobWxkTWxkQDIwMTM=',
                      'Connection': 'keep-alive'
                      }
        cls.log = Log()
        cls.excel = Excel_util(r'C:\Users\Administrator\Desktop\Interface_testcase.xls')
    def test_bind_account01(self):
        u'Bind withdrawal account API'
        self.log.info('开始测试绑定账号接口..')
        url = 'http://api-rec.sunnycare.cc/v1/account/bind'
        json_data = {
            'token': self.to,
            'timestamp': str(int(time.time())),
            'alipay_account': '2088012687108144',
            'real_name': '许广会',
            'nick_name': '许广会',
            'nonce': get_digit()
        }
        json_data['sign'] = get_sign(json_data)
        r = self.s.post(url,headers = self.header,json=json_data)
        self.log.info('绑定支付宝返回:%s' % r.json())
        # Assertions
        self.assertEqual(200,r.json()['code'],msg='返回状态码不是200')
        self.assertEqual('请求成功',r.json()['note'])
        self.log.info('绑定账号接口测试结束!\n')
    def test_bind_account02(self):
        u'Unbind withdrawal account API'
        self.log.info('开始测试解除绑定账号接口..')
        url = 'http://api-rec.sunnycare.cc/v1/account/unbind'
        json_data = {
            'token': self.to,
            'timestamp': str(int(time.time())),
            'type': '0',# 0 = Alipay; 1 = WeChat
            'nonce': get_digit()
        }
        json_data['sign'] = get_sign(json_data)
        r = self.s.post(url,headers = self.header,json=json_data)
        self.log.info('解除绑定支付宝返回:%s' % r.json())
        # Assertions
        self.assertEqual(200,r.json()['code'],msg='返回状态码不是200')
        self.assertEqual('请求成功',r.json()['note'])
        self.log.info('解除绑定账号接口测试结束!\n')
    @classmethod
    def tearDownClass(cls):
        cls.s.close()
if __name__=='__main__':
    unittest.main()
|
[
"[email protected]"
] | |
56cfe94c34974098be5441d30e82c556d53a814e
|
86a017dd4c8d4d77c511cc598190aaa9dc0ae3e8
|
/data structure/mine_linked_list.py
|
92ff136a59524e8fa5ebb2031ddd83e8e998da40
|
[] |
no_license
|
sungguenja/studying
|
fd7459eb9faa6488d7b63bf3884a92513daf3c54
|
719f4dfbda211c34de2a0c8cf3b9d3001f29fcec
|
refs/heads/master
| 2023-08-17T13:46:44.343780 | 2023-08-10T11:55:15 | 2023-08-10T11:55:15 | 232,306,053 | 0 | 0 | null | 2022-12-16T10:53:26 | 2020-01-07T11:00:28 |
Python
|
UTF-8
|
Python
| false | false | 1,711 |
py
|
import mine_node
class LinkedList:
    """Singly linked list built from ``mine_node.Node(data, link)`` cells.

    Fixes over the previous version: the original stored the list head
    under two different attribute names (``self.head`` in ``__init__``/
    ``getNode``/``find``/``insert``/``delete`` but ``self.top`` in
    ``isEmpty``/``clear``/``push``/``size``), so half of the API raised
    AttributeError.  ``delete`` also removed the head node regardless of
    the requested position.  All state now lives in ``self.head`` and
    ``delete`` removes exactly the node at ``position``.
    """
    def __init__(self):
        # Head of the list; None means the list is empty.
        self.head = None
    def isEmpty(self):
        """Return True when the list has no nodes."""
        return self.head is None
    def clear(self):
        """Drop every node."""
        self.head = None
    def push(self, item):
        """Prepend *item* at the head of the list."""
        self.head = mine_node.Node(item, self.head)
    def size(self):
        """Return the number of nodes (O(n))."""
        node = self.head
        count = 0
        while node is not None:
            count += 1
            node = node.link
        return count
    def getNode(self, position):
        """Return the node at *position* (0-based), or None if out of range."""
        if position < 0:
            return None
        node = self.head
        while position > 0 and node is not None:
            node = node.link
            position -= 1
        return node
    def getValue(self, position):
        """Return the data stored at *position*, or None if out of range."""
        node = self.getNode(position)
        if node is None:
            return None
        return node.data
    def replace(self, item, position):
        """Overwrite the data at *position* with *item* (no-op if absent)."""
        node = self.getNode(position)
        if node is not None:
            node.data = item
    def find(self, data):
        """Return the first node whose data equals *data*, else None."""
        node = self.head
        while node is not None:
            if node.data == data:
                break
            node = node.link
        return node
    def insert(self, position, data):
        """Insert *data* so that it ends up at index *position*."""
        node = self.getNode(position - 1)
        if node is None:
            # Inserting at (or before) the head.
            self.head = mine_node.Node(data, self.head)
        else:
            node.link = mine_node.Node(data, node.link)
    def delete(self, position):
        """Remove the node at *position*; out-of-range positions are no-ops."""
        if position == 0:
            if self.head is not None:
                self.head = self.head.link
        else:
            prev = self.getNode(position - 1)
            if prev is not None and prev.link is not None:
                prev.link = prev.link.link
|
[
"[email protected]"
] | |
4724d5aa9415a81ce783f5bab5bea5842e84c4e9
|
217440fcc3a91e4ad1a8e008bd315128de7d571a
|
/day11/08-常见类的函数.py
|
9a1178d3e15539060839c925447403eea8ccf73c
|
[] |
no_license
|
zhangbo111/0102-0917
|
a6af056ce9c9a8ab9500e8d016846dc6c50ec1c6
|
1631ea402612e82ae62b093749e2c4f19a021c63
|
refs/heads/master
| 2020-04-18T16:55:41.675156 | 2019-02-12T01:48:25 | 2019-02-12T01:48:25 | 167,643,635 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 457 |
py
|
class Father:
    pass
class Mother:
    pass
class Laowang:
    pass
class Son(Father, Mother):
    pass
# issubclass(sub, base) returns True when *sub* is a subclass of *base*,
# otherwise False.
result1 = issubclass(Son, Father)
result2 = issubclass(Son, Mother)
result3 = issubclass(Son, Laowang)
# With a tuple as the second argument, issubclass returns True if Son is a
# subclass of ANY class in the tuple.
result4 = issubclass(Son, (Mother, Laowang))
print(result1, result2, result3, result4)
|
[
"[email protected]"
] | |
5f7a6e0094d7dff4e2a88f1833c2b9afbec85264
|
24fe1f54fee3a3df952ca26cce839cc18124357a
|
/servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/rtdmc/ajpfilterpol.py
|
2d8941c96e207a74adc8b90ad0b1cdbcb211fabc
|
[] |
no_license
|
aperiyed/servicegraph-cloudcenter
|
4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff
|
9eb7975f2f6835e1c0528563a771526896306392
|
refs/heads/master
| 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 |
Python
|
UTF-8
|
Python
| false | false | 4,481 |
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class AJPFilterPol(Mo):
    """Abstract JP filter policy managed object (auto-generated model metadata).

    NOTE(review): generated code ("written by zen warriors, do not modify") —
    edit the code generator, not this class.
    """
    meta = ClassMeta("cobra.model.rtdmc.AJPFilterPol")

    meta.isAbstract = True
    meta.moClassName = "rtdmcAJPFilterPol"
    meta.moClassName = "rtdmcAJPFilterPol"
    meta.rnFormat = ""
    meta.category = MoCategory.REGULAR
    meta.label = "Abstract JP Filter Policy"
    meta.writeAccessMask = 0x20000001
    meta.readAccessMask = 0x20000001
    meta.isDomainable = False
    meta.isReadOnly = False
    meta.isConfigurable = True
    meta.isDeletable = True
    meta.isContextRoot = False

    # Allowed child managed-object classes and their RN prefixes.
    meta.childClasses.add("cobra.model.rtdmc.RsFilterToRtMapPol")
    meta.childClasses.add("cobra.model.pim.RouteMapDef")
    meta.childClasses.add("cobra.model.fault.Delegate")
    meta.childNamesAndRnPrefix.append(("cobra.model.rtdmc.RsFilterToRtMapPol", "rsfilterToRtMapPol"))
    meta.childNamesAndRnPrefix.append(("cobra.model.pim.RouteMapDef", "rtmapdef"))
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))

    # Class hierarchy: abstract superclasses and the concrete leaf classes.
    meta.superClasses.add("cobra.model.pol.Comp")
    meta.superClasses.add("cobra.model.naming.NamedObject")
    meta.superClasses.add("cobra.model.pol.Obj")
    meta.superClasses.add("cobra.model.rtdmc.AFilterPol")
    meta.concreteSubClasses.add("cobra.model.pim.JPOutbFilterDef")
    meta.concreteSubClasses.add("cobra.model.pim.JPInbFilterPol")
    meta.concreteSubClasses.add("cobra.model.pim.JPInbFilterDef")
    meta.concreteSubClasses.add("cobra.model.pim.JPOutbFilterPol")

    meta.rnPrefixes = [
    ]

    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)

    prop = PropMeta("str", "descr", "descr", 5582, PropCategory.REGULAR)
    prop.label = "Description"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("descr", prop)

    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)

    prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
    prop.label = "Name"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("name", prop)

    prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
    prop.label = "Name alias"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 63)]
    prop.regex = ['[a-zA-Z0-9_.-]+']
    meta.props.add("nameAlias", prop)

    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)

    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)

    def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
        namingVals = []
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"[email protected]"
] | |
e2a0d3c0ad23256cde4b53012bff5b0474c41b96
|
56014da6ebc817dcb3b7a136df8b11cf9f976d93
|
/Python基础笔记/05-读写文件及办公文档自动化/05.04-OS模块.py
|
09d813dea42521efecdd3acd34a51b2e3b7f6223
|
[] |
no_license
|
sunday2146/notes-python
|
52b2441c981c1106e70a94b999e986999334239a
|
e19d2aee1aa9433598ac3c0a2a73b0c1e8fa6dc2
|
refs/heads/master
| 2022-01-12T22:55:45.401326 | 2019-01-18T03:18:26 | 2019-01-18T03:18:26 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,945 |
py
|
import os
"""
os:包含了普遍的操作系统的功能
"""
# 'nt' -> Windows; 'posix' -> Linux, Unix or Mac OS X.
# Get the operating system type.
print(os.name)
# print(os.unname) would print detailed OS info, but is not supported on Windows.
# Get the OS environment variables.
print(os.environ)
# Get one specific environment variable.
print(os.environ.get("appdata"))
# The current-directory marker, e.g. used as ./a/
print(os.curdir)
# Get the current working directory, i.e. where this script runs from.
print(os.getcwd())
# Return all entries of the given directory as a list.
print(os.listdir(r"C:\Users\Zhangyadi\Desktop"))
# Create a new directory under the current one.
#os.mkdir("sunck")
# Remove a directory.
#os.rmdir("sunck")
# Get file attributes.
#print(os.stat("sunck"))
# Rename a file or directory.
#os.rename("sunck","kaige")
# Delete a regular file.
#os.remove("hello.py.txt")
# Run a shell command --- Notepad.
#os.system("notepad")
#os.system("write")           - WordPad
#os.system("mspaint")         - Paint
#os.system("shutdown-s-t 500") - schedule a shutdown
#os.system("shutdown-a")       - cancel the shutdown
#os.system("taskkill/f /im notepad.exe") - kill a process
# Some helpers live in the os module itself, others in os.path.
# Get the absolute path.
print(os.path.abspath("kaige"))
# Join path components.
p1 = r"C:\Users\Zhangyadi\Desktop\project"
p2 = "sunck"
# Note: the second argument must not start with a backslash.
# Result: C:\Users\Zhangyadi\Desktop\project\sunck
print(os.path.join(p1,p2))
p3 = "/root/sunck/home"
p4 = "kaige"
print(os.path.join(p3,p4))
# Split a path into (head, tail).
path2 = r"C:\Users\Zhangyadi\Desktop\project\kaige"
print(os.path.split(path2))
# Get the extension.
print(os.path.splitext(path2))
# Is it a directory?
print(os.path.isdir(path2))
# Does the file exist?
path3 = r"C:\Users\Zhangyadi\Desktop\56fil6.txt"
print(os.path.isfile(path3))
# Does the directory exist?
print(os.path.exists(path2))
# Get the file size in bytes.
print(os.path.getsize(path3))
# Get the directory part of a path.
print(os.path.dirname(path3))
print(os.path.basename(path3))# get the file name part
[
"[email protected]"
] | |
a71eaf902c6b63983c91e8caf7675b99dd64e78b
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/303/usersdata/303/80291/submittedfiles/testes.py
|
f8fdf8b73bca451bb6d4e647f3f675e329669678
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 209 |
py
|
# -*- coding: utf-8 -*-
n= int(input('Digite um numero:'))
if n%3==0 and n%7==0:
print(' O numero é divisivel por 3 e por 7')
else:
print(' o numero nao é divisivel por 3 e por 7')
|
[
"[email protected]"
] | |
9885653186d1619aaa626651335b51322f938b13
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02754/s775628525.py
|
c08aa17fa27593932f995ed4aa58535828193f96
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 222 |
py
|
if __name__ == '__main__':
    # Balls repeat in a pattern of `blue` blue balls followed by `red` red
    # balls; count the blue ones among the first `total`.
    total, blue, red = map(int, input().split())
    cycle = blue + red
    blue_from_full = (total // cycle) * blue
    if blue_from_full == 0:
        # Less than one full cycle fits: the prefix holds min(total, blue) blues.
        blue_extra = min(total, blue)
    else:
        remainder = total % cycle
        blue_extra = min(blue, remainder)
    print(blue_from_full + blue_extra)
|
[
"[email protected]"
] | |
94607147cb6a428256583d99cca352c265328f80
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02595/s086135640.py
|
10579db5c1e5ef2a5300d80b183a84bc3668641d
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 149 |
py
|
# Count the points (x, y) whose Euclidean distance from the origin is <= d.
num_points, max_dist = map(int, input().split())
limit_sq = max_dist ** 2  # compare squared distances; avoids sqrt entirely
within = 0
for _ in range(num_points):
    x, y = map(int, input().split())
    if x ** 2 + y ** 2 <= limit_sq:
        within += 1
print(within)
|
[
"[email protected]"
] | |
b37b2e218d9b6497281ffcb42383e42614c8930c
|
f0a5ad7b8aa39f51f233391fead0da3eabecc4ee
|
/.history/toolbox/abreFile_20191127163354.py
|
2da87cf946b4539934df6748b231c06528e4165f
|
[] |
no_license
|
OseiasBeu/webScrapping
|
e0a524847e55b24dbbd3d57bbe7fa43b4e101f48
|
1e72c7551aea355a891043baecfcbab8a89e719a
|
refs/heads/master
| 2022-10-25T18:12:50.858653 | 2020-06-18T01:29:24 | 2020-06-18T01:29:24 | 224,681,550 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 816 |
py
|
import pandas as pd
import os
def abreFile():
    """Rename the first ``.xls`` file found in a fixed folder to ``extract.xlsx``.

    NOTE(review): both paths are hard-coded Windows paths, and ``os.rename``
    fails if the destination already exists on Windows — confirm acceptable.
    """
    oldAddres = 'C:/Users/beuo/Documents/Demandas/AtualizaMiddleIntegrationVtex/files/'
    newFile = 'C:/Users/beuo/Documents/Demandas/AtualizaMiddleIntegrationVtex/files/extract.xlsx'
    def encontraArquivosEmPastaRecursivamente(pasta, extensao):
        # Walk `pasta` recursively, collecting absolute paths of '.xls' files.
        # NOTE(review): the `extensao` parameter is ignored — '.xls' is hard-coded.
        arquivosTxt = []
        caminhoAbsoluto = os.path.abspath(pasta)
        for pastaAtual, subPastas, arquivos in os.walk(caminhoAbsoluto):
            arquivosTxt.extend([os.path.join(pastaAtual,arquivo) for arquivo in arquivos if arquivo.endswith('.xls')])
        return arquivosTxt
    old = encontraArquivosEmPastaRecursivamente(oldAddres, '.xls')
    print(old[0])  # raises IndexError when no .xls file is present
    os.rename(old[0],newFile)
    # wb = pd.ExcelFile('./file/extract.xlsx')
    # df = pd.read_excel(wb)
    # print(df.head())
abreFile()
|
[
"[email protected]"
] | |
3855a95c8084c4bb4923ae0522d68b3251d55a9c
|
bfb1db9b58064f63ed8040b50d5fe3b4664adc01
|
/wechat_django/decorators.py
|
689644e5a65f53d8e12040fa0cf847b3d445f9e8
|
[
"MIT"
] |
permissive
|
hvv1616/wechat-django
|
74947d7ea126e507d649cb152af1a66d68593a8f
|
5599f237bc1781a594102ce7ff491086f8cf69d2
|
refs/heads/master
| 2020-04-30T07:22:38.427671 | 2019-03-18T12:56:20 | 2019-03-18T12:56:20 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,249 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from functools import wraps
from six import text_type
__all__ = ("message_handler", )
def message_handler(names_or_func=None):
    """Decorator marking a callable as a custom WeChat reply handler.

    The decorated callable receives a
    ``wechat_django.models.WeChatMessageInfo`` object and must return a
    ``wechatpy.replies.BaseReply`` object.

    :param names_or_func: app names allowed to use this handler; omit it
        (or use the decorator without parentheses) to allow every app.
    :type names_or_func: str or list or tuple or callable

    Usage::

        @message_handler
        def custom_business(message):
            user = message.user
            # ...
            return TextReply("hello", message=message.message)

        @message_handler(("app_a", "app_b"))
        def app_ab_only_business(message):
            # ...
    """
    def decorator(view_func):
        @wraps(view_func)
        def decorated_view(message):
            return view_func(message)
        # ``True`` means "any app may use this handler"; otherwise a list
        # of allowed app names.
        decorated_view.message_handler = names or True
        return decorated_view

    if isinstance(names_or_func, text_type):
        names = [names_or_func]
    elif callable(names_or_func):
        # Bare ``@message_handler`` usage: decorate immediately, allow all.
        names = None
        return decorator(names_or_func)
    else:
        # Bug fix: a list/tuple of app names (or None) previously left
        # ``names`` unbound, raising NameError when the decorator ran.
        names = names_or_func
    return decorator
|
[
"[email protected]"
] | |
db8cdad93128a19ba84640c54d3a3bcf21458506
|
dc798f062b15e6ad060a5cfb731db5f286e2088b
|
/lesson7/task4/tests.py
|
0dc1cb67e0b5856510c204ea5431b442ee148c04
|
[] |
no_license
|
DmitryTsybulkin/stepik-python
|
dce78c4fe616fe3f5bd26e0dad9c80bc5c5c4ab2
|
0726346f43e21623a1200aa76b9c7e9ff5476844
|
refs/heads/master
| 2020-04-27T22:28:33.695541 | 2019-09-13T14:03:28 | 2019-09-13T14:03:28 | 174,738,049 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 138 |
py
|
from test_helper import check_samples
if __name__ == '__main__':
    # Each sample is [stdin, expected stdout]; '\n' separates the lines fed
    # to / expected from the solution under test.
    check_samples(samples=[["480\n1\n2","9\n2"],["475\n1\n55","9\n50"]])
|
[
"[email protected]"
] | |
cc5efee86d9bd9204bbc9ff243e80878e33ea5a6
|
ae4be4a17468f89e06975a402cddd7dabf692ec9
|
/ABC/137/C/source.py
|
5da7bff89fd85a546813bb268e62e676e9596f88
|
[] |
no_license
|
naru380/AtCoder
|
95ae61230d3182dc2a317a77f8e9300c68443199
|
296d071d6a91ea7e061ee3923b5c26b0c7536119
|
refs/heads/master
| 2020-09-20T02:12:29.405393 | 2020-05-31T09:58:08 | 2020-05-31T09:58:08 | 224,354,223 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,136 |
py
|
import itertools
from collections import Counter
import math
N = int(input())  # number of strings that follow on stdin
def generate_prime_numbers():
    """Sieve of Eratosthenes up to 150; return the first 27 primes.

    One prime per lowercase letter (plus one spare) for the
    product-of-primes string encoding used below.
    """
    limit = 150
    is_prime = [True] * (limit + 1)
    is_prime[0] = is_prime[1] = False
    for candidate in range(2, limit + 1):
        for multiple in range(candidate * 2, limit + 1, candidate):
            is_prime[multiple] = False
    primes = [value for value, flag in enumerate(is_prime) if flag]
    return primes[:27]
def combination(n, r):
    """Binomial coefficient C(n, r) via exact integer factorial division."""
    numerator = math.factorial(n)
    denominator = math.factorial(r) * math.factorial(n - r)
    return numerator // denominator
# Encode each string as the product of one prime per letter: by unique
# factorization, two strings get the same product iff they are anagrams.
prime_numbers = generate_prime_numbers()
encoded_strings = []
for i in range(N):
    S = input()
    encoded_string = 1
    for c in S:
        char_to_int = ord(c) - ord('a')
        encoded_string *= prime_numbers[char_to_int]
    encoded_strings.append(encoded_string)
# print(encoded_strings)
ans = 0
# for comb in itertools.combinations(encoded_strings, 2):
#     if comb[0] == comb[1]:
#         ans += 1
# A group of k equal encodings contributes C(k, 2) anagram pairs.
counter = Counter(encoded_strings)
for i in counter.values():
    if i > 1:
        ans += combination(i, 2)
print(ans)
|
[
"[email protected]"
] | |
3b30c93eabcd27038c83049c2ee79ddeb97f9bac
|
f8e8e365c9cf58b61d72655bc2340baeaed5baff
|
/Leetcode/Python Solutions/Binary Search/FirstBadVersion.py
|
65537ce9473dd531e132bb495f34504fa9fb26fb
|
[
"MIT"
] |
permissive
|
Mostofa-Najmus-Sakib/Applied-Algorithm
|
39a69f6b9ed113efe4a420d19cad79e0aa317637
|
bc656fd655617407856e0ce45b68585fa81c5035
|
refs/heads/master
| 2023-08-31T19:54:34.242559 | 2021-11-05T03:43:35 | 2021-11-05T03:43:35 | 412,263,430 | 0 | 0 |
MIT
| 2021-09-30T23:45:29 | 2021-09-30T23:45:25 | null |
UTF-8
|
Python
| false | false | 806 |
py
|
"""
LeetCode Problem 278. First Bad Version
Link: https://leetcode.com/problems/first-bad-version/
Written by: Mostofa Adib Shakib
Language: Python
Time Complexity: O(logn)
Space complexity: O(1)
"""
# The isBadVersion API is already defined for you.
# @param version, an integer
# @return a bool
# def isBadVersion(version):
class Solution:
    def firstBadVersion(self, n):
        """Binary search for the first bad version in 1..n.

        :type n: int
        :rtype: int

        Relies on the LeetCode-provided global ``isBadVersion`` predicate;
        once a version is bad, all later versions are bad too.
        """
        lo, hi = 0, n
        while lo <= hi:
            mid = (lo + hi) // 2
            if not isBadVersion(mid):
                # mid is still good: the first bad version is to the right.
                lo = mid + 1
            elif isBadVersion(mid - 1):
                # mid is bad but so is its predecessor: answer lies left.
                hi = mid - 1
            else:
                # mid is bad and mid-1 is good: mid is the first bad one.
                return mid
|
[
"[email protected]"
] | |
678256f0e9251afdae873f233eb56b60123f7369
|
b0c02d7ca86c1ef84af18a8c701702e8bb212b64
|
/display-stuff/neopixels/ColorSynthesis/Neopixel Color Synthesis/colorsynthesis1.py
|
dc3e92458492203c89b292ad1f19f38abeac0e08
|
[] |
no_license
|
flashypepo/myMicropython-Examples
|
24fa2f372e68742abe0f74913df000dfe64a9e55
|
b2b63df865b5ad471b351ca5f279135025859f5d
|
refs/heads/master
| 2021-09-24T18:52:18.083444 | 2018-10-13T11:59:19 | 2018-10-13T11:59:19 | 98,223,412 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,746 |
py
|
# Micro/CircuitPython NeoPixel Color Synthesis Experiments pt. 1
import machine
import time
import math
import neopixel
NEOPIXEL_PIN = machine.Pin(15, machine.Pin.OUT)  # data pin driving the strip
NEOPIXEL_COUNT = 8 * 4 #12
def seconds():
    # time.ticks_ms is MicroPython-only; CPython would use time.monotonic().
    return time.ticks_ms()/1000 # MicroPython code for current seconds
# Setup NeoPixels
pixels = neopixel.NeoPixel(NEOPIXEL_PIN, NEOPIXEL_COUNT)
def blank():
    # Turn every pixel off and push the frame to the hardware.
    pixels.fill((0,0,0))
    pixels.write()
blank()
''' Example 2:
amplitude = 128
frequency = 0.25 # Increase this to speed up, decrease to slow down the pulse.
phase = 0
offset = 128
try:
while True:
red = int(amplitude*math.sin(2*math.pi*frequency*seconds()+phase)+\
offset)
color = (red, 0, 0)
pixels.fill(color)
pixels.write()
print("r={}\tg={}\tb={}".format(*color))
time.sleep(0.1)
except:
blank()
print('done')
#'''
################################################################################
# Example 3:
# Refactor to a functional style. Create a sine wave function on the fly
# so it's easy to add more animations (just make more sine wave functions).
################################################################################
def sine_wave(amplitude, frequency, phase, offset):
    """Build f(t) = amplitude * sin(2*pi*frequency*t + phase) + offset.

    Returns a closure so each animation channel carries its own parameters.
    """
    angular = 2 * math.pi * frequency
    return lambda t: amplitude * math.sin(angular * t + phase) + offset
red_wave = sine_wave(128, 0.25, 0, 128)
green_wave = sine_wave(128, 0.25, math.pi, 128)  # pi phase: opposite of red
try:
    while True:
        current = seconds()
        red = int(red_wave(current))
        green = int(green_wave(current))
        color = (red, green, 0)
        pixels.fill(color)
        pixels.write()
        print("r={}\tg={}\tb={}".format(*color))
        time.sleep(0.1)
except:
    # NOTE(review): bare except catches everything, including Ctrl-C; it is
    # used as the shutdown path here, but it also hides real errors — confirm.
    blank()
    print('done')
|
[
"[email protected]"
] | |
78544477f1980b8197bbeb6369a8c22371a2db77
|
a6203ce0f7f871ccd8fd341af6254795c938232b
|
/easy/power-of-two/solution.py
|
08541ea6db52d29b1ee3005fc763fdc7559eb622
|
[] |
no_license
|
hsuanhauliu/leetcode-solutions
|
542590de9b1dd4480bd582850363f71487dd37d0
|
c14d8829c95f61ff6691816e8c0de76b9319f389
|
refs/heads/master
| 2021-03-31T00:31:18.489947 | 2019-10-21T03:51:10 | 2019-10-21T03:51:10 | 124,963,304 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 266 |
py
|
class Solution:
    def isPowerOfTwo(self, n: int) -> bool:
        """Return True if ``n`` is a positive integer power of two.

        A power of two has exactly one set bit, so ``n & (n - 1)`` clears
        that bit and leaves zero. This replaces the original O(log n)
        repeated-division loop with an O(1) bit test; non-positive numbers
        are never powers of two.
        """
        return n > 0 and n & (n - 1) == 0
|
[
"[email protected]"
] | |
fa2426367d7e331041c267f0caa9af5a01f702f0
|
620323fc090cebaf7aca456ff3f7fbbe1e210394
|
/psutil_example/get_win_services.py
|
26361ecb1fd9c3ae7b2481a9ed2b4502e0765fd2
|
[
"CC-BY-4.0"
] |
permissive
|
gil9red/SimplePyScripts
|
bd2733372728bf9b9f00570e90316fa12116516b
|
773c2c9724edd8827a1dbd91694d780e03fcb05a
|
refs/heads/master
| 2023-08-31T04:26:09.120173 | 2023-08-30T17:22:59 | 2023-08-30T17:22:59 | 22,650,442 | 157 | 46 | null | 2023-09-08T17:51:33 | 2014-08-05T16:19:52 |
Python
|
UTF-8
|
Python
| false | false | 853 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "ipetrash"
# pip install psutil
import psutil
from psutil._pswindows import WindowsService
def get_win_services() -> list[WindowsService]:
    """Return all Windows services known to psutil (Windows-only API)."""
    return list(psutil.win_service_iter())
if __name__ == "__main__":
    # Dump name, display name, status, start type and binary path for each
    # service, one multi-line record per service.
    win_service_list = get_win_services()
    print(f"Win service list ({len(win_service_list)}):")
    for service in win_service_list:
        title = f"{service.name()!r} ({service.display_name()})"
        path = (
            f"Pid={service.pid()}, name={service.name()!r}, display_name={service.display_name()!r}, "
            f"status={service.status()!r}, start_type={service.start_type()!r}"
        )
        print("Title:", title)
        print("Path:", path)
        print("Status:", service.status())
        print("binpath:", service.binpath())
        print()
|
[
"[email protected]"
] | |
80ecbb277308e7fb1010e5ec65eb4683e140c3fe
|
feba3c32aac7f17d8fbaf6ef7bb4d229844f8247
|
/machine_learning/clustering/hierarchical_clustering/AgglomerativeClustering/main.py
|
c44aad4338e74004ce5203e18522385184a3123b
|
[] |
no_license
|
lisunshine1234/mlp-algorithm-python
|
d48aa1336ae7c4925a0e30f4f09fa6de21f83d0e
|
898359a10f65f16e94f3bb27cc61f3837806ca68
|
refs/heads/master
| 2023-05-01T11:11:47.465491 | 2021-05-24T13:53:40 | 2021-05-24T13:53:40 | 291,934,886 | 0 | 0 | null | 2021-05-24T13:42:15 | 2020-09-01T08:00:17 |
Python
|
UTF-8
|
Python
| false | false | 4,604 |
py
|
import numpy as np
import run as r
'''
[id]
145
[name]
AgglomerativeClustering
[input]
x_train 训练集 训练集标签数据集 二维数组 必须 定数
y_train 测试集 测试集数据集 二维数组 必须 定数
n_clusters 簇数 默认为2,要查找的集群数。如果'None'不是'distance_threshold',则必须为'None',可选整数 整数 不必须 定数
affinity 亲和力 默认为'euclidean',用于计算链接的度量。可以是'euclidean','l1','l2','manhattan','cosine'或'precomputed'。如果链接为'ward',则仅接受'euclidean'。如果为'precomputed',则需要距离矩阵(而不是相似度矩阵)作为拟合方法的输入,可选'euclidean' 字符串 不必须 定数
memory memory 默认为None,用于缓存树计算的输出。默认情况下,不进行缓存。如果给出了字符串,则它是缓存目录的路径,可选整数,字符串 字符串 不必须 定数
connectivity 连通性 默认为None,连接矩阵。为每个样本定义遵循给定数据结构的相邻样本。这可以是连通性矩阵本身,也可以是将数据转换为连通性矩阵(例如从kneighbors_graph派生)的可调用对象。默认值为None,即分层聚类算法是非结构化的,可选数组 不定数组 不必须 定数
compute_full_tree 计算全树 默认为auto,尽早在n_clusters处停止树的构建。还要注意的是,当更改群集数量并使用缓存时,计算完整树可能是有利的。如果'True'不是'distance_threshold',则必须为'None'。默认情况下,'compute_full_tree'是'auto',当'True'不是'distance_threshold'或'None'次于100或'n_clusters'之间的最大值时,等于'0.02 * n_samples'。否则,'auto'等于'False',可选布尔值,'auto' 字符串 不必须 定数
linkage 链接标准 默认为ward,使用哪个链接标准。链接标准确定要在观察组之间使用的距离。该算法将合并最小化此标准的成对集群。-ward将合并的簇的方差最小化。-平均使用两组的每个观测值的距离的平均值。-完全或最大链接使用两组所有观测值之间的最大距离。-single使用两组的所有观测值之间的最小距离,可选'ward','average','single','complete' 字符串 不必须 定数
distance_threshold 距离阈值 默认为None,链接距离阈值,超过该距离时,群集将不会合并。如果不是'None',则'n_clusters'必须为'None',而'compute_full_tree'必须为'True',可选浮点数 浮点数 不必须 定数
[output]
n_clusters_ 簇数 该算法找到的簇数。如果为'distance_threshold=None',则等于给定的'n_clusters' 整数
labels_ 标签 每个点的聚类标签 一维数组
n_leaves_ 叶子数 层次树中的叶数 整数
n_connected_components_ 组件连接数 图中估计的已连接组件数 整数
children_ children_ 每个非叶节点的子级。小于'n_samples'的值对应于作为原始样本的树的叶子。大于或等于'i'的节点'n_samples'是非叶子节点,并且具有子节点'children_[i - n_samples]'。或者,在第i次迭代中,children [i] [0]和children [i] [1]合并以形成节点'n_samples + i 二维数组
[outline]
聚集聚类以递归方式合并这对最小增加给定链接距离的聚类对。
[describe]
聚集聚类以递归方式合并这对最小增加给定链接距离的聚类对。
'''
def main(x_train, y_train,
         n_clusters=2, affinity="euclidean", memory=None, connectivity=None, compute_full_tree='auto', linkage='ward', distance_threshold=None
         ):
    """Run AgglomerativeClustering through ``r.run``.

    Arguments may arrive as strings (the web layer passes them as text);
    those are parsed back into Python objects before the call. Parameter
    semantics mirror sklearn's AgglomerativeClustering — see the module
    docstring above.
    """
    def _parse(value):
        # SECURITY: eval() on externally supplied text executes arbitrary
        # code; prefer ast.literal_eval if callers only ever send literals.
        return eval(value) if isinstance(value, str) else value

    x_train = _parse(x_train)
    y_train = _parse(y_train)
    n_clusters = _parse(n_clusters)
    connectivity = _parse(connectivity)
    distance_threshold = _parse(distance_threshold)
    return r.run(x_train=x_train, y_train=y_train, n_clusters=n_clusters,
                 affinity=affinity,
                 memory=memory,
                 connectivity=connectivity,
                 compute_full_tree=compute_full_tree,
                 linkage=linkage,
                 distance_threshold=distance_threshold)
if __name__ == '__main__':
    import numpy as np
    import json
    # Smoke test: last CSV column is the label, the rest are features.
    array = np.loadtxt('D:\\123_2.csv', delimiter=',')
    array = array[0:20, :]  # keep only the first 20 rows
    y = array[:, -1].tolist()
    x = np.delete(array, -1, axis=1).tolist()
    array = array.tolist()
    back = main(x, y)
    print(back)
    for i in back:
        print(i + ":" + str(back[i]))
    json.dumps(back)  # NOTE(review): result is discarded — confirm this is intentional
|
[
"[email protected]"
] | |
1d90ee6dc0cce81b7ee6e5ebc395a18ae771e9a8
|
786027545626c24486753351d6e19093b261cd7d
|
/ghidra9.2.1_pyi/ghidra/app/util/navigation/__init__.pyi
|
50c892e49919e06c449a9ea0c91e410ecd8bb2e3
|
[
"MIT"
] |
permissive
|
kohnakagawa/ghidra_scripts
|
51cede1874ef2b1fed901b802316449b4bf25661
|
5afed1234a7266c0624ec445133280993077c376
|
refs/heads/main
| 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 195 |
pyi
|
from .GoToAddressLabelDialog import GoToAddressLabelDialog as GoToAddressLabelDialog
from .GoToQuery import GoToQuery as GoToQuery
from .GoToServiceImpl import GoToServiceImpl as GoToServiceImpl
|
[
"[email protected]"
] | |
6f7c3eebdf06407cee5d8e9e62976c7a454ff836
|
e3a25b40812b6b70f10b52a6f66f9348dcc251a6
|
/algorithm/0402codeAD/구슬고르기복습.py
|
ae0fd9e3bf77bee8c5bdacf9d2e3d4790c8f7305
|
[] |
no_license
|
yoonwoo123/python101
|
75643cb5dcf411c9ddcf988bf09bb88e4523206c
|
637dce64a6320a6f46eb941e33e8e9f6ee41c910
|
refs/heads/master
| 2020-04-14T11:30:43.018126 | 2019-07-25T08:28:31 | 2019-07-25T08:28:31 | 163,815,689 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,620 |
py
|
import sys
sys.stdin = open("구슬고르기_input.txt")  # redirect stdin to the local test input
# Reference solution: enumerate selections of N values from dice faces 1..6.
def DFS1(n): # permutations WITH repetition: each slot independently 1..6
    if n>N:
        for i in range(1, N+1): print(arr[i], end=' ')
        print()
        return
    for i in range(1, 7):
        arr[n]=i
        DFS1(n+1)
def DFS3(n): # permutations WITHOUT repetition
    if n>N:
        for i in range(1, N+1): print(arr[i], end=' ')
        print()
        return
    for i in range(1, 7):
        if chk[i]:continue
        chk[i]=1 # mark value i as used before descending
        arr[n]=i
        DFS3(n+1)
        chk[i]=0 # unmark on backtrack
def DFS2(n, start): # combinations WITH repetition
    if n>N:
        for i in range(1, N+1): print(arr[i], end=' ')
        print()
        return
    for i in range(start, 7): # only values >= start, keeping picks non-decreasing
        arr[n]=i
        DFS2(n+1, i) # pass i (not start) so the same value may repeat
def DFS4(n, start): # combinations WITHOUT repetition
    if n>N:
        for i in range(1, N+1): print(arr[i], end=' ')
        print()
        return
    for i in range(start, 7):
        arr[n]=i
        DFS4(n+1, i+1) # i+1 (not i): each value is used at most once
#main--------------------------------- dispatch on mode M read from input
N, M = map(int, input().split())
arr =[0] * (N+1)
chk = [0] * 7
if M ==1: DFS1(1)
elif M ==3 : DFS3(1)
elif M == 2: DFS2(1, 1)
elif M ==4: DFS4(1,1)
# def ovperm(n, k):
# if n == k:
# for g in p:
# print(g, end=" ")
# print()
# else:
# for i in range(k, n):
# a[i], a[k] = a[k], a[i]
# p[k] = a[i]
# perm(n, k+1)
# # perm(n-1, k+1)
# a[i], a[k] = a[k], a[i]
#
# def DFS(no): # chk를 하면 순열 chk를 하지 않으면 중복순열
# if no >= N:
# for i in range(N):
# print(b[i], end=" ")
# print()
# return
# for i in range(6):
# # if chk[i]:continue # 1이면 continue, 0이면 진행
# # chk[i] = 1
# b[no] = a[i]
# DFS(no + 1)
# # chk[i] = 0
#
# def comb(no):
# if no >= N:
# for i in range(N):
# print(b[i], end=" ")
# print()
# return
# b[no] = a[no]
# comb(no + 1)
# b[no] = 0
# comb(no + 1)
#
# # def combs(no, start): # a[no]번째 구슬을 상자에 담거나 담지 않는 모든 경우
# # for i in range(N): print(b[i], end=" ")
# # print()
# # if no >= N or start >= N:
# # return
# # for i in range(start, N):
# # b[no] = a[i]
# # combs(no+1, i+1)
# # b[no] = 0
#
# N = int(input())
# a = [n for n in range(1, 7)]
# b = [0] * N
# chk = [0] * N
# # DFS(0)
# # comb(0)
# DFS(0)
|
[
"[email protected]"
] | |
8f5b53674caa26cd827c4943842f96a981027ade
|
386a5b505d77c9798aaab78495d0f00c349cf660
|
/python/function/harmonic.py
|
f23e439bde8eccf7c61bf23d64a8e3c28998c89d
|
[] |
no_license
|
namratarane20/MachineLearning
|
2da2c87217618d124fd53f607c20641ba44fb0b7
|
b561cc74733b655507242cbbf13ea09a2416b9e2
|
refs/heads/master
| 2023-01-20T18:54:15.662179 | 2020-03-09T14:12:44 | 2020-03-09T14:12:44 | 237,597,461 | 0 | 0 | null | 2023-01-05T12:37:12 | 2020-02-01T10:22:20 |
Python
|
UTF-8
|
Python
| false | false | 431 |
py
|
#this program is used print the nth harmonic value
from data import functional
try:
    value = int(input("enter the value: "))
    if value > 0: # harmonic numbers are defined for positive n only
        functional.harmonic(value)
    else:print("enter more than 0")
except ValueError: # int() failed: the input was not numeric
    print("enter the proper input")
|
[
"[email protected]"
] | |
b97cffb7e0a43919de823cb6cf823479aa0bc268
|
a2ee667a402d821831ce1532c3a2e62305624388
|
/extras/sample_site/sample_site/urls.py
|
b391adc051ec60ccb38c358017504169712765ab
|
[
"MIT"
] |
permissive
|
cltrudeau/django-flowr
|
9c1c7c8a43d881f962e8dd58ca424daa3ee1348a
|
ea2d69fda94d1998f48301954f8dc69f0b553553
|
refs/heads/master
| 2023-07-05T20:28:05.370538 | 2023-06-29T19:36:41 | 2023-06-29T19:36:41 | 40,761,586 | 3 | 0 |
MIT
| 2022-12-26T19:50:47 | 2015-08-15T13:37:23 |
Python
|
UTF-8
|
Python
| false | false | 185 |
py
|
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    url(r'^flowr/', include('flowr.urls')),  # mount the flowr app's URLconf
]
|
[
"[email protected]"
] | |
b4be61d8b86b193478f3cf286e713cde26bb27d9
|
7e7a1a1c7f5a2069b50b90b247d89faef17b7eef
|
/test/unit/test_make.py
|
32d6c20acbcdafa123544c60d5ce8704b4b77154
|
[
"BSD-3-Clause"
] |
permissive
|
JulianVolodia/bfg9000
|
e1d13e07ef43577ce871cbdf28d7854eaad9985e
|
c04867cd7fc4861bc67fe38f9ca47ee6cc43edef
|
refs/heads/master
| 2021-01-11T12:16:38.842893 | 2016-12-11T21:16:52 | 2016-12-12T01:18:07 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,162 |
py
|
import os
import unittest
from six.moves import cStringIO as StringIO
from bfg9000 import path
from bfg9000 import safe_str
from bfg9000.backends.make.syntax import *
from bfg9000.platforms import platform_name
esc_colon = ':' if platform_name() == 'windows' else '\\:'  # ':' in make targets is escaped except on Windows
def quoted(s):
    """Wrap *s* in single quotes, matching the writer's shell quoting."""
    return "'{}'".format(s)
class TestMakeWriter(unittest.TestCase):
    """Checks Writer escaping of strings, escaped_str, jbos and paths for
    each Syntax mode (target / dependency / function / shell / clean)."""
    # plain strings: '$' is doubled; target/dependency modes also escape ':' and ' '
    def test_write_string_target(self):
        out = Writer(StringIO())
        out.write('foo: $bar|baz,quux', Syntax.target)
        self.assertEqual(out.stream.getvalue(),
                         'foo' + esc_colon + '\\ $$bar|baz,quux')
    def test_write_string_dependency(self):
        out = Writer(StringIO())
        out.write('foo: $bar|baz,quux', Syntax.dependency)
        self.assertEqual(out.stream.getvalue(),
                         'foo' + esc_colon + '\\ $$bar\\|baz,quux')
    def test_write_string_function(self):
        out = Writer(StringIO())
        out.write('foo: $bar|baz,quux', Syntax.function)
        self.assertEqual(out.stream.getvalue(), quoted('foo: $$bar|baz$,quux'))
    def test_write_string_shell(self):
        out = Writer(StringIO())
        out.write('foo: $bar|baz,quux', Syntax.shell)
        self.assertEqual(out.stream.getvalue(), quoted('foo: $$bar|baz,quux'))
    def test_write_string_clean(self):
        out = Writer(StringIO())
        out.write('foo: $bar|baz,quux', Syntax.clean)
        self.assertEqual(out.stream.getvalue(), 'foo: $$bar|baz,quux')
    # escaped strings: passed through verbatim in every syntax mode
    def test_write_escaped_string_target(self):
        out = Writer(StringIO())
        out.write(safe_str.escaped_str('foo: $bar|baz,quux'), Syntax.target)
        self.assertEqual(out.stream.getvalue(), 'foo: $bar|baz,quux')
    def test_write_escaped_string_dependency(self):
        out = Writer(StringIO())
        out.write(safe_str.escaped_str('foo: $bar|baz,quux'),
                  Syntax.dependency)
        self.assertEqual(out.stream.getvalue(), 'foo: $bar|baz,quux')
    def test_write_escaped_string_function(self):
        out = Writer(StringIO())
        out.write(safe_str.escaped_str('foo: $bar|baz,quux'), Syntax.function)
        self.assertEqual(out.stream.getvalue(), 'foo: $bar|baz,quux')
    def test_write_escaped_string_shell(self):
        out = Writer(StringIO())
        out.write(safe_str.escaped_str('foo: $bar|baz,quux'), Syntax.shell)
        self.assertEqual(out.stream.getvalue(), 'foo: $bar|baz,quux')
    def test_write_escaped_string_clean(self):
        out = Writer(StringIO())
        out.write(safe_str.escaped_str('foo: $bar|baz,quux'), Syntax.clean)
        self.assertEqual(out.stream.getvalue(), 'foo: $bar|baz,quux')
    # jbos: the escaped half stays raw while the plain half is escaped/quoted
    def test_write_jbos_target(self):
        out = Writer(StringIO())
        s = safe_str.jbos('$foo', safe_str.escaped_str('$bar'))
        out.write(s, Syntax.target)
        self.assertEqual(out.stream.getvalue(), '$$foo$bar')
    def test_write_jbos_dependency(self):
        out = Writer(StringIO())
        s = safe_str.jbos('$foo', safe_str.escaped_str('$bar'))
        out.write(s, Syntax.dependency)
        self.assertEqual(out.stream.getvalue(), '$$foo$bar')
    def test_write_jbos_function(self):
        out = Writer(StringIO())
        s = safe_str.jbos('$foo', safe_str.escaped_str('$bar'))
        out.write(s, Syntax.function)
        self.assertEqual(out.stream.getvalue(), quoted('$$foo') + '$bar')
    def test_write_jbos_shell(self):
        out = Writer(StringIO())
        s = safe_str.jbos('$foo', safe_str.escaped_str('$bar'))
        out.write(s, Syntax.shell)
        self.assertEqual(out.stream.getvalue(), quoted('$$foo') + '$bar')
    def test_write_jbos_clean(self):
        out = Writer(StringIO())
        s = safe_str.jbos('$foo', safe_str.escaped_str('$bar'))
        out.write(s, Syntax.clean)
        self.assertEqual(out.stream.getvalue(), '$$foo$bar')
    # paths: rendered relative to $(srcdir); quoted in function/shell modes
    def test_write_path_target(self):
        out = Writer(StringIO())
        out.write(path.Path('foo', path.Root.srcdir), Syntax.target)
        self.assertEqual(out.stream.getvalue(),
                         os.path.join('$(srcdir)', 'foo'))
    def test_write_path_dependency(self):
        out = Writer(StringIO())
        out.write(path.Path('foo', path.Root.srcdir), Syntax.dependency)
        self.assertEqual(out.stream.getvalue(),
                         os.path.join('$(srcdir)', 'foo'))
    def test_write_path_function(self):
        out = Writer(StringIO())
        out.write(path.Path('foo', path.Root.srcdir), Syntax.function)
        self.assertEqual(out.stream.getvalue(),
                         quoted(os.path.join('$(srcdir)', 'foo')))
    def test_write_path_shell(self):
        out = Writer(StringIO())
        out.write(path.Path('foo', path.Root.srcdir), Syntax.shell)
        self.assertEqual(out.stream.getvalue(),
                         quoted(os.path.join('$(srcdir)', 'foo')))
    def test_write_path_clean(self):
        out = Writer(StringIO())
        out.write(path.Path('foo', path.Root.srcdir), Syntax.clean)
        self.assertEqual(out.stream.getvalue(),
                         os.path.join('$(srcdir)', 'foo'))
|
[
"[email protected]"
] | |
751baeeaf78e31e7c30ff0263dce2e8a7717fb44
|
48517a9b7ec7b0f0bf0a03291b7d1e3def751c0a
|
/Pibow/corner_to_corner_3_v2.py
|
4b9f958fcb0bdf6453d970f0e489ffbcd7e54229
|
[
"MIT"
] |
permissive
|
Breakfast-for-Pigeons/Unicorn-HAT
|
1ae033bf11c05b9cc739b1eacfc77665506e0bc8
|
9ff1388ee627a8e81f361929e9e9b708db4e2832
|
refs/heads/master
| 2021-06-06T12:22:48.162031 | 2020-10-22T17:31:51 | 2020-10-22T17:31:51 | 74,648,524 | 1 | 0 | null | 2018-10-02T17:37:31 | 2016-11-24T07:28:23 |
Python
|
UTF-8
|
Python
| false | false | 7,514 |
py
|
#!/usr/bin/python3
"""
Corner to Corner 3 version 2- Pibow
Moves a square from the lower right corner to the upper left corner.
Instead of cycling through all the colors, a specific color must be sent
to the function as an argument.
....................
Author: Paul Ryan
This program was written on a Raspberry Pi using the Geany IDE.
"""
########################################################################
# Import modules #
########################################################################
from time import sleep
import unicornhat
from bfp_unicornhat import print_header
from bfp_unicornhat import stop
########################################################################
# Import Variables #
########################################################################
from bfp_unicornhat import C1
from bfp_unicornhat import C2
from bfp_unicornhat import C3
from bfp_unicornhat import C4
from bfp_unicornhat import C5
from bfp_unicornhat import C6
from bfp_unicornhat import C7
from bfp_unicornhat import C8
########################################################################
# Functions #
########################################################################
def corner_to_corner_3_v2(color):
    """
    Moves a square from the lower right corner to the upper left corner.

    Two phases: a square anchored at (7, 7) grows one L-shaped ring per
    frame until it fills the 8x8 grid, then it is erased one ring per
    frame from the (7, 7) side until the display is blank. Each frame is
    pushed with ``show`` and held for ``sleep_speed`` seconds — the same
    frame sequence the original fully-unrolled version produced; the
    per-pixel calls are simply rolled up into loops.

    Arguments:
        This function takes an RGB tuple as an argument.
    """
    sleep_speed = 0.1
    off = (0, 0, 0)

    # Growth phase: frame i lights the ring along column x == i (from the
    # bottom up to y == i) and row y == i (to the right of the corner).
    for i in range(7, -1, -1):
        for y in range(7, i - 1, -1):
            unicornhat.set_pixel(i, y, color)
        for x in range(i + 1, 8):
            unicornhat.set_pixel(x, i, color)
        unicornhat.show()
        sleep(sleep_speed)

    # Shrink phase: frame i clears the ring along column x == i and row
    # y == i, shrinking the lit square toward (0, 0) until nothing is left.
    for i in range(7, -1, -1):
        for y in range(0, i + 1):
            unicornhat.set_pixel(i, y, off)
        for x in range(i - 1, -1, -1):
            unicornhat.set_pixel(x, i, off)
        unicornhat.show()
        sleep(sleep_speed)
if __name__ == '__main__':
    COLORS = [C1, C2, C3, C4, C5, C6, C7, C8]
    try:
        # STEP01: Print header
        print_header()
        # STEP02: Print instructions in white text
        print("\033[1;37;40mPress Ctrl-C to stop the program.")
        # STEP03: Run the animation once per color, in order.
        for COLOR in COLORS:
            corner_to_corner_3_v2(COLOR)
        # STEP04: Exit the program.
        stop()
    except KeyboardInterrupt:
        # Ctrl-C: clean up the display before exiting.
        stop()
|
[
"[email protected]"
] | |
4a625cc49e2d484363ea090f357a0e45dc2e536a
|
9e28200b71d43de1e122a964e88f1b547bfde465
|
/question_leetcode/702.py
|
e39791835a7143e971e1b37e879656148c9a064b
|
[] |
no_license
|
paul0920/leetcode
|
6f8a7086eefd3e9bccae83752ef41cbfee1acaea
|
474886c5c43a6192db2708e664663542c2e39548
|
refs/heads/master
| 2023-08-19T14:10:10.494355 | 2021-09-16T20:26:50 | 2021-09-16T20:26:50 | 290,560,326 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 879 |
py
|
# """
# This is ArrayReader's API interface.
# You should not implement it, or speculate about its implementation
# """
# class ArrayReader(object):
# def get(self, index):
# """
# :type index: int
# :rtype int
# """
class Solution(object):
    def search(self, reader, target):
        """
        Locate `target` in a sorted, unknown-length array exposed only
        through reader.get(index) (out-of-range reads return a huge value).

        :type reader: ArrayReader
        :type target: int
        :rtype: int -- index of target, or -1 if absent
        """
        # Double an exclusive upper bound until the window covers target.
        bound = 1
        while reader.get(bound - 1) < target:
            bound *= 2
        # "left + 1 < right" binary-search template: narrow to two candidates.
        lo, hi = 0, bound
        while lo + 1 < hi:
            mid = (lo + hi) // 2
            if reader.get(mid) < target:
                lo = mid
            else:
                hi = mid
        # Check both remaining candidates.
        if reader.get(lo) == target:
            return lo
        if reader.get(hi) == target:
            return hi
        return -1
|
[
"[email protected]"
] | |
0242ad91656a9be579908b441d9b94af3542b343
|
ef821468b081ef2a0b81bf08596a2c81e1c1ef1a
|
/Programming Basics/Nested_Loops-LAB/Cinema_Tickets.py
|
7dae726c936e27321845167e1b72d8edcf1c7c38
|
[] |
no_license
|
Ivaylo-Atanasov93/The-Learning-Process
|
71db22cd79f6d961b9852f140f4285ef7820dd80
|
354844e2c686335345f6a54b3af86b78541ed3f3
|
refs/heads/master
| 2023-03-30T20:59:34.304207 | 2021-03-29T15:23:05 | 2021-03-29T15:23:05 | 294,181,544 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,362 |
py
|
# SoftUni "Cinema Tickets": read movies and ticket sales from stdin,
# print per-movie fill percentage and overall ticket-type statistics.
movie = ''
free_seats = 0      # capacity of the current movie's hall
ticket_type = ''
sold_seats = 0      # tickets sold for the current movie
student = 0         # running totals across ALL movies
standard = 0
kids = 0
total_tickets = 0
flag = False        # set when the global 'Finish' sentinel is seen mid-movie
while not flag:
    movie = input()
    if movie == 'Finish':
        break
    free_seats = int(input())
    # Sell tickets until 'End' (movie done), 'Finish' (program done),
    # or the hall fills up.
    while ticket_type != 'End':
        ticket_type = input()
        if ticket_type == 'student':
            student += 1
            sold_seats += 1
            total_tickets += 1
        elif ticket_type == 'standard':
            standard += 1
            sold_seats += 1
            total_tickets += 1
        elif ticket_type == 'kid':
            kids += 1
            sold_seats += 1
            total_tickets += 1
        elif ticket_type == 'End':
            print(f'{movie} - {(sold_seats / free_seats) * 100:.2f}% full.')
        elif ticket_type == 'Finish':
            # Report the partially-sold movie, then stop everything.
            print(f'{movie} - {(sold_seats / free_seats) * 100:.2f}% full.')
            flag = True
            break
        if sold_seats == free_seats:
            # Hall is full: report and move on to the next movie.
            print(f'{movie} - {(sold_seats / free_seats) * 100:.2f}% full.')
            break
    # Reset per-movie state before the next iteration.
    sold_seats = 0
    ticket_type = ''
    if flag:
        break
# Final summary across all movies.
print(f'Total tickets: {total_tickets}')
print(f'{(student / total_tickets) * 100:.2f}% student tickets.')
print(f'{(standard / total_tickets) * 100:.2f}% standard tickets.')
print(f'{(kids / total_tickets) * 100:.2f}% kids tickets.')
|
[
"[email protected]"
] | |
6b9966fd76928a69a7d63ecd8c2b9856b2bfa4c9
|
a46825af0830a0f84f426547fba1b1f45fb97b3f
|
/backend/apps/area/urls.py
|
ce30e18f5bcb945d04fae1d297b2a0b9a011ea19
|
[] |
no_license
|
szshysj/Digital_marketing_web
|
47544c7b9e0c425a78b0d51195ac245fdaef0503
|
86b31f261158b4c8d130c64ae7e573b8316c8bc4
|
refs/heads/master
| 2020-08-29T21:26:23.402279 | 2020-03-18T07:55:27 | 2020-03-18T07:55:27 | 218,178,158 | 0 | 0 | null | 2019-12-05T10:16:02 | 2019-10-29T01:26:07 |
Vue
|
UTF-8
|
Python
| false | false | 313 |
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2019/10/14 23:28
# @Author : Kong Xiangxu
# @Email : [email protected] / [email protected]
from tornado.web import url

from apps.area.handler import GetAreaHandler

# URL routing table for the "area" app.
urlpatten = [
    # Fetch the list of all regions available for ad delivery
    url('/get/area/', GetAreaHandler)
]
|
[
"[email protected]"
] | |
2d322f049fa8f8f91dfb80709a634df823f3de47
|
26f6313772161851b3b28b32a4f8d255499b3974
|
/Python/RelativeRanks.py
|
d749965e66f0cfbeff330cd167f3bbe034cf128d
|
[] |
no_license
|
here0009/LeetCode
|
693e634a3096d929e5c842c5c5b989fa388e0fcd
|
f96a2273c6831a8035e1adacfa452f73c599ae16
|
refs/heads/master
| 2023-06-30T19:07:23.645941 | 2021-07-31T03:38:51 | 2021-07-31T03:38:51 | 266,287,834 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,254 |
py
|
"""
Given scores of N athletes, find their relative ranks and the people with the top three highest scores, who will be awarded medals: "Gold Medal", "Silver Medal" and "Bronze Medal".
Example 1:
Input: [5, 4, 3, 2, 1]
Output: ["Gold Medal", "Silver Medal", "Bronze Medal", "4", "5"]
Explanation: The first three athletes got the top three highest scores, so they got "Gold Medal", "Silver Medal" and "Bronze Medal".
For the left two athletes, you just need to output their relative ranks according to their scores.
Note:
N is a positive integer and won't exceed 10,000.
All the scores of athletes are guaranteed to be unique.
"""
class Solution:
    def findRelativeRanks(self, scores):
        """
        Map each athlete's score to a rank label: the top three scores get
        "Gold Medal" / "Silver Medal" / "Bronze Medal", the rest get their
        1-based rank as a string. Scores are assumed unique.

        :type scores: List[int]
        :rtype: List[str]
        """
        medal_names = ("Gold Medal", "Silver Medal", "Bronze Medal")
        rank_of = {}
        # Rank scores from highest to lowest; `place` is 1-based.
        for place, value in enumerate(sorted(scores, reverse=True), start=1):
            rank_of[value] = medal_names[place - 1] if place <= 3 else str(place)
        # Re-emit labels in the original input order.
        return [rank_of[value] for value in scores]
# Ad-hoc manual check: descending scores map straight to medals then ranks.
s = Solution()
scores = [5, 4, 3, 2, 1]
print(s.findRelativeRanks(scores))
|
[
"[email protected]"
] | |
a5d7d1e55d35d5dc95e02c6e501613df521f4fb6
|
3873b03ac81354d4ed24e94df5fa8429e726bbd2
|
/titles/101. 对称二叉树.py
|
9236ac01422c19b4ad9190406e21e379f2b5f6e8
|
[] |
no_license
|
lichangg/myleet
|
27032f115597481b6c0f3bbe3b83e80b34c76365
|
3d5a96d896ede3ea979783b8053487fe44e38969
|
refs/heads/master
| 2023-03-21T15:50:14.128422 | 2021-03-16T09:58:07 | 2021-03-16T09:58:07 | 286,616,721 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,428 |
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from utils.util_funcs import Tree
# 这个测试用例[1,2,2,None,3,None,3]过不了,本地能过
#
class Solution:
    # FIX: the original stopped descending as soon as any child was None
    # (`while stack and all(stack)`), so asymmetric trees such as
    # [1,2,2,None,3,None,3] were wrongly reported symmetric (the bug the
    # original author noted above).  It also stored the result in a
    # class-level FLAG, so a False result leaked into every later call.
    # Fixed: compare each level INCLUDING None placeholders, keep all
    # state local.
    FLAG = True  # kept for backward compatibility; no longer shared state

    def isSymmetric(self, root) -> bool:
        """Return True iff the binary tree rooted at `root` is a mirror
        of itself.  Level-order traversal: a tree is symmetric iff every
        level's value list (None for missing children) is a palindrome."""
        level = [root]
        while level:
            # None placeholders keep positions aligned for the mirror check.
            values = [node.val if node else None for node in level]
            if values != values[::-1]:
                return False
            nxt = []
            for node in level:
                if node:
                    nxt.append(node.left)
                    nxt.append(node.right)
            level = nxt
        return True
# 二刷,层序遍历
class Solution:
    def isSymmetric(self, root) -> bool:
        """Level-order symmetry check: the tree is a mirror of itself iff
        every level's value list (with None placeholders for absent
        children) reads the same forwards and backwards."""
        current = [root]
        while current:
            values = []
            children = []
            for node in current:
                if node is None:
                    values.append(None)
                else:
                    values.append(node.val)
                    children.append(node.left)
                    children.append(node.right)
            if values != values[::-1]:
                return False
            current = children
        return True
# Ad-hoc check against the asymmetric case noted above.  `Tree` comes from
# the project's utils.util_funcs helper, so this only runs where that
# module is importable.
t=Tree()
[t.add(i)for i in [1,2,2,None,3,None,3]]
a=Solution().isSymmetric(t.root)
print(a)
|
[
"[email protected]"
] | |
d7cf184777adc0f7980c16fcc2f6eabb750712be
|
6c14069181f313e84eeb524dd495e3882156ef50
|
/samples/basic/crud/models/cisco-ios-xr/Cisco-IOS-XR-mpls-ldp-oper/nc-read-xr-mpls-ldp-oper-10-ydk.py
|
1e872ab64c188f2aa654c40dc1d1d96d25dd113a
|
[
"Apache-2.0"
] |
permissive
|
decolnz/ydk-py-samples
|
dde0fd64fd4df12a215588766a0f1fb8baf07fcd
|
7fa3f53c4d458c3332d372fb2fe3c46c5e036f07
|
refs/heads/master
| 2021-01-19T03:24:19.877929 | 2017-04-04T17:16:46 | 2017-04-04T17:16:46 | 87,310,389 | 1 | 0 | null | 2017-04-05T13:06:57 | 2017-04-05T13:06:57 | null |
UTF-8
|
Python
| false | false | 2,714 |
py
|
#!/usr/bin/env python
#
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Read all data for model Cisco-IOS-XR-mpls-ldp-oper.
usage: nc-read-xr-mpls-ldp-oper-10-ydk.py [-h] [-v] device
positional arguments:
device NETCONF device (ssh://user:password@host:port)
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.services import CRUDService
from ydk.providers import NetconfServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_mpls_ldp_oper \
as xr_mpls_ldp_oper
import logging
def process_mpls_ldp(mpls_ldp):
    """Process data in the mpls_ldp object.

    Placeholder hook: intentionally does nothing yet; fill in with
    application-specific handling of the retrieved operational data.
    """
    return None
if __name__ == "__main__":
    """Execute main program."""
    # Parse the target device URL and verbosity flag from the command line.
    parser = ArgumentParser()
    parser.add_argument("-v", "--verbose", help="print debugging messages",
                        action="store_true")
    parser.add_argument("device",
                        help="NETCONF device (ssh://user:password@host:port)")
    args = parser.parse_args()
    device = urlparse(args.device)

    # log debug messages if verbose argument specified
    if args.verbose:
        logger = logging.getLogger("ydk")
        logger.setLevel(logging.DEBUG)
        handler = logging.StreamHandler()
        formatter = logging.Formatter(("%(asctime)s - %(name)s - "
                                       "%(levelname)s - %(message)s"))
        handler.setFormatter(formatter)
        logger.addHandler(handler)

    # create NETCONF provider (opens the session to the device)
    provider = NetconfServiceProvider(address=device.hostname,
                                      port=device.port,
                                      username=device.username,
                                      password=device.password,
                                      protocol=device.scheme)
    # create CRUD service
    crud = CRUDService()

    mpls_ldp = xr_mpls_ldp_oper.MplsLdp()  # create object

    # read data from NETCONF device
    # NOTE(review): the actual read is commented out, so process_mpls_ldp
    # currently receives an empty MplsLdp object -- presumably intentional
    # for this sample; uncomment to fetch live data.
    # mpls_ldp = crud.read(provider, mpls_ldp)
    process_mpls_ldp(mpls_ldp)  # process object data

    provider.close()
    exit()
# End of script
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.