blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2d5e608579841a44031e64c373d497d78288e98e
|
80f2fa4f1f4d56eef9471174f80b62838db9fc3b
|
/xdl/xdl/python/backend/mxnet/convert_utils.py
|
1182f929bb080337b4f5368b36c8a279477309a7
|
[
"Apache-2.0"
] |
permissive
|
laozhuang727/x-deeplearning
|
a54f2fef1794274cbcd6fc55680ea19760d38f8a
|
781545783a4e2bbbda48fc64318fb2c6d8bbb3cc
|
refs/heads/master
| 2020-05-09T17:06:00.495080 | 2019-08-15T01:45:40 | 2019-08-15T01:45:40 | 181,295,053 | 1 | 0 |
Apache-2.0
| 2019-08-15T01:45:41 | 2019-04-14T10:51:53 |
PureBasic
|
UTF-8
|
Python
| false | false | 3,191 |
py
|
# Copyright 2018 Alibaba Group. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import mxnet as mx
import numpy as np
from mxnet.initializer import One
from mxnet.initializer import Zero
from mxnet.initializer import Constant
from xdl.python.lib.datatype import DataType as xt
from xdl.python.lib.tensorshape import TensorShape as xts
class MX2XDL(object):
    """Helpers converting MXNet-side values (shapes, dtypes, initializers)
    into their XDL equivalents."""

    @staticmethod
    def convert_shape(shape):
        """Convert an MXNet shape sequence into an XDL TensorShape."""
        return xts(list(shape))

    @staticmethod
    def convert_type(dtype):
        """Map a numpy dtype to the XDL DataType enum.

        A dtype of None defaults to float (matching MXNet's float32 default).
        Raises Exception for unsupported dtypes.
        """
        if dtype == np.int16:
            return xt.int16
        elif dtype == np.int32:
            return xt.int32
        elif dtype == np.int64:
            return xt.int64
        elif dtype == np.float32 or dtype is None:
            return xt.float
        elif dtype == np.float64:
            return xt.double
        else:
            raise Exception("unsupported datatype:", dtype)

    @staticmethod
    def convert_initializer(initializer, args):
        """Map an MXNet initializer name plus its args dict to an XDL initializer.

        A None or empty name falls back to zeros. Raises Exception for
        unrecognized initializer names.
        """
        import xdl.python.ops.init_ops as xi
        if initializer is None or initializer == '':
            return xi.Zeros()
        elif initializer == 'one':
            return xi.Ones()
        elif initializer == 'zero':
            return xi.Zeros()
        elif initializer == 'constant' or initializer == 'Constant':
            return xi.Constant(value=args['value'])
        elif initializer == 'uniform':
            # dict.has_key() was removed in Python 3; dict.get() keeps the same
            # default-on-missing behavior and works on Python 2 and 3.
            scale = args.get('scale', 0.07)
            return xi.UniformUnitScaling(factor=scale)
        elif initializer == 'normal':
            sigma = args.get('sigma', 0.01)
            return xi.TruncatedNormal(stddev=sigma)
        elif initializer == 'identity':
            param = args.get('init_value', [])
            return xi.Identity(np.array(param, dtype=np.float32))
        else:
            raise Exception('unsupport mxnet initializer:' + initializer)
class XDL2MX(object):
    """Convert XDL datatypes back to MXNet dtype strings."""

    @staticmethod
    def convert_type(dtype):
        """Return the MXNet dtype name for an XDL DataType.

        Raises Exception when the datatype is not supported.
        """
        # Probe supported types in order, preserving `==` comparison semantics.
        for xdl_dtype, mx_name in ((xt.int16, 'int16'),
                                   (xt.int32, 'int32'),
                                   (xt.int64, 'int64'),
                                   (xt.float, 'float32'),
                                   (xt.double, 'float64')):
            if dtype == xdl_dtype:
                return mx_name
        raise Exception("unsupported datatype:", dtype)
|
[
"[email protected]"
] | |
8470b45483b504a9ac0a11ddad19a85fd67badf5
|
fcde32709c62b8ee86da459bb7c8eee52c848118
|
/爬虫1905/day07/07_maoyanspider.py
|
a58245ebcd950ce02c7237e81b5a8a7f1daa3da5
|
[] |
no_license
|
klaus2015/py_base
|
6b92d362c3d7dc0e09205a037f4d580381dac94d
|
ec32c731c1c2f6a0dab87f1d167397e4fa86b8de
|
refs/heads/master
| 2022-07-28T15:49:30.383648 | 2020-05-11T15:31:43 | 2020-05-11T15:31:43 | 261,777,278 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 619 |
py
|
# Scrape the Maoyan "Top 100" movie board with Selenium and print one dict per
# movie. Requires a local Chrome + chromedriver.
from selenium import webdriver
url = 'https://maoyan.com/board/4'
browser = webdriver.Chrome()
browser.get(url)
# Base xpath: returns one <dd> WebElement per movie entry.
# NOTE(review): find_elements_by_xpath was removed in Selenium 4; an upgrade
# would need find_elements(By.XPATH, ...).
li_list = browser.find_elements_by_xpath('//*[@id="app"]/div/div/div[1]/dl/dd')
for li in li_list:
    item = {}
    # li.text splits into: rank, title, starring, release date, score
    # e.g. ['1', '霸王别姬', '主演:张国荣', '上映时间:1993-01-01', '9.5']
    info_list = li.text.split('\n')
    item['number'] = info_list[0]
    item['name'] = info_list[1]
    item['star'] = info_list[2]
    item['time'] = info_list[3]
    item['score'] = info_list[4]
    print(item)
|
[
"[email protected]"
] | |
852490d729b985e69d687a1f0ed8e5043d18b59a
|
bc539788b876773e294383863252c1637de9eb7f
|
/scrapy/PycharmProjects/Reptile/automation_Testing/pageobjects/primeur.py
|
b5765198c8c00001d1762535bb5b9cbf98cb42fa
|
[] |
no_license
|
umsung/scrapy
|
4eb56bf74f3e617e49dcdec61cf77010eb912f4f
|
deacd9f289159c5af114b0dd3110448ad7eb43e8
|
refs/heads/master
| 2020-05-31T14:11:46.530793 | 2019-10-16T01:32:25 | 2019-10-16T01:32:25 | 190,321,772 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,548 |
py
|
from framework.base_page import BasePage
import time
from selenium.webdriver.common.by import By
import random
from framework.logger import *
logger = Logger('Primeur').getlog()
class Primeur(BasePage):
    """Page object for the wine-futures ("en primeur") section of the store.

    Class attributes are (By strategy, selector) locator tuples consumed by
    the BasePage helpers (find_element, find_elements, expected_conditions).
    """
    navbox = (By.XPATH, "//*[@class='navbox']/a[4]")
    gopage = (By.XPATH, '//*[@id="gopage"]')
    qj_sort = (By.XPATH, "//*[@class='qj-sort']/ul/li[9]")
    addExpectcart = (By.CLASS_NAME, "addExpectcart")
    lazy = (By.CLASS_NAME, "lazy")
    pay_btn = (By.XPATH, '//*[@id="Pay"]')
    addcart_detail = (By.XPATH, "//*[@id='AddCart']")
    priceS = (By.XPATH, '//*[@class="price-v"]')
    ReceiveEmail = (By.XPATH, "//*[@id='ReceiveEmail']")
    checkexpect = (By.XPATH, "//*[@id='checkexpect']")
    num = (By.XPATH, '//*[@class="order-amount"]')
    spanPay = (By.XPATH, '//*[@id="spanPay"]')
    btnToPay = (By.XPATH, "//*[@id='btnToPay']")
    btn_payment = (By.XPATH, "//*[@class='btn-payment']")
    pay_zfb = (By.XPATH, "//*[@class='pay-zfb']")
    pay_cft = (By.XPATH, "//*[@class='pay-cft']")
    pri_input = (By.XPATH, "//*[@class='st-out']/input[1]")
    pri_submit = (By.XPATH, "//*[@class='st-out']/input[2]")

    def primeur_buy(self, email):
        """End-to-end purchase flow.

        Picks random items, opens a detail page in a new window, checks that
        unit price * quantity equals the displayed total, then steps through
        the WeChat / Alipay / Tenpay payment pages and closes the tab.
        """
        self.find_element(*self.navbox).click()
        self.expected_conditions(self.gopage)
        # Click the sort header twice to toggle ordering; the sleeps let the
        # AJAX re-render settle -- presumably no explicit wait hook exists.
        self.find_element(*self.qj_sort).click()
        time.sleep(1.5)
        self.find_element(*self.qj_sort).click()
        time.sleep(1.5)
        # Add a random product (among the first 50) to the expect-cart.
        self.find_elements(*self.addExpectcart)[random.randint(0, 49)].click()
        time.sleep(1)
        # Open a random product detail page; it opens in a second window.
        self.find_elements(*self.lazy)[random.randint(0, 49)].click()
        self.switch_to(1)
        self.expected_conditions(self.addcart_detail).click()
        price = self.expected_conditions(self.priceS).text
        price = price.replace('¥', '')
        self.expected_conditions(self.pay_btn).click()
        self.expected_conditions(self.ReceiveEmail).clear()
        time.sleep(0.5)
        self.find_element(*self.ReceiveEmail).send_keys(email)
        self.expected_conditions(self.checkexpect).click()
        # Verify unit price * quantity against the order total.
        # NOTE(review): exact float equality may misfire on rounded totals.
        order_amount = self.expected_conditions(self.num).text
        t_price = self.expected_conditions(self.spanPay).text
        t_price = t_price.replace(',', '')
        if float(price) * int(order_amount) == float(t_price):
            logger.info('期酒价格正确:{}'.format(price))
        else:
            logger.info('期酒价格错误:{}'.format(price))
            self.get_windows_img()
        self.expected_conditions(self.btnToPay).click()
        time.sleep(1)
        # Default payment method: WeChat Pay
        self.expected_conditions(self.btn_payment).click()
        time.sleep(1)
        self.back()
        # Alipay
        self.expected_conditions(self.pay_zfb).click()
        self.expected_conditions(self.btn_payment).click()
        time.sleep(1)
        self.back()
        # Tenpay
        self.expected_conditions(self.pay_cft).click()
        self.expected_conditions(self.btn_payment).click()
        time.sleep(1)
        self.close()
        self.switch_to(0)

    def primeur_search(self, text):
        """Search within the section, then open a random sub-category link."""
        self.expected_conditions(self.pri_input).send_keys(text)
        self.expected_conditions(self.pri_submit).click()
        self.move_to(*self.navbox)
        self.find_elements(By.XPATH, "//*[@class='extend-qj']/dl//a")[random.randint(0, 7)].click()
        self.expected_conditions((By.XPATH, "//*[@class='qj-sort']/ul/li[9]")).click()
|
[
"[email protected]"
] | |
9caa0a55e144b6e14c9cb2a644b72b93caec68d8
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02407/s588681694.py
|
198ea6576032a9e20876cd8043d50eef73ac6a9b
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 182 |
py
|
# Read a count (consumed but unused) and a list of ints from stdin, then print
# the list reversed, space-separated, on a single line.
num = int(input())
data = list(map(int, input().split()))
print(*data[::-1])
|
[
"[email protected]"
] | |
c9b64c4bfdc00788592a94875e52019ca0453b03
|
2451f5297cdad588f5c1450336bf4de7cd38ebd8
|
/hotline/styles/__init__.py
|
a1af3599f2045b6c43336f947b7e68c0dffaddfa
|
[
"MIT"
] |
permissive
|
danbradham/hotline
|
45aebfa2e3ef53b5782dfcd006351daeed8b45ac
|
267037d2b783f2fd5ed9ad16afaad9a51e821a5f
|
refs/heads/main
| 2021-12-15T02:05:22.577499 | 2021-07-03T13:12:11 | 2021-07-03T13:12:11 | 11,076,114 | 16 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 353 |
py
|
# -*- coding: utf-8 -*-
# Expose every *.css file in this package as a module-level string attribute,
# e.g. styles/dark.css becomes an attribute named `dark` holding the CSS text.
import os
import sys
from glob import glob
this_module = sys.modules[__name__]
this_package = os.path.dirname(__file__)
for file in glob(os.path.join(this_package, '*.css')):
    # NOTE(review): `file` shadows the Python 2 builtin of the same name.
    with open(file, 'r') as f:
        data = f.read()
    # Attribute name is the file name up to the first dot.
    style_name = os.path.basename(file).split('.')[0]
    setattr(this_module, style_name, data)
|
[
"[email protected]"
] | |
b1159fafc24bc513627ba31c35d9f0208fb1d6a7
|
d2a818967193f8f7f9e980ef5ba7decea6cb1065
|
/L1Trigger/L1TMuonEndCap/python/fakeEmtfParams_2017_MC_cff.py
|
3343034d1d16d8aaf7170908508f36e31701472f
|
[
"Apache-2.0"
] |
permissive
|
panoskatsoulis/cmssw
|
1f5bfc6664856032db6609fad1b793d63b31afa6
|
5e32e53f9a775ea197e83fdb1462f99d4c9cb1a9
|
refs/heads/l1t-integration-CMSSW_9_2_8
| 2022-08-14T15:22:14.881299 | 2017-09-01T06:17:20 | 2017-09-01T06:17:20 | 102,378,833 | 0 | 1 | null | 2021-02-12T12:09:50 | 2017-09-04T16:01:18 |
C++
|
UTF-8
|
Python
| false | false | 1,459 |
py
|
import FWCore.ParameterSet.Config as cms
## EMTF parameters for 2017 MC: firmware/LUT versions are pinned manually
## below, while the pT LUT forests come from the conditions database.
## Fills CondFormats from the database
from CondCore.CondDB.CondDB_cfi import CondDB
CondDB.connect = cms.string("frontier://FrontierProd/CMS_CONDITIONS")
## Fills firmware, pT LUT, and PC LUT versions manually
emtfParamsSource = cms.ESSource(
    "EmptyESSource",
    recordName = cms.string('L1TMuonEndcapParamsRcd'),
    iovIsRunNotTime = cms.bool(True),
    firstValid = cms.vuint32(1)
)
emtfParams = cms.ESProducer(
    "L1TMuonEndCapParamsESProducer",
    ## Version 7 was deployed June 8, 2017
    PtAssignVersion = cms.int32(7),
    ## 123456 is default (most up-to-date) firmware version
    FirmwareVersion = cms.int32(123456),
    ## v1 corresponds to data/emtf_luts/ph_lut_v2, used at the beginning of 2017
    PrimConvVersion = cms.int32(1)
)
## Fills pT LUT XMLs ("forests") from the database
emtfForestsSource = cms.ESSource(
    "EmptyESSource",
    recordName = cms.string('L1TMuonEndCapForestRcd'),
    iovIsRunNotTime = cms.bool(True),
    firstValid = cms.vuint32(1)
)
emtfForestsDB = cms.ESSource(
    "PoolDBESSource",
    CondDB,
    toGet = cms.VPSet(
        cms.PSet(
            ## https://cms-conddb.cern.ch/cmsDbBrowser/search/Prod/L1TMuonEndCapForest
            record = cms.string("L1TMuonEndCapForestRcd"),
            ## v7 EMTF pT LUTs from June 8, 2017
            tag = cms.string("L1TMuonEndCapForest_static_Sq_20170613_v7_mc")
        )
    )
)
|
[
"[email protected]"
] | |
fa124ae1000dfb25e11780f6a3e0bfed4690739f
|
c6c61ae056151292b84cb8840bc90120bdea0152
|
/payment_bridge/tests/common.py
|
1a1c907589d6f1860c4644e7e52c44c1170984d4
|
[] |
no_license
|
zbyte64/active_merchant_compat
|
a61bd0a1dbdbd2e76af71264aff0cefc606f1cfc
|
e9a95563c8c7afec684b13ff40836a8177c3a0f2
|
refs/heads/master
| 2021-01-25T08:55:06.816324 | 2012-11-28T19:54:51 | 2012-11-28T19:54:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,759 |
py
|
import base64
import json
import unittest
import yaml
import os
from payment_bridge.wsgi import BaseDirectPostApplication
global_config = {}
# Gateway credentials are read from ./gateways.yaml, keyed by gateway module.
inpath = os.path.join(os.getcwd(), 'gateways.yaml')
if os.path.exists(inpath):
    infile = open(inpath)
    # NOTE(review): yaml.load without an explicit Loader can construct
    # arbitrary objects; yaml.safe_load is preferable even for a local config.
    # The file handle is also never closed.
    global_config = yaml.load(infile) or {}
else:
    # Python 2 print statement (this module is Python 2 throughout).
    print "Please create the following file with gateway credentials:", inpath
class BaseTestDirectPostApplication(BaseDirectPostApplication):
    """Direct-post WSGI app wired to a single gateway for the test suite.

    "Encryption" here is just base64-encoded JSON -- enough to exercise the
    encode/decode round trip in tests.
    """
    def __init__(self, **kwargs):
        self.gateway = kwargs.pop('gateway')
        super(BaseTestDirectPostApplication, self).__init__(**kwargs)

    def load_gateways_config(self):
        # The app under test sees exactly one configured gateway.
        return [self.gateway]

    def decrypt_data(self, encrypted_data):
        """Takes an encoded string and returns a dictionary"""
        decoded = base64.b64decode(encrypted_data)
        return json.loads(decoded)

    def encrypt_data(self, params):
        """Takes a dictionary and returns a string"""
        serialized = json.dumps(params)
        return base64.b64encode(serialized)
class PaymentData(object):
    """Canned card / billing / shipping fixtures for gateway tests.

    Accessors hand back fresh dict copies so callers can mutate freely.
    """
    cc_info = {
        'cc_number':'4111 1111 1111 1111',
        'cc_exp_year': '2015',
        'cc_exp_month': '11',
        'cc_ccv': '111',
        'bill_first_name':'John',
        'bill_last_name': 'Smith',
    }
    bill_address = {
        'bill_first_name':'John',
        'bill_last_name': 'Smith',
        'bill_address1':'5555 Main St',
        'bill_address2':'',
        'bill_city':'San Diego',
        'bill_state':'CA',
        'bill_country':'US',
        'bill_zip':'92101',
        'bill_email':'[email protected]',
    }
    ship_address = {
        'ship_first_name':'John',
        'ship_last_name': 'Smith',
        'ship_address1':'5555 Main St',
        'ship_address2':'',
        'ship_city':'San Diego',
        'ship_state':'CA',
        'ship_country':'US',
        'ship_zip':'92101',
        'ship_email':'[email protected]',
    }

    def get_cc_info(self):
        """Copy of the raw card fields."""
        return dict(self.cc_info)

    def get_bill_address(self):
        """Copy of the billing address."""
        return dict(self.bill_address)

    def get_ship_address(self):
        """Copy of the shipping address."""
        return dict(self.ship_address)

    def get_bill_info(self):
        """Card fields merged with the billing address."""
        merged = self.get_cc_info()
        merged.update(self.bill_address)
        return merged

    def get_all_info(self):
        """Card + billing + shipping fields in one dict."""
        merged = self.get_bill_info()
        merged.update(self.ship_address)
        return merged
class BaseGatewayTestCase(unittest.TestCase):
    """Shared fixture for gateway tests.

    Skips the test when the gateway has no credentials in gateways.yaml or
    does not support the requested action.
    """
    gateway = {}

    def setUp(self):
        self.checkGatewayConfigured()
        gateway_conf = dict(self.gateway)
        gateway_conf['params'] = self.read_gateway_params()
        self.application = BaseTestDirectPostApplication(
            redirect_to='http://localhost:8080/direct-post/', gateway=gateway_conf)
        self.data_source = PaymentData()

    def tearDown(self):
        self.application.shutdown()

    def read_gateway_params(self):
        """Credentials for this gateway's module, or None if unconfigured."""
        return global_config.get(self.gateway['module'], None)

    def get_supported_actions(self):
        """Ask the bridge (once) which actions the gateway supports."""
        if not hasattr(self, '_supported_actions'):
            # Calling a gateway with action=None requests the supported actions.
            response = self.application.call_bridge(
                data=None, secure_data=None, gateway='test', action=None)
            if response['message'] == 'Unrecognized gateway':
                self.skipTest(response['message'])
            self._supported_actions = response['supported_actions']
        return self._supported_actions

    def checkGatewayConfigured(self):
        if self.read_gateway_params() == None:
            self.skipTest("Gateway unconfigured")

    def checkGatewaySupport(self, action):
        if action not in self.get_supported_actions():
            self.skipTest("Unsupported action: %s" % action)
|
[
"[email protected]"
] | |
e3f10a7582e6fc5d779950e44c40a5806c9fe248
|
b7f45072d056b80ed49e6bcde91877d8576e970d
|
/ImageJ/py/test_close_non_image_window.py
|
2cd2c6e0e90d945001ede2dac8896cf07f92104b
|
[] |
no_license
|
jrminter/tips
|
128a18ee55655a13085c174d532c77bcea412754
|
f48f8b202f8bf9e36cb6d487a23208371c79718e
|
refs/heads/master
| 2022-06-14T08:46:28.972743 | 2022-05-30T19:29:28 | 2022-05-30T19:29:28 | 11,463,325 | 5 | 8 | null | 2019-12-18T16:24:02 | 2013-07-17T00:16:43 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 470 |
py
|
from ij import IJ, ImagePlus, WindowManager, Prefs, ImageStack
import jmFijiGen as jmg
# Jython/ImageJ demo: run an analysis that opens auxiliary windows, then close
# the non-image windows (Results table, ROI Manager) via the jmFijiGen helper.
IJ.run("Close All")
# load an image and create a Result Window and a ROI Manager
imp = IJ.openImage("http://imagej.nih.gov/ij/images/blobs.gif")
imp.show()
IJ.setAutoThreshold(imp, "Default");
# NOTE(review): unlike the neighboring calls this one omits the ImagePlus
# argument, so it acts on the current image -- confirm intended.
IJ.run("Convert to Mask")
IJ.run(imp, "Analyze Particles...", "display exclude clear add")
# Close the windows that Analyze Particles opened.
jmg.close_open_non_image_window("Results")
jmg.close_open_non_image_window("ROI Manager")
|
[
"[email protected]"
] | |
af3a768529efb8bb50385450db2321e290882c18
|
f0d583a064cc53510d8b00b42ac869832e70bf41
|
/facerecognition/evaluate/FaceCropper.py
|
6826195a7621246c2a234747f1b31805e68464ca
|
[] |
no_license
|
PaulZoni/nn
|
918d543b4b2d955ff991da70ce4e88d4d94d13c8
|
25a81579499c893584b040f536ddbef254197f4e
|
refs/heads/master
| 2020-04-27T19:05:10.968050 | 2019-06-27T12:22:16 | 2019-06-27T12:22:16 | 174,564,933 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,879 |
py
|
import cv2
import numpy as np
class FaceCropper(object):
    """Detect faces with an OpenCV Haar cascade and return square crops.

    generate() returns a numpy array of face crops resized to (size, size),
    or the int 0 on any failure -- the 0 return is kept for backward
    compatibility with callers that compare the result against 0.
    """
    # NOTE(review): absolute, machine-specific paths; consider making these
    # configurable.
    CASCADE_PATH = "/home/pavel/PycharmProjects/nn/facerecognition/evaluate/haarcascade_frontalface_default.xml"
    frontal_face_extended = "/home/pavel/PycharmProjects/nn/facerecognition/evaluate/haarcascade_frontalcatface_extended.xml"

    def __init__(self):
        self.face_cascade = cv2.CascadeClassifier(self.CASCADE_PATH)

    def generate(self, image_path=None, show_result=None, size=32, inter=cv2.INTER_AREA, frame=None):
        """Detect faces in a file (image_path) or an in-memory frame.

        Returns a numpy array of cropped faces, or 0 when the image cannot be
        read or no face is detected.
        """
        # Prefer the in-memory frame when supplied; otherwise read from disk.
        if frame is None:
            img = cv2.imread(image_path)
        else:
            img = frame
        # cv2.imread returns None on a bad/missing path.
        if img is None:
            print("Can't open image file")
            return 0
        print(len(img))  # debug: number of image rows
        faces = self.face_cascade.detectMultiScale(img, 1.1, 3, minSize=(100, 100),)
        # detectMultiScale normally returns an empty sequence (not None) when
        # nothing is found; the None check is kept defensively, and the empty
        # case is handled by the facecnt test below.
        if faces is None:
            print('Failed to detect face')
            return 0
        if show_result:
            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            image_resize = cv2.resize(img, (960, 540))
            cv2.imshow('img', image_resize)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
        facecnt = len(faces)
        print("Detected faces: %d" % facecnt)
        # Bug fix: was `facecnt is 0` -- identity comparison against an int
        # literal is implementation-defined (and warns on Python >= 3.8).
        if facecnt == 0:
            return 0
        height, width = img.shape[:2]
        last_images = []
        for (x, y, w, h) in faces:
            # Expand each detection to a centered square of side 2r before
            # resizing, so the crop keeps the face aspect.
            r = max(w, h) / 2
            centerx = x + w / 2
            centery = y + h / 2
            nx = int(centerx - r)
            ny = int(centery - r)
            nr = int(r * 2)
            faceimg = img[ny:ny + nr, nx:nx + nr]
            lastimg = cv2.resize(faceimg, (size, size), interpolation=inter)
            last_images.append(lastimg)
        return np.array(last_images)
|
[
"[email protected]"
] | |
e874ce17a476b6813ee430fd51b64ccbb202365f
|
e174e13114fe96ad2a4eeb596a3d1c564ae212a8
|
/Python for Finance Analyze Big Financial Data by Y. Hilpisch/Code of Python For Finance/4375OS_03_Code/4375OS_03_22_dir2_default_input_value.py
|
2c4bf3307842ec868e3d734bbc87c545bf5e7179
|
[] |
no_license
|
Kevinqian0501/python_books
|
c1a7632d66dceb46db439f7cbed86d85370aab42
|
0691e4685af03a296aafb02447e3585db55ce461
|
refs/heads/master
| 2021-08-30T19:27:03.985464 | 2017-12-19T05:56:31 | 2017-12-19T05:56:31 | 104,145,012 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 372 |
py
|
"""
Name : 4375OS_03_22_dir2_default_input_value.py
Book : Python for Finance
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan
Date : 12/25/2013
email : [email protected]
[email protected]
"""
# Demonstrates a parameter with a default input value.
def dir2(path='c:\python32'):
    """Print the directory listing of *path* (defaults to c:/python32)."""
    import os
    print(os.listdir(path))
|
[
"[email protected]"
] | |
0e266919ea78b49136e3fa48756b2e0ad863ee7f
|
6390a7f030cc6b2ff61237e41360af2d270e1efb
|
/tests/numpy/type.py
|
bf439e924a092a27e2e2c855344ea182de7bcdd3
|
[
"MIT"
] |
permissive
|
davidkellis/py2rb
|
b999ca4c3b9316d19ac42c6d57fbbc158ee35700
|
4518a1549cfacc25a1ea3c736bca3de15a123878
|
refs/heads/master
| 2023-06-22T05:32:16.209823 | 2021-07-05T01:55:53 | 2021-07-05T01:55:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 231 |
py
|
# coding: utf-8
# Print the numpy scalar types. np.int was an alias for the builtin int and
# was removed in NumPy 1.24; printing the builtin directly produces the same
# output ("<class 'int'>") and keeps the script working on modern NumPy.
import numpy as np
print(np.int8)
print(np.int16)
print(np.int32)
print(np.int64)
print(int)
print(np.uint8)
print(np.uint16)
print(np.uint32)
print(np.uint64)
print(np.uint)
print(np.float32)
print(np.float64)
|
[
"[email protected]"
] | |
1343447d884966d58eae60eff8a5d897df8e129a
|
0aec617440075b73e5da64cd1477b6a098ed864c
|
/data_structures/recursion/binary_search.py
|
496f81ebb3311fb3e671c99933525b63e1203629
|
[
"MIT"
] |
permissive
|
severian5it/udacity_dsa
|
0b1512cc8c5125149d6be6f78fa14446e7ab5c25
|
e47f27b0179961d6107fe46a236ac7d887fe6816
|
refs/heads/main
| 2023-03-07T02:24:37.299599 | 2021-02-14T10:34:50 | 2021-02-14T10:34:50 | 316,949,338 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 533 |
py
|
# Classic binary search: O(log n) on a sorted list.
def binary_search(arr, target):
    """Return the index of *target* in sorted *arr*, or -1 if absent."""
    return binary_search_func(arr, 0, len(arr) - 1, target)

def binary_search_func(arr, start_index, end_index, target):
    """Recursively search arr[start_index .. end_index] (inclusive)."""
    if start_index > end_index:
        # empty window: target is not present
        return -1
    middle = (start_index + end_index) // 2
    pivot = arr[middle]
    if pivot == target:
        return middle
    if pivot > target:
        return binary_search_func(arr, start_index, middle - 1, target)
    return binary_search_func(arr, middle + 1, end_index, target)
|
[
"[email protected]"
] | |
2bc186d49fd3741a5945895a8313e016d372f690
|
d10724d15f2888c5d2de8abb340995aa2a2074b9
|
/examples/python/src/07fizzbuzz/main.py
|
a7bacd972b2efa6b474803bcf2f437439b106265
|
[
"MIT"
] |
permissive
|
podhmo/prestring
|
5849e7f7de3626e8a1f48740190d98cd55bd3721
|
8a3499377d1b1b2b180809b31bd7536de5c3ec4d
|
refs/heads/master
| 2021-07-16T06:35:10.555681 | 2021-03-28T05:35:37 | 2021-03-28T05:35:37 | 31,548,112 | 10 | 1 |
MIT
| 2021-03-28T05:27:35 | 2015-03-02T15:53:34 |
Python
|
UTF-8
|
Python
| false | false | 292 |
py
|
def fizzbuzz(n: int) -> str:
    """Return "fizz"/"buzz"/"fizzbuzz" for multiples of 3/5/15, else str(n)."""
    # Bool multiplication builds "", "fizz", "buzz", or "fizzbuzz".
    words = "fizz" * (n % 3 == 0) + "buzz" * (n % 5 == 0)
    return words or str(n)

if __name__ == "__main__":
    print(", ".join(fizzbuzz(i) for i in range(1, 21)))
|
[
"[email protected]"
] | |
694a55ffe10f4262a60d4c2029e30a6b57a22ff9
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_200/499.py
|
b15fef3836eebfaaa053844940f5b5fa956d25de
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 514 |
py
|
# Google Code Jam solution (Python 2: xrange / raw_input / print statement).
# Appears to solve "Tidy Numbers": for each n, find the largest number <= n
# whose digits are non-decreasing -- TODO confirm against the problem page.
for _ in xrange(input()):
    print "Case #%d:" % (_+1),
    n = raw_input()
    l = len(n)
    nn = map(int, n)
    # dfs builds candidate digit strings left to right.
    #   c:    current digit position
    #   less: 1 once a digit strictly smaller than n's was chosen, so every
    #         remaining position can safely be '9'
    #   st:   digits accumulated so far
    def dfs(c, less, st):
        if c == l:
            return int(st)
        if less:
            v = dfs(c+1, 1, st + '9')
        else:
            v = 0
        # Keep n's digit at position c if the non-decreasing order allows it.
        if c == l-1 or nn[c] <= nn[c+1]:
            v = max(v, dfs(c+1, 0, st + n[c]))
        # Or lower this digit by one (only if the previous digit permits it).
        if c == 0 or nn[c-1] <= nn[c]-1:
            v = max(v, dfs(c+1, 1, st + str(nn[c]-1)))
        return v
    print dfs(0, 0, "")
|
[
"[email protected]"
] | |
45b7193b9e36e0ceb7d6cdceeb758a380ea8adb4
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-gsn-edf/gsn-edf_ut=3.5_rd=0.5_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=75/sched.py
|
580c5a476d4a3ce082bab13eb8366ff4f2034cf6
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 349 |
py
|
-X FMLP -Q 0 -L 2 84 250
-X FMLP -Q 0 -L 2 79 400
-X FMLP -Q 0 -L 2 66 300
-X FMLP -Q 1 -L 1 54 250
-X FMLP -Q 1 -L 1 50 250
-X FMLP -Q 1 -L 1 49 400
-X FMLP -Q 2 -L 1 41 300
-X FMLP -Q 2 -L 1 37 125
-X FMLP -Q 3 -L 1 35 250
-X FMLP -Q 3 -L 1 31 300
30 125
28 125
26 300
25 100
21 100
19 125
15 175
11 100
10 100
7 100
|
[
"[email protected]"
] | |
2adfa7d968a07dd30d191878d89081daf3f7949b
|
c7e028d71b5dd72eb18b72c6733e7e98a969ade6
|
/src/demos/datastructures/fifo.py
|
74444fc181f799a0428cb21e7b27d0e754254573
|
[
"MIT"
] |
permissive
|
antoniosarosi/algoritmia
|
da075a7ac29cc09cbb31e46b82ae0b0ea8ee992f
|
22b7d61e34f54a3dee03bf9e3de7bb4dd7daa31b
|
refs/heads/master
| 2023-01-24T06:09:37.616107 | 2020-11-19T16:34:09 | 2020-11-19T16:34:09 | 314,302,653 | 8 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 341 |
py
|
#coding: latin1
#< full
# Demo: two Fifo queues behave identically whether backed by the default
# container or by an explicit list supplied via the createList factory.
from algoritmia.datastructures.queues import Fifo
dfltFifo = Fifo([0, 1])
listBasedFifo = Fifo([0, 1], createList=lambda data: list(data))
# Push 2..5 onto both queues.
for i in range(2, 6):
    listBasedFifo.push(i)
    dfltFifo.push(i)
# Drain both; FIFO order means each printed pair holds equal values.
while len(listBasedFifo) > 0:
    print(dfltFifo.pop(), listBasedFifo.pop(), end=" : ")
#> full
|
[
"amarzal@localhost"
] |
amarzal@localhost
|
b64d842a5f0f64d7ae91f197a6e0a98a5a0be31d
|
f7a474af31989a7492411b9e18ba76d3c1527029
|
/Day-18/DjangoForm/views.py
|
b64cc455fe609dd5123bb340d8d00f2c33eeac6a
|
[] |
no_license
|
chikkalarameshsaikumar/Django-TOT
|
01fa4190ca7d2c23e3e0d74e704037babd5b3217
|
fb91bb6b2db306b1379f2c00f8d5d27e9b5821f2
|
refs/heads/main
| 2023-02-05T00:13:03.310573 | 2020-12-25T11:10:03 | 2020-12-25T11:10:03 | 339,008,757 | 0 | 1 | null | 2021-02-15T08:18:18 | 2021-02-15T08:18:18 | null |
UTF-8
|
Python
| false | false | 1,545 |
py
|
from django.shortcuts import render,redirect
from django.http import HttpResponse
# Create your views here.
# from DjangoForm.forms import DynamicHtmlFormGen, RegisterForm
from .models import Register
from .forms import Reg
def registerForm(request):
    """Render the register form; on POST save the submission.

    NOTE(review): RegisterForm is never imported -- the import at the top of
    this module is commented out -- so this view raises NameError when hit.
    The POST branch also calls save() without is_valid(); confirm intended.
    """
    if request.method=='POST':
        #data = request.POST
        #print(data)
        # name = data['name']
        # print(name)
        f = RegisterForm(request.POST)
        f.save()
        return HttpResponse("record inserted successfully...")
    f = RegisterForm()
    return render(request,'DjangoForm/registerForm.html',{"f":f})
def fetchAll(request):
    """Render every Register row with the fetchAll template."""
    records = Register.objects.all()
    return render(request,'DjangoForm/fetchAll.html',{'data':records})
def dynamicHtmlFormGen(request):
    """Render an auto-generated HTML form.

    NOTE(review): DynamicHtmlFormGen is never imported -- the import at the
    top of this module is commented out -- so this view raises NameError.
    """
    # return HttpResponse("hi i am working fine")
    t = DynamicHtmlFormGen()
    return render(request,'DjangoForm/dynamicHtmlFormGen.html',{'form':t})
def home(request):
    """Render the static landing page."""
    return render(request,'DjangoForm/home.html')
def rgform(request):
    """Create a Register row via the Reg form.

    GET renders an empty form; a valid POST saves and redirects to '/'.
    An invalid POST re-renders the *bound* form so field errors are shown
    (previously a fresh unbound form was built, silently discarding them).
    """
    if request.method == "POST":
        form = Reg(request.POST)
        if form.is_valid():
            form.save()
            return redirect("/")
    else:
        form = Reg()
    return render(request,'DjangoForm/register.html',{'tg':form})
def fetchall(request):
    """Render every Register row with the fetch template."""
    rows = Register.objects.all()
    return render(request,'DjangoForm/fetch.html',{'y':rows})
def upd(request,id):
    """Update the Register row with primary key *id* via the Reg form.

    A valid POST saves and redirects to '/ft'. An invalid POST re-renders
    the *bound* form so validation errors are displayed (previously the
    bound form was discarded and rebuilt clean from the instance).
    """
    a = Register.objects.get(id=id)
    if request.method == "POST":
        form = Reg(request.POST,instance=a)
        if form.is_valid():
            form.save()
            return redirect('/ft')
    else:
        form = Reg(instance=a)
    return render(request,'DjangoForm/update.html',{'t':form})
|
[
"[email protected]"
] | |
fc34da7d0a63f931eb43704be15efd3f638678f9
|
650b3dd4cc74f32db78f7d99cef9907aec78a222
|
/dialogs/tools/fDepreciation_data.py
|
832cfd0bbed68f7aae6e702a9f8b189942aee073
|
[] |
no_license
|
mech4/PKTrx
|
29b871ab587434e7c208175c248f48d9b6c80a17
|
cf01bc5be8837d632974786d2419c58b94a0381d
|
refs/heads/master
| 2020-03-29T19:55:07.331831 | 2012-09-18T20:22:52 | 2012-09-18T20:22:52 | 6,289,691 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 525 |
py
|
import sys
import com.ihsan.foundation.pobjecthelper as phelper
def FormSetDataEx(uideflist,params):
    """Populate uipData with the last accounting close date and set the
    processing date to the following day.

    Raises Exception carrying the backend error message when the
    AccountingDay.GetLastCloseDate call reports an error.
    """
    config = uideflist.config
    uipData = uideflist.uipData.Dataset.AddRecord()
    app = config.AppObject
    res = app.rexecscript('accounting','appinterface/AccountingDay.GetLastCloseDate',app.CreateValues())
    rec = res.FirstRecord
    # Bug fix: `raise '', msg` raises a string, which is invalid in any
    # Python >= 2.6 (string exceptions were removed); raise a real exception.
    if rec.Is_Err : raise Exception(rec.Err_Message)
    LastCloseDate = int(rec.LastCloseDate)
    uipData.LastCloseDate = LastCloseDate
    uipData.ProcessDate = LastCloseDate + 1
|
[
"[email protected]"
] | |
e7591c29d28eb94dede0687778c05ae5ebba9be1
|
b08870f8fe7b3cf1bbab3c52a7bacbb36ee1dcc6
|
/verp/hr/doctype/department/department.py
|
78df3a770042e793040a9911f00f7b77bfe97d92
|
[] |
no_license
|
vsadminpk18/verpfinalversion
|
7148a64fe6134e2a6371470aceb1b57cc4b5a559
|
93d164b370ad9ca0dd5cda0053082dc3abbd20da
|
refs/heads/master
| 2023-07-13T04:11:59.211046 | 2021-08-27T06:26:48 | 2021-08-27T06:26:48 | 400,410,611 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,302 |
py
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils.nestedset import NestedSet, get_root_of
from verp.utilities.transaction_base import delete_events
from frappe.model.document import Document
class Department(NestedSet):
	"""HR Department stored as a nested-set tree (lft/rgt maintained by NestedSet)."""
	# Link field NestedSet uses to walk the tree.
	nsm_parent_field = 'parent_department'

	def autoname(self):
		"""Name non-root departments "<department_name> - <company abbr>"."""
		root = get_root_of("Department")
		if root and self.department_name != root:
			self.name = get_abbreviated_name(self.department_name, self.company)
		else:
			self.name = self.department_name

	def validate(self):
		# Orphan departments are attached under the tree root.
		if not self.parent_department:
			root = get_root_of("Department")
			if root:
				self.parent_department = root

	def before_rename(self, old, new, merge=False):
		# renaming consistency with abbreviation
		if not frappe.get_cached_value('Company', self.company, 'abbr') in new:
			new = get_abbreviated_name(new, self.company)
		return new

	def on_update(self):
		# Skip the nested-set rebuild when the flag is set -- presumably during
		# bulk operations; confirm against callers that set ignore_update_nsm.
		if not frappe.local.flags.ignore_update_nsm:
			super(Department, self).on_update()

	def on_trash(self):
		super(Department, self).on_trash()
		# Remove calendar events linked to this department.
		delete_events(self.doctype, self.name)
def on_doctype_update():
	"""Index (lft, rgt) to speed up nested-set subtree queries."""
	frappe.db.add_index("Department", ["lft", "rgt"])
def get_abbreviated_name(name, company):
	"""Return "<name> - <abbr>" using the company's cached abbreviation."""
	company_abbr = frappe.get_cached_value('Company', company, 'abbr')
	return '{0} - {1}'.format(name, company_abbr)
@frappe.whitelist()
def get_children(doctype, parent=None, company=None, is_root=False):
	"""Return child departments for the tree view as [{value, expandable}].

	The company acts as the tree root: when parent == company the root
	department itself is returned.
	"""
	condition = ''
	var_dict = {
		"name": get_root_of("Department"),
		"parent": parent,
		"company": company,
	}
	if company == parent:
		condition = "name=%(name)s"
	elif company:
		condition = "parent_department=%(parent)s and company=%(company)s"
	else:
		condition = "parent_department = %(parent)s"
	# User-supplied values go through %(...)s query parameters; `condition`
	# holds only one of the fixed strings above. NOTE(review): `doctype` is
	# interpolated directly into the SQL on a whitelisted endpoint -- verify
	# it cannot be attacker-controlled.
	return frappe.db.sql("""
		select
			name as value,
			is_group as expandable
		from `tab{doctype}`
		where
			{condition}
		order by name""".format(doctype=doctype, condition=condition), var_dict, as_dict=1)
@frappe.whitelist()
def add_node():
	"""Create a department from the tree view's form_dict arguments."""
	from frappe.desk.treeview import make_tree_args
	args = frappe.form_dict
	args = make_tree_args(**args)
	# Top-level nodes hang off the company node, not a parent department.
	if args.parent_department == args.company:
		args.parent_department = None
	frappe.get_doc(args).insert()
|
[
"[email protected]"
] | |
212ae839fc4995842e57d2a227c3fc5d77dc51fb
|
8a58b02b1dfc97bf56a5fd94732316c032e24a70
|
/api/tests.py
|
d76ab163735695925faa78e7a7a3345bf8ab58bb
|
[] |
no_license
|
momentum-team-2/example--django-recipebook
|
ab04d4957268ed8251e84d8a09cfc60a138c9d9f
|
4a4e17c396fcc9f4c648cea494c4ae6d5dc5e570
|
refs/heads/main
| 2022-11-28T13:40:13.301591 | 2020-08-05T14:09:55 | 2020-08-05T14:09:55 | 279,464,956 | 0 | 0 | null | 2023-09-04T18:58:14 | 2020-07-14T02:50:58 |
Python
|
UTF-8
|
Python
| false | false | 762 |
py
|
from django.test import TestCase
from rest_framework.test import APIClient
from users.models import User
from rest_framework.authtoken.models import Token
# Create your tests here.
class RecipesAPITestCase(TestCase):
    """API tests for the recipes endpoint."""

    def test_user_is_added_to_recipe_on_creation(self):
        """POSTing a recipe with token auth attaches the requesting user."""
        user = User.objects.create(username="test")
        # NOTE(review): assumes a Token is auto-created for new users (e.g. a
        # post_save signal); otherwise token is None and .key raises -- confirm.
        token = Token.objects.filter(user=user).first()
        client = APIClient()
        client.credentials(HTTP_AUTHORIZATION="Token " + token.key)
        response = client.post(
            "/api/recipes/",
            {"title": "Test Recipe", "ingredients": [], "steps": []},
            format="json",
        )
        self.assertEqual(response.status_code, 201)
        self.assertEqual(response.data["user"], user.username)
|
[
"[email protected]"
] | |
718559c2ac4ab854f51d624b912324dcf7fe2be7
|
20b76d0a9a2d31ec929ffcdb082931201b58361f
|
/homework/2020-09-20/2020-09-20-杨婷婷.py
|
46f6234d029e6c75645919444ff24e57147ec43e
|
[] |
no_license
|
yangtingting123456/interfaceiframe
|
3a6ff3f386cb98dcf7849ea3ab52a8ce93c6d306
|
12fc9ec2366f220a5cb1ce51c3a6a9ad7316316e
|
refs/heads/master
| 2023-01-02T00:13:53.878122 | 2020-10-26T06:08:16 | 2020-10-26T06:08:16 | 306,569,037 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,012 |
py
|
# 1、charles 的 三种过滤方式操作截图做成文档
#见charles四种过滤数据文档
# 2、charles 抓取 论坛 注册 、发帖数据,截图抓到了请求即可
# 3、requests 编写脚本 实现获取access_token、增加标签接口、实现查询标签接口、实现删除标签接口
# 用的公司项目做的,登录(获取token,密码md5加密)-获取用户列表-用户更新,详情,-退出等;
# 4、requests 模拟 https://www.qq.com的请求,用re模块截取出
# <meta name="description" content="(.+?)" />中的content内容
# import requests
# import re
#
# response = requests.get(url='https://www.qq.com' )
# body = response.content.decode('gbk')
# # print(body)
# content = re.findall(' <meta name="description" content="(.+?)" /> ',body)
# print(content)
import re
import requests

# Fetch the qq.com homepage. The site serves GBK-encoded HTML, so decode
# the raw bytes explicitly instead of relying on response.text.
response = requests.get(url='https://www.qq.com')
body = response.content.decode('gbk')
# print(body)
# Extract the content attribute of the <meta name="description"> tag;
# the non-greedy (.+?) stops at the closing quote.
con = re.findall(' name="description" content="(.+?)"',body)
print( con )
|
[
"[email protected]"
] | |
8d6dee6211d3b8e0bd8f42cb2ce3ca58cf345e87
|
54bc239124576563c1f0c72e381fb2a4fcaa6a9e
|
/Adafruit_AD8495_Guide/AD8495_Temperature.py
|
4546df8dcb61aa12248110733193b2823c7e335d
|
[
"MIT"
] |
permissive
|
jonsampson/Adafruit_Learning_System_Guides
|
79359154e26e710b088e0c1cbc9969a26a938a25
|
b941d8209cec42e3dce5f5e6b533584e3e99ac73
|
refs/heads/master
| 2020-07-29T17:43:53.439741 | 2019-10-14T01:53:01 | 2019-10-14T01:53:01 | 209,904,940 | 3 | 1 |
MIT
| 2019-09-21T01:04:35 | 2019-09-21T01:04:34 | null |
UTF-8
|
Python
| false | false | 283 |
py
|
import time
import analogio
import board

# AD8495 thermocouple amplifier output, read on analog pin A1.
ad8495 = analogio.AnalogIn(board.A1)
def get_voltage(pin):
    """Convert a 16-bit analog reading on *pin* to volts (3.3 V reference)."""
    raw = pin.value
    return raw * 3.3 / 65536
# AD8495 transfer function: Vout = 1.25 V + 5 mV/degC, inverted here to
# recover the temperature; poll twice a second.
while True:
    temperature = (get_voltage(ad8495) - 1.25) / 0.005
    print(temperature)
    print(get_voltage(ad8495))
    time.sleep(0.5)
|
[
"[email protected]"
] | |
db985281b42e7256f86e97b45e00e71da8cd0b1d
|
f4b60f5e49baf60976987946c20a8ebca4880602
|
/lib/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/psu/instpol.py
|
82e180acf3f6c666ea7d10c6bd65a11d575327f0
|
[] |
no_license
|
cqbomb/qytang_aci
|
12e508d54d9f774b537c33563762e694783d6ba8
|
a7fab9d6cda7fadcc995672e55c0ef7e7187696e
|
refs/heads/master
| 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 |
Python
|
UTF-8
|
Python
| false | false | 6,877 |
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class InstPol(Mo):
"""
The power redundancy policy is for all power supply units on the fabric nodes (leaves and spines) that are consuming the power supply policy through their respective selector profile policy.
"""
meta = ClassMeta("cobra.model.psu.InstPol")
meta.moClassName = "psuInstPol"
meta.rnFormat = "psuInstP-%(name)s"
meta.category = MoCategory.REGULAR
meta.label = "Power Supply Redundancy Policy"
meta.writeAccessMask = 0x20000000001
meta.readAccessMask = 0x800ae700000001
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = True
meta.isContextRoot = False
meta.childClasses.add("cobra.model.psu.RtPsuInstPolCons")
meta.childClasses.add("cobra.model.psu.RtResPsuInstPol")
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childClasses.add("cobra.model.psu.RtPsuInstPol")
meta.childNamesAndRnPrefix.append(("cobra.model.psu.RtPsuInstPol", "rtfabricPsuInstPol-"))
meta.childNamesAndRnPrefix.append(("cobra.model.psu.RtPsuInstPolCons", "rtpsuInstPolCons"))
meta.childNamesAndRnPrefix.append(("cobra.model.psu.RtResPsuInstPol", "rtresPsuInstPol"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.parentClasses.add("cobra.model.fabric.Inst")
meta.superClasses.add("cobra.model.fabric.ProtoPol")
meta.superClasses.add("cobra.model.fabric.ProtoInstPol")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.pol.Def")
meta.superClasses.add("cobra.model.fabric.UtilInstPol")
meta.rnPrefixes = [
('psuInstP-', True),
]
prop = PropMeta("str", "adminRdnM", "adminRdnM", 765, PropCategory.REGULAR)
prop.label = "Admin Redundancy Mode"
prop.isConfig = True
prop.isAdmin = True
prop.defaultValue = 3
prop.defaultValueStr = "comb"
prop._addConstant("comb", "combined", 3)
prop._addConstant("insrc-rdn", "input-source-redundancy", 6)
prop._addConstant("n-rdn", "non-redundant", 4)
prop._addConstant("not-supp", "not-supported", 1)
prop._addConstant("ps-rdn", "n+1-redundancy", 5)
prop._addConstant("rdn", "n+n-redundancy", 2)
prop._addConstant("sinin-rdn", "single-input-redundancy", 7)
prop._addConstant("unknown", "unknown", 0)
meta.props.add("adminRdnM", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "name", "name", 7080, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.range = [(1, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerKey", prop)
prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerTag", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "uid", "uid", 8, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("uid", prop)
meta.namingProps.append(getattr(meta.props, "name"))
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Policy"
def __init__(self, parentMoOrDn, name, markDirty=True, **creationProps):
namingVals = [name]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"[email protected]"
] | |
ea44472cf613f7a505cdbd709dcbf6b69628ed94
|
35d42fa466f6457c83f9e89b6e87e050c0189bf2
|
/news/urls.py
|
8cd7b45c9fe79d8f08621a003fef854c096236ef
|
[] |
no_license
|
Burence1/The-Moringa-Tribune
|
4c0473f50f84f0f6563369b805d7b00bf8aa43ec
|
b035a082580eb1e8841e504c87f56392f85ae43e
|
refs/heads/main
| 2023-05-12T11:17:26.898628 | 2021-05-27T13:48:13 | 2021-05-27T13:48:13 | 365,954,800 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 837 |
py
|
from django.conf import settings
from django.conf.urls.static import static
from django.urls import path,re_path
from . import views
urlpatterns = [
    path('', views.news_today, name='newsToday'),
    # Date-stamped archive pages, e.g. /archives/2021-05-01/
    re_path(r'archives/(\d{4}-\d{2}-\d{2})/', views.past_days_news, name='pastNews'),
    path('search/', views.search_results, name='search_results'),
    # BUG FIX: path() does not understand regexes -- the original
    # path('article/(\d+)', ...) only matched the literal text
    # "article/(\d+)". re_path() actually captures the numeric id.
    re_path(r'article/(\d+)', views.article, name='article'),
    path('new-article', views.new_article, name='new-article'),
    path('ajax/newsletter/', views.newsletter, name='newsletter'),
    path('api/merch/merch-id/<int:pk>/', views.MerchDescription.as_view()),
    # path('api/merch/', views.MerchList.as_view()),
    # re_path('api/merch/merch-id/(?P<pk>[0-9]+)/',
    #         views.MerchDescription.as_view())
]

if settings.DEBUG:
    # Serve user-uploaded media from MEDIA_ROOT during development only.
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"[email protected]"
] | |
b7e4e280e4c4ea18117163135448ed4e9f3b14b8
|
19be48da7eb090f31fd88b1cef9c8ef3a6aaa0eb
|
/funcion23.py
|
c746bc134e246f9f9e9ecf9b80faae8d064e47c1
|
[] |
no_license
|
smith-sanchez/t09_Carrion_Villavicencio
|
376608d60dd175d872f2622b38ff220b6160ff9a
|
4cbb0e0694b35fd7135748bc7ef13db7c7374390
|
refs/heads/master
| 2020-11-27T05:08:14.629793 | 2019-12-20T18:36:00 | 2019-12-20T18:36:00 | 229,316,559 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 246 |
py
|
# funcion 23
# salario de un chef
# CLI usage: python funcion23.py <days_worked> <pay_per_day>
import libreria
import os
#import os
dia=int(os.sys.argv[1])  # number of days worked (argv[1])
precio_dia=float(os.sys.argv[2])  # pay per day (argv[2])
# import libreia
# NOTE(review): os.sys is an accidental alias of the sys module; the
# conventional spelling is sys.argv -- confirm before changing.
salario_total=libreria.salario(dia,precio_dia)
print(" el salario es:",salario_total)
|
[
"[email protected]"
] | |
3cdf8011b618b07498f42f587746389db19ab840
|
e7964338707afba0228866a33f954a974fcc693b
|
/code/linreg/boston3d_loss.py
|
93704fc901be370ade12eb00fcf6b4701c31b2e4
|
[
"MIT"
] |
permissive
|
anawatbk/msds621
|
f96346ddc4fd47d7b9c3a40e2632da7a39aaf2e0
|
869a309e235359119f30477c7a57763e222197e5
|
refs/heads/master
| 2023-03-25T10:20:02.072200 | 2021-03-10T09:39:33 | 2021-03-10T09:39:33 | 333,196,889 | 0 | 0 |
MIT
| 2021-03-10T09:39:34 | 2021-01-26T19:41:04 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 2,523 |
py
|
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from mpl_toolkits.mplot3d import Axes3D # required even though not ref'd!
from matplotlib import rcParams
import matplotlib as mpl
from sklearn.linear_model import LinearRegression, Ridge, Lasso, LogisticRegression
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.datasets import load_boston, load_iris, load_wine, load_digits, \
load_breast_cancer, load_diabetes
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, precision_score, recall_score
import glob
import os
from PIL import Image as PIL_Image
# STOPPED WORK IN PROGRESS
def loss(B, X, y):
    """Mean-squared-error loss for a linear model.

    B : sequence of coefficients [y-intercept, slope, ...]
    X : (n, k) design matrix whose first column is ones
    y : (n,) target vector

    BUG FIX: the original computed ``np.mean(y - X @ B) ** 2`` -- the square
    of the *mean residual* -- which is not MSE and lets positive and
    negative errors cancel. The square belongs inside the mean.
    """
    residuals = y - np.dot(X, np.array(B))
    return np.mean(residuals ** 2)
def get_surface(X, y, loss, b0_range, b1_range):
    """Evaluate *loss* on every (b0, b1) grid point and return the surface.

    X : feature data; may be 1-D (a single feature, e.g. a pandas Series).
        A column of ones is prepended for the intercept term.
    Returns L with shape (len(b0_range), len(b1_range)).

    BUG FIX: the original used np.hstack with a (n, 1) ones block, which
    raises on the 1-D Series the caller passes (X['LSTAT']); column_stack
    accepts 1-D columns. Also drops the unused B0 and meshgrid locals.
    """
    n = len(X)
    X = np.column_stack([np.ones(n), X])  # add ones column
    L = np.zeros((len(b0_range), len(b1_range)))
    for i, b0 in enumerate(b0_range):
        for j, b1 in enumerate(b1_range):
            L[i, j] = loss([b0, b1], X=X, y=y)
    return L
def plot3d(L, b0_range, b1_range, ax, elev=50, azim=145):
    """Draw loss surface L over the (b0, b1) grid on 3-D axes *ax*.

    elev/azim set the initial camera angles in degrees.
    """
    rcParams["font.size"] = 10
    ax.view_init(elev, azim)
    # 'ij' indexing matches how get_surface fills L: rows follow b0, cols b1.
    b0_range_mesh, b1_range_mesh = np.meshgrid(b0_range, b1_range, indexing='ij')
    surface = ax.plot_surface(b0_range_mesh, b1_range_mesh, L, alpha=0.7, cmap='coolwarm')
    # plt.title("""$loss(\\beta) = \sum_{i=1}^{N}(y^{{(i)}} - (\\beta_0 + \\beta_1 x^{{(i)}}))^2$""", fontsize=12)
    ax.set_xlabel('$\\beta_0$', fontsize=14)
    ax.set_ylabel('$\\beta_1$', fontsize=14)
    # Integer tick labels on the loss axis.
    ax.zaxis.set_major_formatter(mpl.ticker.StrMethodFormatter('{x:.0f}'))
# --- Script driver: fit OLS on the Boston housing data, then sweep the
# (b0, b1) plane around the LSTAT-only fit and plot the loss surface.
# NOTE(review): load_boston was removed in scikit-learn 1.2; this script
# needs an older scikit-learn -- confirm the pinned version.
boston = load_boston()
df = pd.DataFrame(boston.data, columns=boston.feature_names)
df['MEDV'] = boston.target
print(df.head(3))
X = df.drop('MEDV', axis=1)
y = df['MEDV']
lm = LinearRegression()
lm.fit(X, y)
true_b0 = lm.intercept_
coeff = lm.coef_
print(f"True beta = {true_b0:.2f}, {coeff}")
b0_range = np.arange(-3030, -2900, .1)  # y intercept
b1_range = np.arange(105, 120, .05)  # slope
L = get_surface(X['LSTAT'], y, loss, b0_range=b0_range, b1_range=b1_range)
fig = plt.figure(figsize=(8, 7))
ax = fig.add_subplot(111, projection='3d')
plot3d(L, b0_range=b0_range, b1_range=b1_range, ax=ax, elev=25, azim=110)
#Theax.plot([true_b0], [true_b1], marker='x', markersize=10, color='black')
plt.show()
|
[
"[email protected]"
] | |
bfb4f12275d4630557cbb7716232b552fb2bc121
|
ba1e90ae6ea9f8f74d9b542e159825341c717712
|
/2014/w33.py
|
e5aa36b9425bc3b95b355755a29e3a5445ba785d
|
[] |
no_license
|
sailesh2/CompetitiveCode
|
b384687a7caa8980ab9b9c9deef2488b0bfe9cd9
|
5671dac08216f4ce75d5992e6af8208fa2324d12
|
refs/heads/master
| 2021-06-24T22:39:11.396049 | 2020-11-27T05:22:17 | 2020-11-27T05:22:17 | 161,877,355 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 236 |
py
|
n=input()
ar=[0]*1000001
i=0
while i<n:
k=input()
x=raw_input().split(' ')
j=0
while j<k:
ar[int(x[j])]=1
j=j+1
i=i+1
i=1
while i<=1000001:
if ar[i]==1:
print i,
i=i+1
|
[
"[email protected]"
] | |
7d6ab9147f7e2b8536e088e2f9369d2f7f13d547
|
4a36849188747a1e3cc4b052eb6bc3a21e3e53bb
|
/POJ/3061.Subsequence/3061.Subsequence.py
|
e877888939ef6ca21888b36bf9aeb5ccaf105122
|
[] |
no_license
|
koking0/Algorithm
|
88f69a26f424d1b60a8440c09dd51c8563a86309
|
2828811ae2f905865b4f391672693375c124c185
|
refs/heads/master
| 2022-07-06T17:10:07.440930 | 2022-06-24T14:59:40 | 2022-06-24T14:59:40 | 216,952,717 | 35 | 48 | null | 2020-07-21T02:46:26 | 2019-10-23T02:41:09 |
Java
|
UTF-8
|
Python
| false | false | 855 |
py
|
#!/usr/bin/env python
# -*- coding: utf-H -*-
# @Time : 2020/1/28 16:27
# @File : 3061.Subsequence.py
# ----------------------------------------------
# ☆ ☆ ☆ ☆ ☆ ☆ ☆
# >>> Author : Alex
# >>> QQ : 2426671397
# >>> Mail : [email protected]
# >>> Github : https://github.com/koking0
# ☆ ☆ ☆ ☆ ☆ ☆ ☆
import sys
# For each test case: shortest contiguous window whose sum reaches target
# (POJ 3061), via a two-pointer sliding window; 0 if no window qualifies.
while True:
    try:
        length, target = map(int, input().split())
        sequence = list(map(int, input().split()))
        left, sum_num, ans = 0, 0, sys.maxsize
        for right in range(length):
            sum_num += sequence[right]
            # BUG FIX: the problem asks for the shortest window with
            # sum >= S; the original shrank only while sum > S, so a
            # window summing to exactly S was never counted.
            while sum_num >= target:
                ans = min(right - left + 1, ans)
                sum_num -= sequence[left]
                left += 1
        print(ans if ans != sys.maxsize else 0)
    except EOFError:
        break
|
[
"[email protected]"
] | |
a976c9d14e1dee06b2ff83170340b7db50d36e35
|
f0cdda3cf2817bcf991a14cf46e38c353e6872a6
|
/src/epuck2_gazebo/scripts/epuck2_control_codes/epuck_pid_controller.py
|
83a881db1ca46ec151e0f02e6df04aef77f70ca8
|
[] |
no_license
|
vinits5/gym-vinit
|
efc1b5312674840333eea4fb3912aa579c295f5f
|
3ebd79ee94a51c12a6b64fe743ebc742f8d5e63d
|
refs/heads/master
| 2020-03-22T00:55:19.272167 | 2018-06-30T19:00:12 | 2018-06-30T19:00:12 | 138,631,715 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,760 |
py
|
#! /usr/bin/python
import rospy
import math
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
import matplotlib.pyplot as plt
import numpy as np
import tf
from tf.transformations import euler_from_quaternion
from std_srvs.srv import Empty
import time
velocity_publisher = rospy.Publisher('epuck2/cmd_vel', Twist, queue_size=10)
class epuck():
    """PID waypoint controller for an e-puck robot simulated in Gazebo.

    Subscribes to the diff-drive odometry topic, caches the latest pose in
    self.x / self.y / self.euler, and publishes Twist commands on
    epuck2/cmd_vel until the commanded goal point is reached.
    """
    def __init__(self):
        rospy.init_node('epuck_controller', anonymous=True)
        self.velocity_publisher = rospy.Publisher('epuck2/cmd_vel', Twist, queue_size=10)
        self.pose_subscriber = rospy.Subscriber('/epuck2/odom_diffdrive', Odometry, self.callback)
        self.rate = rospy.Rate(10)  # control-loop frequency (Hz)

    def callback(self,data):
        """Odometry callback: cache current position and Euler orientation."""
        self.x = data.pose.pose.position.x
        self.y = data.pose.pose.position.y
        q0 = data.pose.pose.orientation.x
        q1 = data.pose.pose.orientation.y
        q2 = data.pose.pose.orientation.z
        q3 = data.pose.pose.orientation.w
        quaternion = (q0,q1,q2,q3)
        # (roll, pitch, yaw) in radians.
        self.euler = euler_from_quaternion(quaternion)

    def orientation(self,angle):
        """Remap a yaw angle in radians to a compass-style heading in degrees.

        NOTE(review): never called by motion()/circular_motion() -- confirm
        whether this helper is dead code before removing it.
        """
        angle = angle*(180.0/math.pi)
        if angle >= -90:
            angle = 90 - angle
        else:
            angle = - angle - 270
        return angle

    def motion(self,xg,yg):
        """Drive toward goal (xg, yg) with a PI(D) heading controller.

        Returns (path_x, path_y): the trace of positions actually visited.
        """
        loop = True  # NOTE(review): unused flag
        #PID Parameters
        Kp = 1  #Proportional constant
        Ki = 0.075  #Integral constant
        Kd = 0  #Differential constant
        E = 0  #Difference of errors
        I = 0  #Sum of all errors
        ai = 0  #Previous orientation of robot
        ei = 0  #Previous error in orientation of robot
        goal = True  #True if goal not reached & False if reached
        #Path points:
        path_x = []
        path_y = []
        #PID loop
        while goal:
            yi = self.y  #Current y position
            xi = self.x  #Current x position
            path_x.append(xi)
            path_y.append(yi)
            #Error Calculations
            ad = math.atan2(yg-yi,xg-xi)  #Bearing from current position to goal
            e = ad - ai  #Error between desired bearing and previous heading
            e = math.atan2(math.sin(e),math.cos(e))  #Wrap error into (-pi, pi]
            #PID control
            E = e - ei  #Difference of previous and current error
            I = I + e  #Sum of all errors (integral term)
            w = Kp*e + Ki*I + Kd*E  #Calculation of angular velocity
            #Command Velocities to robot
            vel = Twist()  #Velocity message
            if e >= 0:  #Check for left or right turn
                w = -w  #For left: -w & for right: w
            vel.angular.z = w
            vel.linear.x = 0.05  # constant forward speed (m/s)
            # NOTE: publishes via the module-level publisher, not self.velocity_publisher.
            velocity_publisher.publish(vel)
            #Loop running at 10Hz frequency.
            self.rate.sleep()
            #New positions
            yn = self.y  #New y position
            xn = self.x  #New x position
            ai = math.atan2(yn-yi,xn-xi)  #Heading actually travelled this step
            ai = math.atan2(math.sin(ai),math.cos(ai))  #Wrap into (-pi, pi]
            #Check the goal condition (within sqrt(0.01*0.05) m of target)
            if ((xn-xg)*(xn-xg)+(yn-yg)*(yn-yg)-0.01*0.05)<0:
                print('Goal Reached!')
                vel.angular.z = 0
                vel.linear.x = 0
                velocity_publisher.publish(vel)
                goal = False
        return(path_x,path_y)

    def circular_motion(self):
        """Visit sampled waypoints on the circle x^2 + (y-0.5)^2 = 0.25,
        calling motion() per waypoint; returns the per-segment path traces."""
        path_X = []
        path_Y = []
        y = [0,0.2,0.4,0.6,0.8,1.0]
        x2 = []
        for i in y:
            x3 = 0.25-(i-0.5)*(i-0.5)  # x^2 on the circle at height i
            x2.append(x3)
        x = [math.sqrt(i) for i in x2]
        xf = []
        yf = []
        # Right half of the circle (positive x), bottom to top...
        [xf.append(i) for i in x]
        [yf.append(i) for i in y]
        y.reverse()
        [yf.append(i) for i in y]
        x.reverse()
        # ...then the left half (negative x), top back down.
        [xf.append(-i) for i in x]
        for i in range(len(xf)):
            path_x,path_y = self.motion(xf[i],yf[i])
            path_X.append(path_x)
            path_Y.append(path_y)
        return (path_X,path_Y)
if __name__ == '__main__':
    try:
        X = epuck()
        #xg = input('Enter xg: ')
        #yg = input('Enter yg: ')
        #path_x,path_y = X.motion(xg,yg)
        x = input('Enter anything to start: ')
        #reset_world = rospy.ServiceProxy('/gazebo/reset_world',Empty)
        path_X,path_Y = X.circular_motion()
        # Flatten the per-segment traces into one polyline and plot it.
        xx = []
        yy = []
        for i in path_X:
            for j in i:
                xx.append(j)
        for i in path_Y:
            for j in i:
                yy.append(j)
        plt.plot(xx,yy)
        plt.show()
        #reset_world()
    except rospy.ROSInterruptException:
        pass
|
[
"[email protected]"
] | |
774d66cb1470234449465f0188cd76c1b7dd3b9f
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_gigabits.py
|
5b528902501ccb6eb2a2803116afd4524cf7a3d7
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 245 |
py
|
from xai.brain.wordbase.nouns._gigabit import _GIGABIT
# class header
class _GIGABITS(_GIGABIT, ):
    """Plural word entry: inherits behaviour from _GIGABIT and overrides
    only the identifying metadata."""
    def __init__(self,):
        _GIGABIT.__init__(self)
        self.name = "GIGABITS"  # canonical (upper-case) word form
        self.specie = 'nouns'  # part-of-speech bucket
        self.basic = "gigabit"  # singular/base form
        self.jsondata = {}  # extra lexical data, empty by default
|
[
"[email protected]"
] | |
ea2f3fd552459d85a170b03d4f5e904f7c191349
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p04000/s750895351.py
|
7378de763ab9a50cfa785771268623df0b68e5e7
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 604 |
py
|
import sys
input = sys.stdin.buffer.readline
from collections import defaultdict
def main():
    """AtCoder ABC045 D: for k = 0..9, count the 3x3 windows of an H x W
    grid that contain exactly k of the N painted cells."""
    H,W,N = map(int,input().split())
    d = defaultdict(int)
    for i in range(N):
        a,b = map(int,input().split())
        a -= 1
        b -= 1
        # Painted cell (a, b) lies in up to 9 windows; enumerate their
        # top-left corners and keep those fully inside the grid.
        for x in range(3):
            for y in range(3):
                na,nb = a-x,b-y
                if (0 <= na < H-2 and 0 <= nb < W-2):
                    d[na*W+nb] += 1  # corner encoded as a single index
    d = list(d.values())
    # Windows never touched by a painted cell contain exactly 0 of them.
    ans = (H-2)*(W-2)-len(d)
    print(ans)
    for i in range(9):
        i += 1
        print(d.count(i))  # windows holding exactly i painted cells

if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
6c39c671da8ea030b974588fc017b2bac50a4db6
|
feeeab5dc580786a35dbddcb99ddab85bc893668
|
/managers/cc_help.py
|
208cebfbd552ce1485297a7e2ef7c4c00e44949c
|
[] |
no_license
|
idelfrides/POC_test_creditCard_type
|
54dd3c5de02547802074e2acf50295463e92f17d
|
10792ac8f3393a6e3d621d24a43eb794ec241a02
|
refs/heads/master
| 2020-08-08T01:58:54.997806 | 2019-10-08T14:43:23 | 2019-10-08T14:43:23 | 213,668,861 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,114 |
py
|
# from .help import get_digits
import re
from .help import get_digits
# there are codes --> code
CC_TYPE_GENERIC = 0
CC_TYPE_VISA = 1
CC_TYPE_AMEX = 2
CC_TYPE_DINERS = 3
CC_TYPE_DISCOVER = 4
CC_TYPE_MASTERCARD = 5
CC_TYPE_ELO = 6
CC_TYPE_JCB = 7
CC_TYPE_MIR = 8
CC_TYPE_UNIONPAY = 9
CC_TYPES = (
(CC_TYPE_ELO, {
'title': 'Elo',
'regex': re.compile(r'^(?:431274|451416|5067|5090|627780|636297)')
}),
(CC_TYPE_VISA, {
'title': 'Visa',
'regex': re.compile(r'^4')
}),
(CC_TYPE_AMEX, {
'title': 'American Express',
'regex': re.compile(r'^3[47]')
}),
(CC_TYPE_DINERS, {
'title': 'Diners Club',
'regex': re.compile(r'^3(?:0[0-5]|095|[689])')
}),
(CC_TYPE_DISCOVER, {
'title': 'Discover Card',
'regex': re.compile(r'^6(?:011|4[4-9]|5)')
}),
(CC_TYPE_JCB, {
'title': 'JCB',
'regex': re.compile(r'^35(?:2[89]|[3-8])')
}),
(CC_TYPE_MIR, {
'title': 'MIR',
'regex': re.compile(r'^220[0-4]')
}),
(CC_TYPE_UNIONPAY, {
'title': 'UnionPay',
'regex': re.compile(r'^62')
}),
(CC_TYPE_MASTERCARD, {
'title': 'MasterCard',
'regex': re.compile(r'^(?:5[1-5]|222[1-9]|22[3-9]|2[3-6]|27[01]|2720)')
}),
)
CC_TYPE_CHOICES = (
(CC_TYPE_GENERIC, 'Generic'),
(CC_TYPE_VISA, 'Visa'),
(CC_TYPE_AMEX, 'American Express'),
(CC_TYPE_DINERS, 'Diners Club'),
(CC_TYPE_DISCOVER, 'Discover Card'),
(CC_TYPE_MASTERCARD, 'MasterCard'),
(CC_TYPE_ELO, 'Elo'),
(CC_TYPE_JCB, 'JCB'),
(CC_TYPE_MIR, 'MIR'),
(CC_TYPE_UNIONPAY, 'UnionPay'),
)
def get_type(number):
    """
    Gets credit card type given number.

    The number is reduced to its digits first, then matched against each
    issuer prefix regex in CC_TYPES. Order matters: more specific prefixes
    (e.g. Elo) are listed before the generic ones (e.g. Visa's '^4').

    :type number: str
    :rtype: int -- one of the CC_TYPE_* codes; CC_TYPE_GENERIC if no match
    """
    number = get_digits(number)
    for code, record in CC_TYPES:
        if re.match(record['regex'], number):
            return code
    return CC_TYPE_GENERIC
|
[
"[email protected]"
] | |
b603e746dc5f758e8ad5e6b8160c2676e856d555
|
6fbd56a12f8675c8ee6dd9ad23101a9c02d34387
|
/setup.py
|
9ee9310affb4d9f8071f556091f427c1ae42963a
|
[
"MIT"
] |
permissive
|
matthiasdebernardini/topology
|
aa666940786dfdbc1fe1f732b73365d1eb596893
|
5cb7cb1e9a602874e7a325f95e50dfe110ca8efb
|
refs/heads/main
| 2023-02-14T18:54:40.751005 | 2021-01-05T09:29:01 | 2021-01-05T09:29:01 | 328,508,598 | 0 | 0 |
MIT
| 2021-01-11T00:26:57 | 2021-01-11T00:26:56 | null |
UTF-8
|
Python
| false | false | 846 |
py
|
"""Packaging script for lntopo (tools for lnresearch/topology datasets)."""
from setuptools import setup
import io

# Long description comes straight from the org-mode README.
with io.open('README.org', encoding='utf-8') as f:
    long_description = f.read()

# One requirement per non-empty line of requirements.txt.
with io.open('requirements.txt', encoding='utf-8') as f:
    requirements = [r for r in f.read().split('\n') if len(r)]

setup(name='lntopo',
      version='0.1.0',
      description='Tools to work with lnresearch/topology datasets',
      long_description=long_description,
      long_description_content_type='text/x-org',
      url='http://github.com/lnresearch/topology',
      author='Christian Decker',
      author_email='[email protected]',
      license='MIT',
      packages=[],
      package_data={},
      scripts=[],
      zip_safe=True,
      # Expose the click CLI as the `lntopo-cli` console command.
      entry_points = {
          'console_scripts': [
              'lntopo-cli = cli.__main__:cli',
          ],
      },
      install_requires=requirements
      )
|
[
"[email protected]"
] | |
75beee39f655ccdabb0e887a3fea8cafc7e95c8a
|
e2897c39ec494856e0f110c57f3f0bb4740ac4de
|
/task_2/task_2/wsgi.py
|
d926bbe042842485d5f7b8e6eef5a2e769852adf
|
[] |
no_license
|
ksuvarna85/app_school
|
fc3f75eddf18535fff8cbf2b38d1fd39bf313102
|
9804cd0d9c629e37d72cd72738c675536ce1dd24
|
refs/heads/master
| 2022-12-10T23:24:30.967284 | 2020-09-02T05:13:16 | 2020-09-02T05:13:16 | 292,185,066 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 389 |
py
|
"""
WSGI config for task_2 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'task_2.settings')
application = get_wsgi_application()
|
[
"[email protected]"
] | |
953dfcb4dd312ccbcb7d455b544179ac4a617b59
|
2d4005c1bce1bad26fa9cba6c8ccab913e27c4ec
|
/Python高级/7丶http协议丶web服务器/4丶根据用户的需求返回相应的页面.py
|
98eb031fe68132eb0345e8427d55a43e7c9ea1ae
|
[] |
no_license
|
wfwf1990/learn
|
4b801f2c082ce180a6d70d680c8cadbc5c6ec3cf
|
5ed32454ddf083866fabd730d5b2ffb544a30e08
|
refs/heads/master
| 2020-03-21T18:16:20.284168 | 2018-07-18T11:35:29 | 2018-07-18T11:35:29 | 138,881,605 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,626 |
py
|
# Author: wangfang
# Author: wangfang
import socket
import re
def handle_data(client_socket):
    """Serve one HTTP request on *client_socket*, then close it.

    Reads the request line, maps the URI onto ./html, and replies with the
    file contents (200), a plain-text 404, or a 400 for malformed requests.
    """
    recv_data = client_socket.recv(1024)
    recv_data = recv_data.decode("utf-8")
    recv_data = recv_data.splitlines()
    # Pull the URI out of the request line, e.g. "GET /a.html HTTP/1.1".
    match = re.match("[^/]+(/[^ ]*)", recv_data[0]) if recv_data else None
    if match is None:
        # Malformed request line: answer 400 instead of crashing the worker
        # (the original raised AttributeError on .group of None).
        client_socket.send("http/1.1 400 bad request \r\n\r\n".encode("utf-8"))
        client_socket.close()
        return
    url = match.group(1)
    if url == "/":
        url = "/index.html"
    # SECURITY NOTE(review): url is client-controlled and concatenated into
    # the filesystem path, so "../" escapes ./html (path traversal). Should
    # be normalised/validated before this ships.
    try:
        f1 = open("./html" + url, "rb")
    except OSError:
        # BUG FIX: the original bare `except:` swallowed every exception,
        # including KeyboardInterrupt; only file-open errors mean 404.
        response_header = "http/1.1 404 not found \r\n"
        response_header += "\r\n"
        response_body = "file not found".encode("utf-8")
    else:
        response_header = "http/1.1 200 OK \r\n"
        response_header += "\r\n"
        with f1:  # guarantees the file handle is closed
            response_body = f1.read()
    client_socket.send(response_header.encode("utf-8"))
    client_socket.send(response_body)
    client_socket.close()
def main():
    """Run the server: bind port 8080 and serve clients forever."""
    # Create the listening TCP socket.
    tcp_server_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
    # Bind to all interfaces on port 8080.
    server_ip = ""
    server_port = 8080
    server_addr = (server_ip,server_port)
    tcp_server_socket.bind(server_addr)
    # Allow up to 128 pending connections.
    tcp_server_socket.listen(128)
    while True:
        """Accept a client and serve its request."""
        # One client at a time; handle_data closes the client socket.
        client_socket,client_addr = tcp_server_socket.accept()
        handle_data(client_socket)
    # NOTE(review): unreachable -- the loop above never exits.
    tcp_server_socket.close()

if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
67e955dc2e70709a21813dde1e1e3ecf9da1ec54
|
41c26da9c57052a3c9cd17b81d91f41ef074cf8d
|
/MyLeetCode/FB/Sqrt(x).py
|
e57f5f518cdc5ab67b63761318a1cca55c7a2c24
|
[] |
no_license
|
ihuei801/leetcode
|
a82f59a16574f4781ce64a5faa099b75943de94e
|
fe79161211cc08c269cde9e1fdcfed27de11f2cb
|
refs/heads/master
| 2021-06-08T05:12:53.934029 | 2020-05-07T07:22:25 | 2020-05-07T07:22:25 | 93,356,513 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 571 |
py
|
###
# Binary Search
# Time Complexity: O(logn)
# Space Complexity: O(1)
###
class Solution(object):
    def mySqrt(self, x):
        """
        Integer square root via binary search.

        :type x: int
        :rtype: int -- floor(sqrt(x)); 0 for non-positive input

        Time O(log x), space O(1).
        """
        if x <= 0:
            return 0
        if x == 1:
            return 1
        l, r = 1, x
        while l + 1 < r:
            # BUG FIX: use floor division -- under Python 3 the original
            # (l + r)/2 is true division, making mid (and the result) float.
            mid = (l + r) // 2
            if mid * mid == x:
                return mid
            elif mid * mid < x:
                l = mid
            else:
                r = mid
        # l and r are adjacent; pick the larger one whose square still fits.
        if r * r <= x:
            return r
        else:
            return l
|
[
"[email protected]"
] | |
e45e92ac2a515b699091a99231db873b58ea6c9e
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/cirq_new/cirq_program/startCirq_noisy786.py
|
f2b65f32ed029be9c4178c2a3b37eb138cc9f1e7
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,253 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=19
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
def make_circuit(n: int, input_qubit):
    """Build the auto-generated 4-qubit benchmark circuit.

    The gate sequence is fixed (the "# number=k" tags are emitted by the
    generator); *n* is accepted for generator-interface compatibility but
    is not used. A measurement of all qubits under key 'result' is
    appended at the end.
    """
    c = cirq.Circuit()  # circuit begin
    c.append(cirq.H.on(input_qubit[0])) # number=1
    c.append(cirq.H.on(input_qubit[1])) # number=2
    c.append(cirq.H.on(input_qubit[1])) # number=7
    c.append(cirq.H.on(input_qubit[2])) # number=3
    c.append(cirq.H.on(input_qubit[3])) # number=4
    c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=5
    c.append(cirq.H.on(input_qubit[0])) # number=14
    c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=15
    c.append(cirq.H.on(input_qubit[0])) # number=16
    c.append(cirq.Z.on(input_qubit[1])) # number=13
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=8
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=9
    c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=10
    c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=11
    c.append(cirq.Z.on(input_qubit[2])) # number=12
    c.append(cirq.Y.on(input_qubit[0])) # number=17
    c.append(cirq.Y.on(input_qubit[0])) # number=18
    # circuit end
    c.append(cirq.measure(*input_qubit, key='result'))
    return c
def bitstring(bits):
    """Render an iterable of bit values as a string of '0'/'1' characters."""
    chars = [str(int(bit)) for bit in bits]
    return ''.join(chars)
if __name__ == '__main__':
    qubit_count = 4
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    # Compile to the Sycamore gate set before adding noise.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    circuit_sample_count =2820  # number of measurement repetitions
    # Apply 1% depolarizing noise to every operation.
    circuit = circuit.with_noise(cirq.depolarize(p=0.01))
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)
    frequencies = result.histogram(key='result', fold_func=bitstring)
    # Dump the histogram, circuit length and circuit text to the output file.
    writefile = open("../data/startCirq_noisy786.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)
    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)
    writefile.close()
|
[
"[email protected]"
] | |
63a817e8557c763d366712c27c996a9e667b18c3
|
ebe7c57183b0eeba9af1bdc72f0f81b9b8129ca9
|
/23. HashTable/387.py
|
8b7b7473758a3bccdd09c324c10a8ef2fb84a148
|
[] |
no_license
|
proTao/leetcode
|
f2e46392b56b69606e1dd25cf5738cb0ad275645
|
97533d53c8892b6519e99f344489fa4fd4c9ab93
|
refs/heads/master
| 2021-11-24T10:23:56.927122 | 2021-11-18T04:28:05 | 2021-11-18T04:28:05 | 110,225,265 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 688 |
py
|
from collections import Counter
from math import inf
class Solution:
    def firstUniqChar(self, s: str) -> int:
        """Return the index of the first non-repeating character in *s*,
        or -1 if every character repeats. One Counter pass, O(n)."""
        count = Counter(s)
        for i, c in enumerate(s):
            if count[c] == 1:
                return i
        return -1

    # BUG FIX: this method was also named firstUniqChar, silently shadowing
    # the Counter version above, while the __main__ block called the then
    # undefined name firstUniqCharBetter. Renaming it fixes both problems.
    def firstUniqCharBetter(self, s: str) -> int:
        """Same contract as firstUniqChar, but scans the 26-letter alphabet
        with str.find (runs in C), tracking the smallest index of a letter
        that has no second occurrence."""
        alpha = "qwertyuiopasdfghjklzxcvbnm"
        res = inf
        for c in alpha:
            i = s.find(c)
            if i == -1:
                continue  # letter absent from s
            j = s.find(c, i + 1)
            if j == -1:  # no second occurrence: c is unique at index i
                res = min(res, i)
        return res if res is not inf else -1
if __name__ == "__main__":
    # Smoke test; expected output: 2 ('v' is the first unique character).
    # Bug fix: the original called firstUniqCharBetter, a name the class as
    # shipped did not define, so this line raised AttributeError.
    print(Solution().firstUniqChar("loveleetcode"))
|
[
"[email protected]"
] | |
a6664ec1cdda715d878aabeded1980ae5457a15c
|
6f4f4d2ff85574a42a6e539d43becce5815e4530
|
/lyman/tests/test_frontend.py
|
639f8dc1d503c8f8798aa0fc2826f066d4bf4007
|
[
"BSD-2-Clause"
] |
permissive
|
toddt/lyman
|
b6aa656b6f8a6a235b9bf2f64d035a1b78dc188f
|
e3a5519fce41a765ae593d8d161e995c5f9aae8c
|
refs/heads/master
| 2021-01-22T12:53:33.693352 | 2014-01-15T21:33:10 | 2014-01-15T21:33:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 661 |
py
|
from argparse import Namespace
from nose.tools import assert_equal
from .. import frontend
def test_determine_engine():
    """Nose generator test: CLI plugin names map to Nipype plugin strings."""
    # Expected mapping from the --plugin CLI value to the engine name that
    # frontend.determine_engine should report.
    plugin_dict = dict(linear="Linear",
                       multiproc="MultiProc",
                       ipython="IPython",
                       torque="PBS")

    for arg, plugin_str in plugin_dict.items():
        # Minimal stand-in for the parsed argparse namespace.
        args = Namespace(plugin=arg, queue=None)
        if arg == "multiproc":
            args.nprocs = 4
        plugin, plugin_args = frontend.determine_engine(args)
        # Yielding (callable, *args) makes nose run each check as its own test.
        yield assert_equal, plugin, plugin_str
        if arg == "multiproc":
            # Only the multiproc engine is expected to forward worker-count args.
            yield assert_equal, plugin_args, dict(n_procs=4, qsub_args="")
|
[
"[email protected]"
] | |
8c6a5a3e278d1c8a19d73033246e3453833eb81e
|
18f8a1c7122c0b320f17ea31192439779a8c63e8
|
/web/apps/admin/groups.py
|
b0ad4bfe977abf42bf38d551d4f9ce035134e1a5
|
[
"MIT"
] |
permissive
|
RyanLainchbury/zoom
|
d49afa8d3506fca2c6e426707bd60ba640420a45
|
684a16f4fe3cea3d26f2d520c743a871ca84ecc5
|
refs/heads/master
| 2020-12-25T19:03:12.881247 | 2017-06-09T07:29:27 | 2017-06-09T07:29:27 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,650 |
py
|
"""
system users
"""
from zoom.components import success, error
from zoom.collect import Collection, CollectionController
from zoom.forms import Form
from zoom.helpers import link_to, url_for
from zoom.models import Group, Groups
from zoom.tools import now
import zoom.validators as v
import zoom.fields as f
from model import update_group_members
def group_fields(request):
    """Build the edit-form field set for a user group."""
    site = request.site
    main_section = f.Fields([
        f.TextField('Name', v.required, v.valid_name),
        f.MemoField('Description'),
        f.PulldownField('Administrators', default='administrators', options=site.user_groups),
    ])
    membership_section = f.Section('Includes', [
        # f.ChosenMultiselectField('Groups', options=site.user_groups),
        f.ChosenMultiselectField('Users', options=site.user_options),
    ])
    return f.Fields(main_section, membership_section)
class GroupCollectionController(CollectionController):
    """Controller that tags records as user groups ('U') and synchronizes
    group membership on every insert or update."""

    def _mark_and_sync(self, record):
        # Shared hook body: force the user-group type, then persist members.
        record['type'] = 'U'
        update_group_members(record)

    def before_insert(self, record):
        self._mark_and_sync(record)

    def before_update(self, record):
        self._mark_and_sync(record)
def main(route, request):
    """Serve the /admin/groups CRUD collection view for user groups."""

    def user_group(group):
        # Only plain user groups: type 'U', excluding 'a_'-prefixed groups
        # (presumably app/system groups -- TODO confirm the 'a_' convention).
        return group.type == 'U' and not group.name.startswith('a_')

    db = request.site.db
    users = Groups(db)
    fields = group_fields(request)
    columns = 'link', 'description', 'administrators'
    # Collection(...) builds the view object; calling it dispatches the request.
    return Collection(
        fields,
        model=Group,
        controller=GroupCollectionController,
        store=users,
        item_name='group',
        url='/admin/groups',
        filter=user_group,
        columns=columns,
    )(route, request)
|
[
"[email protected]"
] | |
cc247e80135181a627d1df3c82785a5871e3b13c
|
255e19ddc1bcde0d3d4fe70e01cec9bb724979c9
|
/all-gists/1485023/snippet.py
|
55f1081e778224a3121589a27e60a6f8ebd07476
|
[
"MIT"
] |
permissive
|
gistable/gistable
|
26c1e909928ec463026811f69b61619b62f14721
|
665d39a2bd82543d5196555f0801ef8fd4a3ee48
|
refs/heads/master
| 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 |
Python
|
UTF-8
|
Python
| false | false | 1,053 |
py
|
# You need gevent 1.0 and pyzmq 3.x
#
# pip install --user git://github.com/SiteSupport/gevent.git
# pip install --user pyzmq
#
import gevent
import zmq.green as zmq
import os, sys
ADDR = 'tcp://127.0.0.1:5555'
def run_parent():
    # Producer: a PUSH socket fans messages out round-robin to whichever
    # worker PULL sockets are connected, one message per second.
    ctx = zmq.Context()
    sock = ctx.socket(zmq.PUSH)
    sock.bind(ADDR)
    for i in range(10):
        sock.send('message: %d' % i)
        gevent.sleep(1)
def run_child(ident):
    # Consumer loop; runs forever inside a forked worker process.
    # create a new context since we are forked in a new process
    ctx = zmq.Context()
    sock = ctx.socket(zmq.PULL)
    sock.connect(ADDR)
    while True:
        # Blocks (green-thread friendly via zmq.green) until a message arrives.
        msg = sock.recv()
        print '%s: %s' % (ident, msg)
def fork_workers(num):
    # Fork `num` child processes; each child runs run_child() and never
    # returns here.  The parent gets back the list of child PIDs.
    pids = []
    for i in range(num):
        pid = gevent.fork()
        if pid == 0:
            # Child branch: consume messages, then exit (the exit is
            # unreachable in practice because run_child loops forever).
            run_child(os.getpid())
            sys.exit(0)
        else:
            pids.append(pid)
    return pids
# Fork the consumers first so they are already listening when the parent
# starts pushing.
pids = fork_workers(3)
print 'workers:', ', '.join('%d' % p for p in pids)

run_parent()

# not cool, workers should die themselves actually
for pid in pids:
    os.kill(pid, 15)  # 15 == SIGTERM
|
[
"[email protected]"
] | |
2868e0431b4695d3c0c1bf5f09a50754ff439a4e
|
983f77449bbea7ae1993a93d7f4431f0f07193f0
|
/lab/agent_exercising/model.py
|
3593f4708457d6223c507bb9e459248134d29983
|
[] |
no_license
|
johnholl/TDnets
|
09d45f2bab138639e3be107d2e44df01533c10c3
|
00afc8a5ad412047c658deed2f487a98f062788b
|
refs/heads/master
| 2020-06-19T06:41:42.159903 | 2017-03-13T13:02:11 | 2017-03-13T13:02:11 | 74,916,539 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,714 |
py
|
import numpy as np
import tensorflow as tf
import tensorflow.contrib.rnn as rnn
def normalized_columns_initializer(std=1.0):
    """TF initializer: Gaussian weights rescaled so every column has L2 norm
    ``std`` (the usual policy/value head initialization in A3C code)."""
    def _initializer(shape, dtype=None, partition_info=None):
        sample = np.random.randn(*shape).astype(np.float32)
        column_norms = np.sqrt(np.square(sample).sum(axis=0, keepdims=True))
        return tf.constant(sample * (std / column_norms))
    return _initializer
def flatten(x):
    """Collapse all non-batch dimensions of ``x`` into a single one."""
    trailing_size = int(np.prod(x.get_shape().as_list()[1:]))
    return tf.reshape(x, [-1, trailing_size])
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None):
    """2-D convolution with Glorot/Xavier-uniform weight init and zero bias.

    Args:
        x: input tensor; indexing below assumes NHWC (channels at axis 3).
        num_filters: number of output channels.
        name: variable scope holding the layer's "W" and "b" variables.
        filter_size, stride: (height, width) pairs.
        pad: tf.nn.conv2d padding mode ("SAME" or "VALID").
        collections: optional variable collections for W and b.

    Returns:
        conv(x, W) + b (bias broadcast over batch and spatial dims).
    """
    with tf.variable_scope(name):
        stride_shape = [1, stride[0], stride[1], 1]
        filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]

        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = np.prod(filter_shape[:3])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        #   pooling size
        fan_out = np.prod(filter_shape[:2]) * num_filters
        # initialize weights with random weights
        w_bound = np.sqrt(6. / (fan_in + fan_out))  # Glorot uniform bound

        w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
                            collections=collections)
        b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.constant_initializer(0.0),
                            collections=collections)
        return tf.nn.conv2d(x, w, stride_shape, pad) + b
def linear(x, size, name, initializer=None, bias_init=0):
    """Fully connected layer: x @ W + b, with variables ``name``/w and ``name``/b."""
    input_dim = x.get_shape()[1]
    weights = tf.get_variable(name + "/w", [input_dim, size], initializer=initializer)
    bias = tf.get_variable(name + "/b", [size], initializer=tf.constant_initializer(bias_init))
    return tf.matmul(x, weights) + bias
def categorical_sample(logits, d):
    # Sample one action index per row from the categorical distribution given
    # by `logits`, returned one-hot with depth `d`.  Subtracting the row max
    # leaves the distribution unchanged but keeps tf.multinomial numerically
    # stable.
    value = tf.squeeze(tf.multinomial(logits - tf.reduce_max(logits, [1], keep_dims=True), 1), [1])
    return tf.one_hot(value, d)
class LSTMPolicy(object):
    """A3C-style policy/value network: four ELU conv layers feeding a
    256-unit LSTM, with linear policy-logit and value heads on top."""

    def __init__(self, ob_space, ac_space):
        # Observation placeholder; the leading None axis is the rollout
        # time/batch dimension.
        self.x = x = tf.placeholder(tf.float32, [None] + list(ob_space))

        for i in range(4):
            x = tf.nn.elu(conv2d(x, 32, "l{}".format(i + 1), [3, 3], [2, 2]))
        # introduce a "fake" batch dimension of 1 after flatten so that we can do LSTM over time dim
        x = tf.expand_dims(flatten(x), [0])

        size = 256
        lstm = rnn.rnn_cell.BasicLSTMCell(size, state_is_tuple=True)
        self.state_size = lstm.state_size
        step_size = tf.shape(self.x)[:1]

        # Zero initial LSTM state plus feed placeholders so the recurrent
        # state can be carried across act() calls during a rollout.
        c_init = np.zeros((1, lstm.state_size.c), np.float32)
        h_init = np.zeros((1, lstm.state_size.h), np.float32)
        self.state_init = [c_init, h_init]
        c_in = tf.placeholder(tf.float32, [1, lstm.state_size.c])
        h_in = tf.placeholder(tf.float32, [1, lstm.state_size.h])
        self.state_in = [c_in, h_in]

        state_in = rnn.rnn_cell.LSTMStateTuple(c_in, h_in)
        lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
            lstm, x, initial_state=state_in, sequence_length=step_size,
            time_major=False)
        lstm_c, lstm_h = lstm_state
        x = tf.reshape(lstm_outputs, [-1, size])
        # Policy-logit and state-value heads share the LSTM features.
        self.logits = linear(x, ac_space, "action", normalized_columns_initializer(0.01))
        self.vf = tf.reshape(linear(x, 1, "value", normalized_columns_initializer(1.0)), [-1])
        self.state_out = [lstm_c[:1, :], lstm_h[:1, :]]
        self.sample = categorical_sample(self.logits, ac_space)[0, :]
        self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)

    def get_initial_features(self):
        """Return the zero LSTM state used at the start of an episode."""
        return self.state_init

    def act(self, ob, c, h):
        """Sample an action for one observation.

        Returns [one_hot_action, value_estimate, new_c, new_h]."""
        sess = tf.get_default_session()
        return sess.run([self.sample, self.vf] + self.state_out,
                        {self.x: [ob], self.state_in[0]: c, self.state_in[1]: h})

    def value(self, ob, c, h):
        """Estimate V(ob) without sampling an action."""
        sess = tf.get_default_session()
        return sess.run(self.vf, {self.x: [ob], self.state_in[0]: c, self.state_in[1]: h})[0]
class AuxLSTMPolicy(object):
    """Variant of LSTMPolicy that also conditions the LSTM on the previous
    action (one-hot) and previous reward, UNREAL-style."""

    def __init__(self, ob_space, ac_space):
        self.x = x = tf.placeholder(tf.float32, [None] + list(ob_space))
        # Previous one-hot action and previous scalar reward inputs.
        self.action = tf.placeholder(tf.float32, [None, ac_space])
        self.reward = tf.placeholder(tf.float32, [None, 1])

        x = tf.nn.relu(conv2d(x, 16, "l1", [8, 8], [4, 4]))
        x = conv_features = tf.nn.relu(conv2d(x, 32, "l2", [4, 4], [2, 2]))
        x = flatten(x)
        x = tf.nn.relu(linear(x, 256, "l3", normalized_columns_initializer(0.1)))
        # Concatenate conv features with prev action/reward before the LSTM.
        x = tf.concat(concat_dim=1, values=[x, self.action, self.reward])

        # introduce a "fake" batch dimension of 1 after flatten so that we can do LSTM over time dim
        x = tf.expand_dims(x, [0])

        size = 256
        lstm = rnn.rnn_cell.BasicLSTMCell(size, state_is_tuple=True)
        self.state_size = lstm.state_size
        step_size = tf.shape(self.x)[:1]

        # Zero initial LSTM state plus feed placeholders for rollouts.
        c_init = np.zeros((1, lstm.state_size.c), np.float32)
        h_init = np.zeros((1, lstm.state_size.h), np.float32)
        self.state_init = [c_init, h_init]
        c_in = tf.placeholder(tf.float32, [1, lstm.state_size.c])
        h_in = tf.placeholder(tf.float32, [1, lstm.state_size.h])
        self.state_in = [c_in, h_in]

        state_in = rnn.rnn_cell.LSTMStateTuple(c_in, h_in)
        lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
            lstm, x, initial_state=state_in, sequence_length=step_size,
            time_major=False)
        lstm_c, lstm_h = lstm_state
        x = tf.reshape(lstm_outputs, [-1, size])
        # Policy-logit and state-value heads share the LSTM features.
        self.logits = linear(x, ac_space, "action", normalized_columns_initializer(0.01))
        self.vf = tf.reshape(linear(x, 1, "value", normalized_columns_initializer(1.0)), [-1])
        self.state_out = [lstm_c[:1, :], lstm_h[:1, :]]
        self.sample = categorical_sample(self.logits, ac_space)[0, :]
        self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)

    def get_initial_features(self):
        """Return the zero LSTM state used at the start of an episode."""
        return self.state_init

    def act(self, ob, prev_a, prev_r, c, h):
        """Sample an action given observation, previous action and reward.

        Returns [one_hot_action, value_estimate, new_c, new_h]."""
        sess = tf.get_default_session()
        return sess.run([self.sample, self.vf] + self.state_out,
                        {self.x: [ob], self.action: [prev_a], self.reward: [[prev_r]],
                         self.state_in[0]: c, self.state_in[1]: h})

    def value(self, ob, prev_a, prev_r, c, h):
        """Estimate V(ob) without sampling an action."""
        sess = tf.get_default_session()
        return sess.run(self.vf, {self.x: [ob], self.action: [prev_a], self.reward: [[prev_r]],
                                  self.state_in[0]: c, self.state_in[1]: h})[0]
|
[
"[email protected]"
] | |
2f211ee9858ffddacd1a6b995f06cd8455450b80
|
4d9ce4ab1f0ce0a857f215edc2ffc99ce3b82623
|
/tfx/orchestration/experimental/core/mlmd_state_test.py
|
6faacc6cc12f8ce1e987bfdbb57b7de35f8efd41
|
[
"Apache-2.0"
] |
permissive
|
vpipkt/tfx
|
448fd85a177f7e3a3a6dacf262eb0c93f459f534
|
42f4f4095ff3c3e23fe2ac1076c9a0fdfc631d23
|
refs/heads/master
| 2023-06-20T12:27:56.083959 | 2021-05-25T18:31:23 | 2021-05-25T18:33:12 | 370,820,614 | 0 | 0 |
Apache-2.0
| 2021-05-25T20:31:22 | 2021-05-25T20:31:22 | null |
UTF-8
|
Python
| false | false | 2,934 |
py
|
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.experimental.core.mlmd_state."""
import os
import tensorflow as tf
from tfx.orchestration import metadata
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import test_utils
from ml_metadata.proto import metadata_store_pb2
def _write_test_execution(mlmd_handle):
  """Registers a 'foo' execution type and writes one execution of it.

  Returns the execution re-read from MLMD so it carries db-assigned ids.
  """
  execution_type = metadata_store_pb2.ExecutionType(name='foo', version='bar')
  execution_type_id = mlmd_handle.store.put_execution_type(execution_type)
  [execution_id] = mlmd_handle.store.put_executions(
      [metadata_store_pb2.Execution(type_id=execution_type_id)])
  [execution] = mlmd_handle.store.get_executions_by_id([execution_id])
  return execution
class MlmdStateTest(test_utils.TfxTest):
  """Tests for the mlmd_state execution read-modify-write helpers."""

  def setUp(self):
    super().setUp()
    # Per-test sqlite-backed MLMD instance under the test's tmp directory.
    pipeline_root = os.path.join(
        os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
        self.id())
    metadata_path = os.path.join(pipeline_root, 'metadata', 'metadata.db')
    connection_config = metadata.sqlite_metadata_connection_config(
        metadata_path)
    connection_config.sqlite.SetInParent()
    self._mlmd_connection = metadata.Metadata(
        connection_config=connection_config)

  def test_mlmd_execution_update(self):
    """Mutations inside the atomic-op context are committed and cached."""
    with self._mlmd_connection as m:
      expected_execution = _write_test_execution(m)
      # Mutate execution.
      with mlmd_state.mlmd_execution_atomic_op(
          m, expected_execution.id) as execution:
        self.assertEqual(expected_execution, execution)
        execution.last_known_state = metadata_store_pb2.Execution.CANCELED
      # Test that updated execution is committed to MLMD.
      [execution] = m.store.get_executions_by_id([execution.id])
      self.assertEqual(metadata_store_pb2.Execution.CANCELED,
                       execution.last_known_state)
      # Test that in-memory state is also in sync.
      with mlmd_state.mlmd_execution_atomic_op(
          m, expected_execution.id) as execution:
        self.assertEqual(metadata_store_pb2.Execution.CANCELED,
                         execution.last_known_state)

  def test_mlmd_execution_absent(self):
    """Unknown execution ids yield None rather than raising."""
    with self._mlmd_connection as m:
      with mlmd_state.mlmd_execution_atomic_op(m, 1) as execution:
        self.assertIsNone(execution)
if __name__ == '__main__':
  # Delegate to the TensorFlow test runner.
  tf.test.main()
|
[
"[email protected]"
] | |
f7d962573d6c4eeb3ac79b56b3303e17fe27a433
|
8afb5afd38548c631f6f9536846039ef6cb297b9
|
/_VSCODE-extensions/vscode-python/pythonFiles/runJediLanguageServer.py
|
a473bf76b3a84a8c79ff0f8fd1ea6b94dcf2f432
|
[
"MIT"
] |
permissive
|
bgoonz/UsefulResourceRepo2.0
|
d87588ffd668bb498f7787b896cc7b20d83ce0ad
|
2cb4b45dd14a230aa0e800042e893f8dfb23beda
|
refs/heads/master
| 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 |
MIT
| 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null |
UTF-8
|
Python
| false | false | 451 |
py
|
import re
import sys
import os
# Add the lib path to our sys path so jedi_language_server can find its references
# (the extension vendors its dependencies under pythonFiles/lib/python).
EXTENSION_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.join(EXTENSION_ROOT, "pythonFiles", "lib", "python"))

# Must be imported only after the sys.path tweak above.
from jedi_language_server.cli import cli

# Trick language server into thinking it started from 'jedi-language-server.exe'
sys.argv[0] = "jedi-language-server.exe"

# Propagate the server's exit status as this process's exit code.
sys.exit(cli())
|
[
"[email protected]"
] | |
b59c150b00f4f258483032fd787f347eff062302
|
229e1e103bc24dda4d8fef54b762009e19045a45
|
/configs/nowd/gc/res101_d_gc.py
|
ad3d4ce5f80de72c915fef67bf6c818a89d6128a
|
[
"MIT"
] |
permissive
|
yinmh17/CCNet
|
c0be71919877c0d44c51cd8fd8ad8f644ef618a6
|
d5e90fe5ccfa16389fd25bdd3e2160ffe2dfbd22
|
refs/heads/master
| 2020-06-18T13:03:46.781284 | 2019-11-12T06:26:59 | 2019-11-12T06:26:59 | 196,311,075 | 1 | 1 |
MIT
| 2019-07-21T19:48:39 | 2019-07-11T03:10:01 |
Python
|
UTF-8
|
Python
| false | false | 1,097 |
py
|
# Network definition: dilated ResNet-101 backbone with a global-context (GC)
# non-local style head; the extra attention branch is switched off here.
model = dict(
    type='basenet',
    pretrained='',
    backbone=dict(
        type='ResNet',
        depth=101,
        num_stages=4,
        block_num=[3, 4, 23, 3],  # residual blocks per stage (ResNet-101)
    ),
    att=dict(
        with_att=False,  # attention branch disabled in this config
        type='glore',
        att_stage=[False,False,True,False],
        att_pos='after_add',
        att_location=[[],[],[5,11,17],[]],
    ),
    module=dict(
        type='nl_nowd',
        downsample=True,
        whiten_type=[],
        weight_init_scale=1.0,
        with_gc=True,   # use the global-context path ...
        with_nl=False,  # ... not the full pairwise non-local path
        nowd=[],
        use_out=False,
        out_bn=False,
    )
)

# SGD training schedule with polynomial LR decay, starting from an
# ImageNet-pretrained ResNet-101 checkpoint.
train_cfg = dict(
    batch_size=8,
    learning_rate=1e-2,
    momentum=0.9,
    num_steps=60000,
    power=0.9,  # poly LR decay exponent
    random_seed=1234,
    restore_from='./dataset/resnet101-imagenet.pth',
    save_num_images=2,
    start_iters=0,
    save_from=59500,   # only keep snapshots from this iteration onward
    save_pred_every=100,
    snapshot_dir='snapshots/',
    weight_decay=0.0005
)

# Cityscapes semantic segmentation: 19 classes, 769x769 crops, label 255
# ignored by the loss.
data_cfg = dict(
    data_dir='cityscapes',
    data_list='./dataset/list/cityscapes/train.lst',
    ignore_label=255,
    input_size='769,769',
    num_classes=19,
)
|
[
"[email protected]"
] | |
a76ac90843514fd223703c25311d3db82fdcb1d9
|
fb86f0dca6e525b8a8ddb63f10b8d220ddd7f7fe
|
/test/functional/sapling_changeaddresses.py
|
f8aa5d49517df753cb930c26fe101290083c7303
|
[
"MIT"
] |
permissive
|
ORO-mlm/UNO-Core
|
14fcdb3c2db4bde256e48ea661ada61579ccf403
|
d6e6769ce57466cfc9e7cab681eab880cdb8e3e8
|
refs/heads/main
| 2023-06-16T08:21:00.808606 | 2021-07-12T07:08:35 | 2021-07-12T07:08:35 | 383,350,655 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,141 |
py
|
#!/usr/bin/env python3
# Copyright (c) 2019 The Zcash developers
# Copyright (c) 2020 The PIVX developers
# Copyright (c) 2021- The UNO developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import UnoTestFramework
from test_framework.util import *
from decimal import Decimal
# Test wallet change address behaviour
class WalletChangeAddressesTest(UnoTestFramework):
    """Checks that shielded sends never reuse a transparent change address."""

    def set_test_params(self):
        self.num_nodes = 2
        self.setup_clean_chain = True
        # Activate the v5 shield (Sapling) upgrade from block 1 on both nodes.
        saplingUpgrade = ['-nuparams=v5_shield:1']
        self.extra_args = [saplingUpgrade, saplingUpgrade]

    def run_test(self):
        self.nodes[0].generate(110)
        # Obtain some transparent funds
        midAddr = self.nodes[0].getnewshieldaddress()
        # Shield almost all the balance
        txid = self.nodes[0].shieldsendmany(get_coinstake_address(self.nodes[0]), [{"address": midAddr, "amount": Decimal(2400)}])
        self.sync_all()
        self.nodes[1].generate(1)
        self.sync_all()

        taddrSource = self.nodes[0].getnewaddress()
        # Unshield in several small chunks so taddrSource ends up holding
        # multiple spendable UTXOs for the reuse checks below.
        for _ in range(6):
            recipients = [{"address": taddrSource, "amount": Decimal('3')}]
            txid = self.nodes[0].shieldsendmany(midAddr, recipients, 1)
            self.sync_all()
            self.nodes[1].generate(1)
            self.sync_all()

        def check_change_taddr_reuse(target, isTargetShielded):
            # Two identical sends from the same source must put their change
            # on two different fresh addresses.
            recipients = [{"address": target, "amount": Decimal('1')}]

            # Send funds to recipient address twice
            txid1 = self.nodes[0].shieldsendmany(taddrSource, recipients, 1)
            self.nodes[1].generate(1)
            self.sync_all()
            txid2 = self.nodes[0].shieldsendmany(taddrSource, recipients, 1)
            self.nodes[1].generate(1)
            self.sync_all()

            # Verify that the two transactions used different change addresses
            tx1 = self.nodes[0].getrawtransaction(txid1, 1)
            tx2 = self.nodes[0].getrawtransaction(txid2, 1)
            assert_true(len(tx1['vout']) >= 1) # at least one output
            assert_true(len(tx2['vout']) >= 1)
            for i in range(len(tx1['vout'])):
                tx1OutAddrs = tx1['vout'][i]['scriptPubKey']['addresses']
                tx2OutAddrs = tx2['vout'][i]['scriptPubKey']['addresses']
                if tx1OutAddrs != [target]:
                    # Any output not paying the target is change; must differ.
                    print('Source address: %s' % taddrSource)
                    print('TX1 change address: %s' % tx1OutAddrs[0])
                    print('TX2 change address: %s' % tx2OutAddrs[0])
                    assert(tx1OutAddrs != tx2OutAddrs)

        taddr = self.nodes[0].getnewaddress()
        saplingAddr = self.nodes[0].getnewshieldaddress()

        print()
        print('Checking shieldsendmany(taddr->Sapling)')
        check_change_taddr_reuse(saplingAddr, True)
        print()
        print('Checking shieldsendmany(taddr->taddr)')
        check_change_taddr_reuse(taddr, False)
if __name__ == '__main__':
    # Standard functional-test entry point: run under the framework harness.
    WalletChangeAddressesTest().main()
|
[
"[email protected]"
] | |
e74a4232dc7fc3b1de106635f6beb9dc191f4f63
|
373e44ad5fba391d86543f28b91a2cdf9a22f874
|
/model/TestPar45_60_varydatasize/60/PowerPredEDFA_average.py
|
d70aad24fd3ba2b6baa9d93a86d69d68d3e1e57a
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
szhu3210/oopt-gnpy
|
c311fe00b869d3bcfabae4e22366cbc2215eb91d
|
83768480eb9aedad560ab9a722493f04cfe80c9c
|
refs/heads/master
| 2020-04-02T00:24:48.608431 | 2019-04-01T18:32:11 | 2019-04-01T18:32:11 | 153,803,494 | 1 | 0 |
BSD-3-Clause
| 2018-10-19T15:22:16 | 2018-10-19T15:22:15 | null |
UTF-8
|
Python
| false | false | 1,521 |
py
|
# coding: utf-8
# In[171]:
# Ido Michael
import tensorflow as tf
import os, struct
import numpy as np
import matplotlib.pyplot as plt
import ParsePowerEDFA
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
import math
import sys
import configparser
import random
print(tf.__version__)

# In case we need to average results of 5 different debug files and plot them on a graph.
# ParsePowerEDFA.getTestFiles()

# Average files by name and then write collected results into a csv file.
[testdb, testmse, testmae, tr2, tr4, tr6, tr8, tr1, mse_tr, mae_tr] = ParsePowerEDFA.averageResults("TestPar45_60_60")
[val2, val4, val6, val8, val1, mse_val, mae_val] = ParsePowerEDFA.averageResults_val("TestPar45_60_60")

# Emit the training / validation / test summary matrices.
ParsePowerEDFA.plot_to_matrix(tr2, tr4, tr6, tr8, tr1, mse_tr, mae_tr)
ParsePowerEDFA.plot_to_matrix_Val(val2, val4, val6, val8, val1, mse_val, mae_val)
ParsePowerEDFA.plot_to_matrix_test(testdb, testmse, testmae)

# 20%
# [testdb, val2, val4, val6, val8, val1] = ParsePowerEDFA.averageResults([
#     "./TestPar29.ini140-debug.log",
#     "./TestPar29.ini84-debug.log",
#     "./TestPar29.ini150-debug.log"
# ])

# [testdb, val2, val4, val6, val8, val1] = ParsePowerEDFA.averageResults(["./test/TestPar25.ini-smaller53-debug.log", "./test/TestPar25.ini-smaller103-debug.log", "./test/TestPar25.ini-smaller25-debug.log", "./test/TestPar25.ini-smaller37-debug.log", "./test/TestPar25.ini-smaller30-debug.log"])
# ParsePowerEDFA.plotGraph(val2, val4, val6, val8, val1)
|
[
"[email protected]"
] | |
a030538c5ca7316deb104f9555029252bad5e681
|
3c40dce2af71dd6216f4b64e5f42d4d6d5bc6b25
|
/auto_client/libs/plugins/__init__.py
|
02afa5f43fb806ca16c94112950069f750df338e
|
[] |
no_license
|
huzhou520/cmdb
|
1c93ad47d2a5e564c1e8f34ec9015590208fafeb
|
b6b4aba4184ed316a0a0b5f2b1a876473ec4cdbc
|
refs/heads/master
| 2020-09-11T22:36:31.588900 | 2019-12-27T01:55:40 | 2019-12-27T01:55:40 | 222,212,934 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,078 |
py
|
from settings import PLUGIN_DICT
def get_server_info(hostname, ssh_func):
    """Run every configured collection plugin against one remote host.

    :param hostname: remote host to inspect.
    :param ssh_func: callable each plugin uses to execute commands remotely.
    :return: dict mapping plugin key (e.g. 'board') to that plugin's result.
    """
    # Hoisted out of the loop: the module only needs importing once
    # (it previously ran `import importlib` on every iteration).
    import importlib

    info_dict = {}
    for key, path in PLUGIN_DICT.items():
        # PLUGIN_DICT values look like 'libs.plugins.board.Board':
        # a dotted module path with the plugin class name at the end.
        module_name, class_name = path.rsplit('.', maxsplit=1)
        # Load the module from its string path, then resolve the class
        # by reflection.
        module = importlib.import_module(module_name)
        cls = getattr(module, class_name)
        # Instantiate the plugin and run its process() hook.
        obj = cls()
        ret = obj.process(hostname, ssh_func)
        info_dict[key] = ret
    return info_dict
|
[
"[email protected]"
] | |
315dde5190931ae95728751f22a8752b3de8b9e1
|
a439ca43178d38cfe6daaee50ea134ca6c52b502
|
/thaniya_client/src/thaniya_client/tools/ThaniyaMySQL_native.py
|
a110c96dca5ffc807f8d9a6352e11b529c08a02e
|
[
"Apache-2.0"
] |
permissive
|
jkpubsrc/Thaniya
|
37ca727abdc6f9f605257813889fe3a033995bba
|
4ebdf2854e3d7888af7396adffa22628b4ab2267
|
refs/heads/master
| 2023-03-05T20:58:59.528746 | 2021-02-15T19:31:06 | 2021-02-15T19:31:06 | 331,318,787 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,503 |
py
|
import os
import typing
import tarfile
import jk_simpleexec
import jk_utils
from ..ThaniyaBackupContext import ThaniyaBackupContext
from .EnumTarPathMode import EnumTarPathMode
from .ThaniyaService import ThaniyaService
class ThaniyaMySQL_native:
	"""Backup helpers that drive the native mysqldump binary and a local
	mysql.connector connection."""

	@staticmethod
	def mySQLDump(ctx:ThaniyaBackupContext, dbName:str, dbUserName:str, dbPassword:str, outputDumpFilePath:str) -> int:
		"""Dump database *dbName* to *outputDumpFilePath* via /usr/bin/mysqldump.

		Credentials are passed through a private defaults-extra-file so they
		never appear on the command line.  Returns the size in bytes of the
		created dump file; raises Exception if mysqldump fails.
		"""
		assert isinstance(ctx, ThaniyaBackupContext)
		assert isinstance(dbName, str)
		assert dbName
		assert isinstance(outputDumpFilePath, str)
		assert outputDumpFilePath

		ctx = ctx.descend("Creating dump file " + repr(outputDumpFilePath) + " ...")
		with ctx.log as nestedLog:
			outputDumpFilePath = ctx.absPath(outputDumpFilePath)

			# Write credentials to a private temp file read by mysqldump.
			authFile = ctx.privateTempDir.writeTextFile("[mysqldump]\nuser=" + dbUserName + "\npassword=" + dbPassword + "\n")

			result = jk_simpleexec.invokeCmd("/usr/bin/mysqldump", [
				"--defaults-extra-file=" + authFile,
				# NOTE(review): "--r" looks like a truncated "-r"/"--result-file"
				# -- confirm mysqldump accepts this abbreviation unambiguously.
				"--r",
				outputDumpFilePath,
				"--routines", # Include stored routines (procedures and functions) for the dumped databases in the output.
				"--triggers", # Include triggers for each dumped table in the output.
				dbName,
			], workingDirectory=os.path.dirname(authFile))

			if result.returnCode == 0:
				nestedLog.notice("Succeeded.")
				return os.path.getsize(outputDumpFilePath)
			else:
				result.dump(nestedLog.error)
				raise Exception("Failed to backup database '" + dbName + "'!")
	#

	@staticmethod
	def mySQLDumpCalculateSize(ctx:ThaniyaBackupContext, dbName:str, dbUserName:str, dbPassword:str) -> int:
		"""Estimate the dump size by summing data_length over the DB's tables.

		Connects to MySQL on localhost with mysql.connector; returns -1 when
		no size row is found.
		"""
		import mysql.connector

		assert isinstance(ctx, ThaniyaBackupContext)

		ctx = ctx.descend("Calculating size for the MySQL dump ...")
		with ctx.log as nestedLog:
			con = None
			try:
				# Segmentation fault
				# see: https://bugs.mysql.com/bug.php?id=89889
				# (but this does not work)
				print("> Connecting ....")
				con = mysql.connector.connect(host="localhost", database=dbName, user=dbUserName, passwd=dbPassword)
				print("> Connected.")

				sqlQuery = "SELECT SUM(data_length) FROM information_schema.tables WHERE table_schema = '" + dbName + "';"
				cursor = con.cursor()
				cursor.execute(sqlQuery)
				records = cursor.fetchall()
				assert cursor.rowcount == 1

				nEstimatedSize = -1
				for row in records:
					nEstimatedSize = row[0]
					break

				return nEstimatedSize
			finally:
				if con and con.is_connected():
					# NOTE(review): `cursor` is only bound after a successful
					# connect; this relies on is_connected() guarding that path.
					cursor.close()
					con.close()
	#
|
[
"[email protected]"
] | |
716ed2177858886621060abad9ac3e5c264f152a
|
ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1
|
/res/packages/scripts/scripts/client/gui/Scaleform/daapi/view/battle/shared/timers_common.py
|
fa0fb189e2e6d1344d6ee8d161432c625338e6e9
|
[] |
no_license
|
webiumsk/WOT-0.9.20.0
|
de3d7441c5d442f085c47a89fa58a83f1cd783f2
|
811cb4e1bca271372a1d837a268b6e0e915368bc
|
refs/heads/master
| 2021-01-20T22:11:45.505844 | 2017-08-29T20:11:38 | 2017-08-29T20:11:38 | 101,803,045 | 0 | 1 | null | null | null | null |
WINDOWS-1250
|
Python
| false | false | 2,809 |
py
|
# 2017.08.29 21:46:09 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/battle/shared/timers_common.py
import BigWorld
from gui.shared.utils.TimeInterval import TimeInterval
class TimerComponent(object):
    """Base class for battle-UI countdown timers.

    Holds the timer's identity (type/view ids) and timing bookkeeping;
    subclasses supply the actual ticking and view plumbing through the
    _startTick / _stopTick / _showView / _hideView hooks.
    """
    __slots__ = ('_panel', '_typeID', '_viewID', '_totalTime', '_startTime', '_finishTime')

    def __init__(self, panel, typeID, viewID, totalTime):
        super(TimerComponent, self).__init__()
        self._panel = panel
        self._typeID = typeID
        self._viewID = viewID
        self._totalTime = totalTime
        self._startTime = BigWorld.serverTime()
        if totalTime:
            self._finishTime = self._startTime + totalTime
        else:
            # A falsy duration marks an open-ended timer.
            self._finishTime = 0

    def __repr__(self):
        return 'TimerComponent(typeID = %s, viewID = %s, totalTime = %s)' % (self._typeID, self._viewID, self._totalTime)

    def clear(self):
        """Drop the panel reference so the component can be released."""
        self._panel = None

    def show(self, isBubble=True):
        """Display the timer view, then begin ticking."""
        self._showView(isBubble)
        self._startTick()

    def hide(self):
        """Stop ticking, then remove the timer view."""
        self._stopTick()
        self._hideView()

    @property
    def typeID(self):
        return self._typeID

    @property
    def viewID(self):
        return self._viewID

    @property
    def finishTime(self):
        return self._finishTime

    @property
    def totalTime(self):
        return self._totalTime

    def _startTick(self):
        raise NotImplementedError

    def _stopTick(self):
        raise NotImplementedError

    def _hideView(self):
        raise NotImplementedError

    def _showView(self, isBubble):
        raise NotImplementedError
class PythonTimer(TimerComponent):
    """Timer driven client-side by a once-per-second TimeInterval callback."""
    __slots__ = ('_timeInterval', '__weakref__')

    def __init__(self, panel, typeID, viewID, totalTime):
        super(PythonTimer, self).__init__(panel, typeID, viewID, totalTime)
        # Periodic callback into self._tick every 1.0 seconds.
        self._timeInterval = TimeInterval(1.0, self, '_tick')

    def clear(self):
        # Stop ticking before releasing the panel reference.
        self._timeInterval.stop()
        super(PythonTimer, self).clear()

    def _startTick(self):
        if self._totalTime:
            # Clamp against server time so a late start shows the true remainder.
            timeLeft = max(0, self._finishTime - BigWorld.serverTime())
            if timeLeft:
                self._setViewSnapshot(timeLeft)
                self._timeInterval.start()

    def _stopTick(self):
        self._timeInterval.stop()

    def _tick(self):
        timeLeft = self._finishTime - BigWorld.serverTime()
        if timeLeft >= 0:
            self._setViewSnapshot(timeLeft)
        else:
            # Timer elapsed: tear the view down.
            self.hide()

    def _setViewSnapshot(self, timeLeft):
        # Subclass hook: push the remaining time into the Flash view.
        raise NotImplementedError
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\Scaleform\daapi\view\battle\shared\timers_common.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:46:09 Střední Evropa (letní čas)
|
[
"[email protected]"
] | |
e6c68eff9e6a0fdc168b30b5b841532a1cf4b03d
|
2daa3894e6d6929fd04145100d8a3be5eedbe21c
|
/tests/artificial/transf_inv/trend_constant/cycle_7/ar_/test_artificial_32_inv_constant_7__0.py
|
a69b8bc01e2baf94726fff51468a7e4e6843851d
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Henri-Lo/pyaf
|
a1f73a0cc807873bd7b79648fe51de9cfd6c126a
|
08c968425d85dcace974d90db7f07c845a0fe914
|
refs/heads/master
| 2021-07-01T12:27:31.600232 | 2017-09-21T11:19:04 | 2017-09-21T11:19:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 305 |
py
|
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art

# 32-point daily series: constant trend, cycle length 7, inverse transform,
# zero noise (sigma=0), no exogenous variables, AR order 0, fixed seed.
dataset = tsds.generate_random_TS(N = 32 , FREQ = 'D', seed = 0, trendtype = "constant", cycle_length = 7, transform = "inv", sigma = 0.0, exog_count = 0, ar_order = 0);

art.process_dataset(dataset);
|
[
"[email protected]"
] | |
ade16edad2cbac40e9bacca1b0aba5e260577e2f
|
dfe925c32292ba1e054b86ea660546eb9eac921b
|
/example/gs/__init__.py
|
c371d57c6a673838f1d0eb1f56482200e99ebb74
|
[] |
no_license
|
keul/example.gs
|
bc64488d5e67492994b5a12a99d0fa64f1af87de
|
5435e9f4fde66b810ff184c25e2dc26aa40900df
|
refs/heads/master
| 2020-04-30T09:50:23.781896 | 2013-05-01T18:02:15 | 2013-05-01T18:02:15 | 9,789,567 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,012 |
py
|
# -*- coding: utf8 -*-
import logging
from zope.i18nmessageid import MessageFactory
from example.gs import config
from example.gs.tool import FooTool
from Products.Archetypes import atapi
from Products.CMFCore import utils
logger = logging.getLogger('example.gs')
gsMessageFactory = MessageFactory('example.gs')
def initialize(context):
    """Initializer called when used as a Zope 2 product."""

    # Collect all Archetypes content types registered under this project.
    content_types, constructors, ftis = atapi.process_types(
        atapi.listTypes(config.PROJECTNAME),
        config.PROJECTNAME)

    # Register each type with its add permission and factory constructor.
    for atype, constructor in zip(content_types, constructors):
        utils.ContentInit('%s: %s' % (config.PROJECTNAME, atype.portal_type),
            content_types=(atype, ),
            permission=config.ADD_PERMISSIONS[atype.portal_type],
            extra_constructors=(constructor,),
            ).initialize(context)

    # Tool registration intentionally left disabled:
    # utils.ToolInit("Foo Tool",
    #               tools=(FooTool,),
    #               icon="qm.gif",
    #               ).initialize(context)
|
[
"[email protected]"
] | |
db2b8203bfcc6e719473a13b065bcf0d51007f50
|
b15fd3fa4431c3bc0e9098b8ece4cb1e3bb45d50
|
/data_providers/downloader.py
|
ec29f6d09b6514f00c036b6841ea965efcc7c89b
|
[] |
no_license
|
SoulDuck/DenseNet
|
0cdbb86f0cb4a685585f562374c894c165b3459f
|
96581dd8e2df973560cf69ff99da211e91af55bb
|
refs/heads/master
| 2021-07-10T04:22:31.868745 | 2017-10-06T13:23:57 | 2017-10-06T13:23:57 | 105,623,435 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,044 |
py
|
import sys ,os
from urllib import urlretrieve
import tarfile
import zipfile
def report_download_progress(count, block_size, total_size):
    """urlretrieve reporthook: rewrite the current terminal line with the
    percentage downloaded so far.

    Bug fixes: the format spec was '{0:1%}' (field *width* 1, six default
    decimals) instead of the intended '{0:.1%}' (one decimal place), and the
    message read 'already downloader'.
    """
    pct_complete = float(count * block_size) / total_size
    msg = "\r {0:.1%} already downloaded".format(pct_complete)
    # \r returns to line start; flush so progress shows without a newline.
    sys.stdout.write(msg)
    sys.stdout.flush()
def download_data_url(url, download_dir):
filename = url.split('/')[-1]
file_path = os.path.join(download_dir , filename)
if not os.path.exists(file_path):
try:
os.makedirs(download_dir)
except Exception :
pass
print "Download %s to %s" %(url , file_path)
file_path , _ = urlretrieve(url=url,filename=file_path,reporthook=report_download_progress)
print file_path
print('\nExtracting files')
if file_path.endswith(".zip"):
zipfile.ZipFile(file=file_path , mode="r").extracall(download_dir)
elif file_path.endswith(".tar.gz" , ".tgz"):
tarfile.open(name=file_path , mode='r:gz').extractall(download_dir)
|
[
"[email protected]"
] | |
2cd1a1a76fe6766a6854de9064bedf52a1da8564
|
a2f9d55d686425c4b47ce150aa1a23ea933055cc
|
/crossposting/spawnprocess.py
|
0fa69d71efbd3ebead59242be16e3f573bf5535b
|
[] |
no_license
|
wd5/blombum
|
b31c581f2c36c220164901189be1ba95a8341e0e
|
fe11efb369fe2cec67af1e79bc8935a266df2f80
|
refs/heads/master
| 2020-12-25T02:23:30.297939 | 2010-06-29T10:03:31 | 2010-06-29T10:03:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 331 |
py
|
#!/usr/bin/python
# Fire-and-forget helpers: spawn a background index rebuild, then feed a
# JSON-ish payload to a node script over stdin.
import subprocess

# Rebuild the search index for site 4 without parsing.
# NOTE(review): no wait()/communicate() here — this process may outlive
# the script; confirm that is intended.
subprocess.Popen([
    '/home/nide/code/kanobu/src/manage.py', 'rebuildindex', '--site_id', '4', '--parse', 'none'
])
# Pipe the payload into the node crossposting test script; communicate()
# blocks until that process exits.
subprocess.Popen([
    'node', '/home/nide/code/blombum/crossposting/test.js'
], stdin = subprocess.PIPE).communicate('[{somevar: 1}, {somevar: 44}, {somevar: 22}]')
print 'kuku'
|
[
"[email protected]"
] | |
51da8e312770d0a2581c84ac2ef664dca607d04f
|
3d6bb3df9ca1d0de6f749b927531de0790aa2e1d
|
/full_segmentation_histogram_creator.py
|
97bc397018dc6ce79e45c96098caf6d100fa396d
|
[] |
no_license
|
standardgalactic/kuhner-python
|
da1d66a6d638a9a379ba6bae2affdf151f8c27c5
|
30b73554cc8bc9d532c8108b34dd1a056596fec7
|
refs/heads/master
| 2023-07-07T04:18:30.634268 | 2020-04-06T04:37:48 | 2020-04-06T04:37:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,715 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 1 12:43:28 2016
@author: lpsmith
"""
from __future__ import division
from os import walk
import lucianSNPLibrary as lsl
# Keep only segments supported by at least this many SNP markers.
nsamples_min = 10 #Arbitrary value: minimum number of samples we require

# Mean log2 ratios, bucketed by how many markers support the segment.
data10_12 = []
data13_20 = []
data21_50 = []
data51_500 = []
data501_5000 = []
data5001_50000 = []
data50001_plus = []
dataall =[]

# Collect every file in the segmentation output directory (top level only;
# the break stops os.walk after the first directory level).
#fullseg_filenames = ["three_formal_cy_omni_mix3_b37RB.txt"]
fullseg_filenames = []
for (_, _, f) in walk("full_segmentation_output/"):
    fullseg_filenames += f
    break

discrepancies = open("full_segmentation_histograms/discrepancies.txt", "w")
for file in fullseg_filenames:
    handle = open("full_segmentation_output/" + file, "r")
    for line in handle:
        (chr, start, end, pmean, pnmarkers, nmarkers, meanlog2r) = line.rstrip().split("\t")
        # Skip the header row.
        if (chr=="chr"):
            continue
        # Cross-check Partek's marker count against the raw SNP marker
        # count and log any mismatch to the discrepancies file.
        if (pnmarkers != "?"):
            pnmarkers = int(pnmarkers)
            nmarkers = int(nmarkers)
            if (pnmarkers != nmarkers):
                print "Anomaly in", file, ": different nmarkers from partek vs. raw SNP data:"
                print "  ", line
                line = file + "\t" + line
                discrepancies.write(line)
        # NOTE(review): when pnmarkers == "?", nmarkers is still a string at
        # this point, so the comparisons below rely on Python 2 mixed-type
        # ordering — confirm that is intended.
        if (nmarkers < nsamples_min):
            continue
        meanlog2r = float(meanlog2r)
        dataall.append(meanlog2r)
        # Route the value into its marker-count bucket.
        if (nmarkers < 13):
            data10_12.append(meanlog2r)
        elif (nmarkers < 21):
            data13_20.append(meanlog2r)
        elif (nmarkers < 51):
            data21_50.append(meanlog2r)
        elif (nmarkers < 501):
            data51_500.append(meanlog2r)
        elif (nmarkers < 5001):
            data501_5000.append(meanlog2r)
        elif (nmarkers < 50001):
            data5001_50000.append(meanlog2r)
        elif (nmarkers < 500001):
            data50001_plus.append(meanlog2r)

# Write one histogram (bin width 0.001) per marker-count bucket.
binwidth = 0.001
lsl.createPrintAndSaveHistogram(data10_12, "full_segmentation_histograms/data10_12.txt", binwidth)
lsl.createPrintAndSaveHistogram(data13_20, "full_segmentation_histograms/data13_20.txt", binwidth)
lsl.createPrintAndSaveHistogram(data21_50, "full_segmentation_histograms/data21_50.txt", binwidth)
lsl.createPrintAndSaveHistogram(data51_500, "full_segmentation_histograms/data51_500.txt", binwidth)
lsl.createPrintAndSaveHistogram(data501_5000, "full_segmentation_histograms/data501_5000.txt", binwidth)
lsl.createPrintAndSaveHistogram(data5001_50000, "full_segmentation_histograms/data5001_50000.txt", binwidth)
lsl.createPrintAndSaveHistogram(data50001_plus, "full_segmentation_histograms/data50001_plus.txt", binwidth)
lsl.createPrintAndSaveHistogram(dataall, "full_segmentation_histograms/dataall.txt", binwidth)
|
[
"[email protected]"
] | |
a7cb86c2e4cbd4332442225c33eccf63b66b7f00
|
de4e1332950d37707620c54a9613258c1dd9489c
|
/dongyeop/4주차/주식가격.py
|
66a1fd2587ee46a60471b010445f940fe0c01ebf
|
[] |
no_license
|
PnuLikeLion9th/Summer_algorithm
|
8fe74066b9673fb891b7205f75f808a04c7fe750
|
dcfcb6325854b3b4c529451d5c6b162298b53bc1
|
refs/heads/master
| 2023-07-10T13:57:05.511432 | 2021-08-15T07:50:00 | 2021-08-15T07:50:00 | 378,679,514 | 3 | 10 | null | 2021-08-15T07:50:01 | 2021-06-20T15:32:18 |
Python
|
UTF-8
|
Python
| false | false | 943 |
py
|
# 브루트포스
# def solution(prices):
# answer=[0]*len(prices)
# for i in range(len(prices)):
# for j in range(i+1,len(prices)):
# if prices[i] <=prices[j]:
# answer[i]+=1
# else:
# answer[i]+=1
# break
# return answer
def solution(prices):
    """Return, per time step, how many steps the price stayed >= its value.

    Monotonic-stack approach: the stack holds indices whose "no price drop"
    window is still open.  A lower price closes every open index with a
    strictly higher price; whatever remains open lasts until the last tick.
    """
    n = len(prices)
    answer = [0] * n
    open_indices = []
    for now, current_price in enumerate(prices):
        # Close every window whose opening price exceeds the current one.
        while open_indices and prices[open_indices[-1]] > current_price:
            started = open_indices.pop()
            answer[started] = now - started
        open_indices.append(now)
    # Windows still open ran until the end of the series.
    for started in open_indices:
        answer[started] = n - started - 1
    return answer
|
[
"[email protected]"
] | |
a9959f969e1eb4d2abb88b4c50c283f909536ea4
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/H/hanse/vol_essen.py
|
b2d4485b5664a460fc906ebcf35661445fb64799
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,428 |
py
|
import scraperwiki
import lxml.html

# BUG FIX: the entire scraper was accidentally pasted twice, fusing
# "break import scraperwiki" into a single, syntactically invalid line.
# A single copy of the pagination loop is kept.

# Walk the paginated company listing and store one row per company,
# keyed (and de-duplicated) by company name.
pageCounter = 1
while True:
    page = scraperwiki.scrape("http://essen.vol.at/welcome.asp?page=%d" % (pageCounter))
    root = lxml.html.fromstring(page)
    for entry in root.cssselect('div[class="Entry"]'):
        data = {
            "Name": entry.cssselect('div[class="CompanyName"]')[0].text_content(),
            "Street": entry.cssselect('div[class="CompanyStreet"]')[0].text_content(),
            "City": entry.cssselect('div[class="CompanyPlace"]')[0].text_content(),
        }
        scraperwiki.sqlite.save(unique_keys=["Name"], data=data)
    # Keep paging while a "next page" link is present.
    if root.cssselect('a[class="Next"]'):
        pageCounter = pageCounter + 1
    else:
        break
|
[
"[email protected]"
] | |
bda191301750ca690fb5cac1d9f9abe3f859c48c
|
b773ca4e5f4a8642149316d3aded4c8b1e6037d2
|
/sprint-challenge/aq_dashboard.py
|
e05d226a6c975acfb3676de3141310ccde108ea6
|
[
"MIT"
] |
permissive
|
echiyembekeza/DS-Unit-3-Sprint-3-Productization-and-Cloud
|
c2157e9078ec49b1f59d28220146a197dda3b25c
|
64958ae8e9d2310d6c72606109a6ccf456bc5949
|
refs/heads/master
| 2020-08-04T18:39:27.405320 | 2019-12-11T03:11:28 | 2019-12-11T03:11:28 | 212,239,896 | 0 | 0 |
MIT
| 2019-10-02T02:27:48 | 2019-10-02T02:27:48 | null |
UTF-8
|
Python
| false | false | 1,662 |
py
|
"""OpenAQ Air Quality Dashboard with Flask."""
from flask import Flask, request
from flask_sqlalchemy import SQLAlchemy
from decouple import config
from os import getenv
import openaq
APP = Flask(__name__)
APP.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'
DB = SQLAlchemy(APP)
API = openaq.OpenAQ()
mment = API.measurements(city='Los Angeles', parameter='pm25')
body = mment[1]
def LAquery(k):
    """Collect (value, utc_timestamp) pairs from the module-level OpenAQ response.

    BUG FIX: the original shadowed the parameter ``k`` with the loop
    variable, so the argument was silently ignored.  The parameter is kept
    (unused) for backward compatibility with existing callers.
    """
    LAresults = body['results']
    values = []
    for result in LAresults:
        value = result.get('value')
        date = result.get('date')
        # NOTE(review): assumes every result carries a 'date' dict with a
        # 'utc' key — confirm against the OpenAQ response schema.
        utc = date.get('utc')
        values.append((value, utc))
    return values
class Record(DB.Model):
    """One PM2.5 measurement: a UTC timestamp string plus its value."""
    id = DB.Column(DB.Integer, primary_key=True)
    # UTC timestamp of the measurement (presumably ISO-8601 from OpenAQ's
    # date.utc field — confirm against /refresh).
    datetime = DB.Column(DB.String(25))
    # Measured pm25 value.
    value = DB.Column(DB.Float, nullable=False)

    def __repr__(self):
        return f"<id={self.id}, datetime={self.datetime}, value={self.value}>"
@APP.route('/')
def root():
    """Base view."""
    # Show only measurements at or above 10 (filter threshold on pm25 value).
    records = Record.query.filter(Record.value>=10).all()
    # Render as a plain HTML string, one record per line.
    res=''
    for rec in records:
        res += 'datetime = '+ rec.datetime
        res += ", "
        res += 'value = '+ str(rec.value)
        res += '</br>'
    return res
@APP.route('/refresh')
def refresh():
    """Pull fresh data from Open AQ and replace existing data."""
    # Drop and recreate the schema: this is a full replace, not an append.
    DB.drop_all()
    DB.create_all()
    # 'body' is the module-level OpenAQ measurements response fetched at
    # import time — NOTE(review): the data is therefore only as fresh as
    # the last process start, not re-fetched here.
    API_items = body['results']
    for i in API_items:
        ivalue = i.get('value')
        idate = i.get('date')
        iutc = idate.get('utc')
        db_item = (Record(datetime=iutc, value=ivalue))
        DB.session.add(db_item)
    DB.session.commit()
    return 'Data refreshed!'
if __name__ == "__main__":
APP.run()
|
[
"[email protected]"
] | |
695d0d073402440740fc8500b7e5f345c02b68c8
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Projects/Learn/PyCharm/Introduction to Python/Condition expressions/Boolean operators/tests.py
|
470706706d0ff91198294d3ffe34a83d348960e6
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null |
UTF-8
|
Python
| false | false | 128 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:539e49395fbd7e0e4b30294456efe7d922a26823d5ac4c0eea6d348bb18cfba3
size 413
|
[
"[email protected]"
] | |
318a74534f2ec00ecb9d3d2d90042ac5ad963a45
|
56a7dd75f2a3f45d599ca89aaa9ca45390fbd546
|
/ejercicios_preparcialito/parcialito_2/diccionarios/ejercicio_62.py
|
28412a76c350229698e62a66e240a63a6c3ce189
|
[] |
no_license
|
facundoPri/algoritmo-programacion-i-essaya
|
e030d74de832b7642ff84a77212f8ea429d560d8
|
5ff7a8fc66f6683d47bc9faf80a35f9902b1e1a3
|
refs/heads/master
| 2023-04-07T01:04:10.221473 | 2021-04-10T13:05:59 | 2021-04-10T13:05:59 | 299,450,415 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 563 |
py
|
"""
Escribir una función que reciba una cadena y devuelva un diccionario cuyas claves sean las letras y cuyos valores sean la cantidad de apariciones de dicha letra. Por ejemplo, si recibe 'catamarca' debe devolver: {'c':2, 'a':4, 't':1, 'r':1, 'm':1}.
"""
def contar_caracteres(cadena):
    """Count character occurrences in *cadena*.

    Returns a dict mapping each character to the number of times it
    appears, e.g. 'catamarca' -> {'c': 2, 'a': 4, 't': 1, 'm': 1, 'r': 1}.
    """
    conteo = {}
    for caracter in cadena:
        if caracter in conteo:
            conteo[caracter] += 1
        else:
            conteo[caracter] = 1
    return conteo
print(contar_caracteres("facundo"))
|
[
"[email protected]"
] | |
ec8498ae54869540f229014677d6853284fde9fc
|
d4c67b2a12e990d4193e7ab06f04824a348067bf
|
/rl_trainer/ddpg_impl/flower/actor_critic/tf_ddpg_agent.py
|
68c2dcbba15a6708b6789a492d9ba35ba24c020f
|
[
"BSD-3-Clause"
] |
permissive
|
Roboy/nips-2018-ai-for-prosthetics
|
2d57688ce85126379793e8643cbf0030c8f56beb
|
acb69f267a0cc852842828edbbfb47d1840c0a17
|
refs/heads/master
| 2020-03-26T05:39:25.565440 | 2018-11-01T23:28:08 | 2018-11-01T23:28:08 | 144,567,613 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,073 |
py
|
import numpy as np
from typing import Callable, Collection
import tensorflow as tf
from gym.spaces import Box
from overrides import overrides
from typeguard import typechecked
from rl_trainer.agent import GymAgent
from rl_trainer.agent.replay_buffer import ReplayBuffer, InMemoryReplayBuffer
from rl_trainer.commons import Episode, ExperienceTupleBatch
from rl_trainer.ddpg_impl.flower.actor_critic.tf_model_saver import TFModelSaver
from .action_noise import OrnsteinUhlenbeckActionNoise
from .q_network import OnlineQNetwork
from .policy_network import OnlinePolicyNetwork
class TensorFlowDDPGAgent(GymAgent):
    """DDPG agent: online/target actor (μ) and critic (Q) networks, an
    experience replay buffer, and exploration noise added to actions."""

    def __init__(self, state_dim: int, action_space: Box, sess: tf.Session = None,
                 gamma: float = 0.99, replay_buffer: ReplayBuffer = None,
                 actor_noise: Callable = None, tau: float = 0.001,
                 critic_nn: OnlineQNetwork = None, actor_nn: OnlinePolicyNetwork = None,
                 tf_model_saver: TFModelSaver = None):
        action_dim = action_space.shape[0]
        # Discount factor applied to future rewards in the critic target.
        self._gamma = gamma
        self._sess = sess if sess else tf.Session()

        # Online critic Q and its target copy Qʹ (tau is passed through to
        # create_target_network as the target update rate).
        self._Q = critic_nn if critic_nn else OnlineQNetwork(
            sess=self._sess, state_dim=state_dim, action_dim=action_dim)
        self._Qʹ = self._Q.create_target_network(tau=tau)

        # Online actor (policy) μ and its target copy μʹ.
        self._μ = actor_nn if actor_nn else OnlinePolicyNetwork(
            action_bound=action_space.high, sess=self._sess,
            state_dim=state_dim, action_dim=action_dim, action_space=action_space)
        self._μʹ = self._μ.create_target_network(tau=tau)

        with self._sess.graph.as_default():
            self._model_saver = tf_model_saver if tf_model_saver else TFModelSaver()
            self._sess.run(tf.global_variables_initializer())

        # Exploration noise added to every emitted action (OU by default).
        self._actor_noise = actor_noise if actor_noise else OrnsteinUhlenbeckActionNoise(
            mu=np.zeros(action_dim))
        self._replay_buffer = replay_buffer if replay_buffer else InMemoryReplayBuffer()
        # Largest critic estimate seen in the latest batch (diagnostics).
        self.episode_max_q = 0
        self._update_target_nets()

    def _update_target_nets(self):
        # Push both target networks towards their online twins.
        self._μʹ.update()
        self._Qʹ.update()

    @typechecked
    @overrides
    def act(self, current_state: Collection[float]):
        """Return a noisy action for *current_state*, training first when
        the replay buffer holds enough samples."""
        if self._replay_buffer.has_sufficient_samples():
            self._train()
        s = np.array([current_state])  # pack single state into tf action batch
        action = self._μ(s=s)
        return action[0] + self._actor_noise()  # unpack tf batch shape

    def _train(self):
        # One DDPG step: critic update, actor update, then target sync.
        batch = self._replay_buffer.sample_batch()
        self._train_critic(batch)
        self._train_actor(batch)
        self._update_target_nets()

    @typechecked
    def _train_critic(self, batch: ExperienceTupleBatch) -> None:
        # Bellman targets yᵢ = r + γ·Qʹ(s2, μʹ(s2)); the bootstrap term is
        # zeroed for terminal next-states via (1 - done).
        μʹ = self._μʹ
        γ = self._gamma
        s2 = np.array(batch.states_2)
        dones = batch.states_2_are_terminal
        Qs_s2 = self._Qʹ(s=s2, a=μʹ(s=s2))
        yᵢ = [(r + (1-done)*γ*Q_s2) for r, done, Q_s2 in zip(batch.rewards, dones, Qs_s2)]
        yᵢ = np.array(yᵢ).reshape((-1, 1))
        s = np.array(batch.states_1)
        a = np.array(batch.actions)
        self._Q.train(s=s, a=a, y_i=yᵢ)
        self._log_max_q(batch=batch)

    @typechecked
    def _train_actor(self, batch: ExperienceTupleBatch) -> None:
        """Update the actor policy using the sampled gradient"""
        s = np.array(batch.states_1)
        μ = self._μ
        # dQ/da evaluated at a = μ(s), fed back into the actor update.
        grads_a = self._Q.grads_a(s=s, a=μ(s))
        assert len(grads_a) == 1
        μ.train(s=s, grads_a=grads_a[0])  # unpack tf batch shape

    @typechecked
    def _log_max_q(self, batch: ExperienceTupleBatch):
        # Record the largest critic estimate over this batch (diagnostics).
        s, a = batch.states_1, batch.actions
        q_vals = self._Q(s=s, a=a)
        self.episode_max_q = np.amax(q_vals)

    @typechecked
    @overrides
    def observe_episode(self, episode: Episode):
        """Store the episode's transitions and advance the model saver."""
        self._replay_buffer.extend(episode.experience_tuples)
        self._model_saver.step(self._sess)

    @typechecked
    @overrides
    def set_seed(self, seed: int):
        tf.set_random_seed(seed)
|
[
"[email protected]"
] | |
e1c04602eb11935c3019f76fedd8f5debbf6c2c4
|
b9c4c4e2ba9a54cf79169bb2c43e29b6994618f4
|
/source/webapp/models.py
|
5d3cd21c50b5fbf0b491d7e211c3065189b6e5ec
|
[] |
no_license
|
big-arturka/exam_9
|
37bf8be08e0fd922bf36b0663babd4611d1ffb04
|
3505e39d9e2110c2912fc7f474e6ec297a8df4dd
|
refs/heads/master
| 2022-12-31T14:06:53.576579 | 2020-10-24T13:24:24 | 2020-10-24T13:24:24 | 306,677,149 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,384 |
py
|
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
from django.db import models
class Photo(models.Model):
    """An uploaded image with a caption, owned by a user."""
    image = models.ImageField(upload_to='images', verbose_name='Фото')
    signature = models.CharField(max_length=200, verbose_name='Подпись')
    created_at = models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')
    # NOTE(review): max_length has no effect on a ForeignKey — candidate
    # for removal.
    author = models.ForeignKey(get_user_model(), max_length=50, verbose_name='Автор',
                               related_name='image_author', on_delete=models.CASCADE)

    def fav_by(self, user):
        """Return this photo's Favorites entries created by *user*."""
        favs = self.favorite_photo.filter(author=user)
        return favs

    def __str__(self):
        return f'{self.signature}-{self.author}'

    class Meta:
        verbose_name = 'Изображение'
        verbose_name_plural = 'Изображения'
class Favorites(models.Model):
    """Marks a photo as a favourite of a user (one row per user/photo pair)."""
    photo = models.ForeignKey('webapp.Photo', related_name='favorite_photo', verbose_name='Фото', on_delete=models.CASCADE)
    author = models.ForeignKey(get_user_model(), related_name='favorite_author',
                               verbose_name='Автор', on_delete=models.CASCADE)

    def __str__(self):
        return f'{self.photo}-{self.author}'

    class Meta:
        verbose_name = 'Избранное'
        verbose_name_plural = 'Избранные'
|
[
"[email protected]"
] | |
d9688ce59735aea7ef8f1d52da614763b7f2d036
|
dbe1f4110921a08cb13e22ea325d503bd5627195
|
/chuhuo_2.7_clickhouse/bluedon/bdwafd/newscantools/plugins/SiteEngine5_xPagejumpScript.py
|
36b3f98ef2796868c8a3a3a6381ac72f04f32ea9
|
[] |
no_license
|
Hehouhua/waf_branches
|
92dc1b1cbecba20f24ef6c7372dde7caa43f9158
|
ca76f3a1ed8150b423474c9e37aee37841a5ee35
|
refs/heads/main
| 2023-01-07T11:33:31.667688 | 2020-11-03T06:58:33 | 2020-11-03T06:58:33 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,184 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from lib.common import *
def run_domain(http,ob):
    """Probe one domain for the SiteEngine 5.x logout open-redirect flaw.

    http: HTTP client handle, passed straight through to requestUrl.
    ob:   scan-target dict (scheme, domain, base_path, task_id, domain_id,
          level).
    Returns a list holding one vulnerability record when the redirect
    fires, otherwise an empty list.
    """
    list = []
    try:
        domain = ob['domain']
        detail = u''
        url = "%s://%s%s" % (ob['scheme'],ob['domain'],ob['base_path'])
        # Fingerprint request: a static CSS file shipped with SiteEngine.
        expurl="%s%s"%(url,"admin/images/css.css")
        # Exploit request: logout action with an attacker-controlled
        # 'forward' redirect target.
        url+="api.php?action=logout&forward=http://www.baidu.com"
        r,c=requestUrl(http,expurl,ob['task_id'],ob['domain_id'])
        if c.find("siteengine")>=0:
            res, content = requestUrl(http,url,ob['task_id'],ob['domain_id'])
            # Vulnerable when the server redirects to the injected URL.
            if res.has_key('location') and res['location'] == 'http://www.baidu.com':
                request = getRequest(url)
                response = getResponse(res)
                list.append(getRecord(ob,ob['scheme']+"://"+ob['domain'],ob['level'],detail,request,response))
    except Exception,e:
        # Any failure is logged per task/domain; the scan continues.
        logging.getLogger().error("File:SITEENGINE5.xpagejumpscript.py, run_domain function :" + str(e) + ",task id:" + ob['task_id'] + ",domain id:" + ob['domain_id'])
        write_scan_log(ob['task_id'],ob['domain_id'],"File:SITEENGINE5.xpagejumpscript.py, run_domain function :" + str(e))
    #end try
    return list
#end def
|
[
"[email protected]"
] | |
2e858c17d93645b79fec3bc950bfad4291ef27b3
|
4e96f383d4703ad8ee58869ed91a0c8432c8a051
|
/Cura/Cura/cura/Backups/BackupsManager.py
|
ba6fcab8d75e54207a7423215cf29cc707d74109
|
[
"LGPL-3.0-only",
"GPL-3.0-only"
] |
permissive
|
flight7788/3d-printing-with-moveo-1
|
b2dba26010c4fa31815bc1d2d0966161a8600081
|
7fcb9c6b5da9245d54ac917de8c2a7f5148e42b0
|
refs/heads/Feature_Marlin_with_AlanBoy
| 2022-08-30T18:36:44.785058 | 2020-05-30T07:52:58 | 2020-05-30T07:52:58 | 212,583,912 | 0 | 0 |
MIT
| 2020-05-16T07:39:47 | 2019-10-03T13:13:01 |
C
|
UTF-8
|
Python
| false | false | 3,039 |
py
|
# Copyright (c) 2018 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from typing import Dict, Optional, Tuple, TYPE_CHECKING
from UM.Logger import Logger
from cura.Backups.Backup import Backup
if TYPE_CHECKING:
from cura.CuraApplication import CuraApplication
class BackupsManager:
    """Coordinates creating and restoring configuration back-ups.

    The back-up payload itself is handled by the Backup class; this manager
    only wires Backup to the application and pauses auto-save while a
    back-up operation is in flight.
    """

    def __init__(self, application: "CuraApplication") -> None:
        self._application = application

    def createBackup(self) -> Tuple[Optional[bytes], Optional[Dict[str, str]]]:
        """Back up the current configuration.

        Returns a (zip_bytes, meta_data) tuple, where meta_data carries
        information such as the version.
        """
        self._disableAutoSave()
        backup = Backup(self._application)
        backup.makeFromCurrent()
        self._enableAutoSave()
        # Plugins should only see plain data through our API, never the
        # full Backup object.
        return backup.zip_file, backup.meta_data

    def restoreBackup(self, zip_file: bytes, meta_data: Dict[str, str]) -> None:
        """Restore a back-up from raw zip bytes plus its meta data."""
        if not meta_data.get("cura_release", None):
            # Without a version stamp we cannot safely restore.
            Logger.log("w", "Tried to restore a backup without specifying a Cura version number.")
            return

        self._disableAutoSave()
        backup = Backup(self._application, zip_file = zip_file, meta_data = meta_data)
        if backup.restore():
            # A restart is required for the restored data to take effect.
            # Skip saving on close so we do not overwrite what was just
            # restored.
            self._application.windowClosed(save_data = False)

    def _disableAutoSave(self) -> None:
        """Pause the auto-save plug-in so it cannot clash with a back-up."""
        auto_save = self._application.getAutoSave()
        if auto_save:
            auto_save.setEnabled(False)
        else:
            # Auto-save only exists once application init has completed.
            Logger.log("e", "Unable to disable the autosave as application init has not been completed")

    def _enableAutoSave(self) -> None:
        """Resume the auto-save plug-in after a back-up operation."""
        auto_save = self._application.getAutoSave()
        if auto_save:
            auto_save.setEnabled(True)
        else:
            # Auto-save only exists once application init has completed.
            Logger.log("e", "Unable to enable the autosave as application init has not been completed")
|
[
"[email protected]"
] | |
3c639d64247b4a49b28c974d5d915777ea97abc0
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/egHeSWSjHTgzMysBX_11.py
|
07299dceba5a669196df27a142df5458fa762af5
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 501 |
py
|
"""
Create a function that takes a number as an argument and returns half of it.
### Examples
half_a_fraction("1/2") ➞ "1/4"
half_a_fraction("6/8") ➞ "3/8"
half_a_fraction("3/8") ➞ "3/16"
### Notes
Always return the simplified fraction.
"""
def half_a_fraction(fract):
    """Return half of the fraction given as an 'a/b' string, fully simplified.

    BUG FIX: the module docstring promises the *simplified* fraction, but
    the original only halved an even numerator or doubled the denominator,
    so e.g. '4/6' came back as '2/6' instead of '1/3'.  fractions.Fraction
    normalises the result for us while matching all documented examples.
    """
    from fractions import Fraction  # stdlib; local import keeps the module surface unchanged
    half = Fraction(fract) / 2
    return '{}/{}'.format(half.numerator, half.denominator)
|
[
"[email protected]"
] | |
22cca10d1314023d365cc0bdaae9d23ec9feeb56
|
ac4b9385b7ad2063ea51237fbd8d1b74baffd016
|
/.history/utils/ocr/handle_image_20210209170155.py
|
17a1ffee42abc92b121aff59d84aa5bebaf2bf31
|
[] |
no_license
|
preethanpa/ssoemprep
|
76297ef21b1d4893f1ac2f307f60ec72fc3e7c6f
|
ce37127845253c768d01aeae85e5d0d1ade64516
|
refs/heads/main
| 2023-03-09T00:15:55.130818 | 2021-02-20T06:54:58 | 2021-02-20T06:54:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,092 |
py
|
import os
import cv2
import re
import numpy as np
from PIL import Image
import pytesseract
from pytesseract import Output
from fpdf import FPDF
'''
IMAGE HANDLING METHODS
'''
# get grayscale image
def get_grayscale(image):
    """Convert a BGR image to single-channel grayscale."""
    return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# blur removal
def remove_blur(image):
    """Apply a 5x5 median filter (suppresses speckle while keeping edges)."""
    return cv2.medianBlur(image,5)

# noise removal
def remove_noise(image):
    """Denoise a colour image with OpenCV's non-local-means filter."""
    return cv2.fastNlMeansDenoisingColored(image, None, 10, 10, 7, 15)

#thresholding
def thresholding(image):
    """Binarise using Otsu's automatic threshold; returns the image only."""
    return cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]

#dilation
def dilate(image):
    """Dilate once with a 5x5 all-ones kernel (thickens foreground)."""
    kernel = np.ones((5,5),np.uint8)
    return cv2.dilate(image, kernel, iterations = 1)

#erosion
def erode(image):
    """Erode once with a 5x5 all-ones kernel (thins foreground)."""
    kernel = np.ones((5,5),np.uint8)
    return cv2.erode(image, kernel, iterations = 1)
def extract_pdf_from_image(fileName='', pdf_path='', action='', psm=3):
    '''
    OCR an image into a searchable PDF and return the PDF's path.

    fileName: path of the input image
    pdf_path: directory the PDF is written into (created if missing)
    action:   preprocessing step: 1=denoise, 2=grayscale, 3=deblur;
              any other value leaves the image untouched
    psm:      tesseract page-segmentation mode (kept for interface
              compatibility; note it is not currently passed to pytesseract)
    '''
    print(f'FileName is {fileName}')
    custom_config = r'-l eng --psm ' + str(psm)
    pdfdir = pdf_path
    if not os.path.exists(pdfdir):
        os.makedirs(pdfdir)
    pdfFileName = os.path.basename(fileName).split('.')[0] + '.pdf'
    pdfFilePath = pdfdir + '/' + pdfFileName
    print(f'PDF File Path {pdfFilePath}')
    img = cv2.imread(fileName)
    # BUG FIX: img1 stayed None for any action outside {1, 2, 3} — including
    # the default '' — and pytesseract then crashed on a None image.  Fall
    # back to the unprocessed image instead.
    img1 = img
    if action == 1:
        img1 = remove_noise(img)
    elif action == 2:
        img1 = get_grayscale(img)
    elif action == 3:
        img1 = remove_blur(img)
    text = pytesseract.image_to_pdf_or_hocr(img1, extension='pdf')
    with open(pdfFilePath, mode='w+b') as f:
        f.write(text)
    return pdfFilePath
def convert_text_to_pdf(text='', pdf_path='', filename=''):
    '''
    Render a text blob into a PDF, one 100x10 cell per line.

    text:     content to render (staged through a temp .txt file)
    pdf_path: output directory for the PDF (created if missing)
    filename: base name (without extension) for both temp and output files
    '''
    tempdir = "/tmp"
    pdfdir = pdf_path
    textFileName = tempdir + '/' + filename + ".txt"
    pdfFileName = pdfdir + '/' + filename + ".pdf"
    if not os.path.exists(tempdir):
        os.makedirs(tempdir)
    # BUG FIX: the original line ended in a stray '(' which made the whole
    # module a syntax error.
    if not os.path.exists(pdfdir):
        os.makedirs(pdfdir)
    # save FPDF() class into a variable pdf
    pdf = FPDF()
    # Add a page
    pdf.add_page()
    # set style and size of font that you want in the pdf
    pdf.set_font("Arial", size=15)
    with open(textFileName, mode='w+b') as f:
        f.write(text)
    line = 1
    # BUG FIX: the read handle was never closed; use a context manager.
    with open(textFileName, "r") as f:
        for x in f:
            # Strip typographic dashes/quotes the latin-1 FPDF font cannot encode.
            x1 = re.sub(u"(\u2014|\u2018|\u2019|\u201c|\u201d)", "", x)
            pdf.cell(100, 10, txt=x1, ln=line, align='L')
            line = line + 1
    # save the pdf with name .pdf
    pdf.output(pdfFileName, 'F')
def mark_region(image_path):
    """Detect text-like regions in the image at *image_path*.

    Returns (annotated_image, line_items_coordinates); each coordinate
    entry is [(x, y), (2200, y + h)] for a detected region.
    """
    im = cv2.imread(image_path)
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (9,9), 0)
    thresh = cv2.adaptiveThreshold(blur,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV,11,30)
    # Dilate to combine adjacent text contours
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9,9))
    dilate = cv2.dilate(thresh, kernel, iterations=4)
    # Find contours, highlight text areas, and extract ROIs
    cnts = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    line_items_coordinates = []
    # BUG FIX: 'image' was unbound when no contour matched the filters,
    # raising UnboundLocalError at the return; start from the raw image
    # (cv2.rectangle draws in place and returns the same array).
    image = im
    for c in cnts:
        area = cv2.contourArea(c)
        x,y,w,h = cv2.boundingRect(c)
        if y >= 600 and x <= 1000:
            if area > 10000:
                image = cv2.rectangle(im, (x,y), (2200, y+h), color=(255,0,255), thickness=3)
                line_items_coordinates.append([(x,y), (2200, y+h)])
        if y >= 2400 and x <= 2000:
            image = cv2.rectangle(im, (x,y), (2200, y+h), color=(255,0,255), thickness=3)
            line_items_coordinates.append([(x,y), (2200, y+h)])
    # BUG FIX: the original return line carried an unbalanced ')' which was
    # a syntax error.
    return image, line_items_coordinates
|
[
"{[email protected]}"
] | |
b15ae00c90717a2a67c39cb9e72a1951ed5f1ae4
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_217/ch21_2019_08_26_19_58_29_478795.py
|
8e46bdfeb1e79e43246166f70246709b75ed0188
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 184 |
py
|
def valor_da_conta(valor):
    """Return the bill value with a 10% service charge added.

    BUG FIX: the original used 'com10%' as a variable name (a syntax
    error), re-read the amount with input() while ignoring the parameter,
    and printed an undefined name after the return.
    """
    total = valor + valor * (10 / 100)
    return total

if __name__ == '__main__':
    # Interactive prompt moved under the main guard so importing this
    # module does not block on input().
    valor = int(input('Qual valor da conta?:'))
    print("Valor da conta com 10%: R${0}".format(valor_da_conta(valor)))
|
[
"[email protected]"
] | |
6c907dbb07bf1ef1ce4fdced214be391d28b2ca8
|
eda9187adfd53c03f55207ad05d09d2d118baa4f
|
/python3_base/python_class_method.py
|
4bb8dc5866ce970db1d955879a443c4426f31c41
|
[] |
no_license
|
HuiZhaozh/python_tutorials
|
168761c9d21ad127a604512d7c6c6b38b4faa3c7
|
bde4245741081656875bcba2e4e4fcb6b711a3d9
|
refs/heads/master
| 2023-07-07T20:36:20.137647 | 2020-04-24T07:18:25 | 2020-04-24T07:18:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,137 |
py
|
# -*- coding:utf-8 -*-
# /usr/bin/python
'''
@Author: Yan Errol
@Email:[email protected]
@Date: 2019-05-20 17:22
@File:class_method.py
@Describe:静态方法
'''
from math import sqrt
class Triangle(object):
    """A triangle defined by its three side lengths."""

    def __init__(self, a, b, c):
        self._a = a
        self._b = b
        self._c = c

    @staticmethod
    def is_valid(a, b, c):
        """True when sides a, b, c satisfy the triangle inequality."""
        shortest, middle, longest = sorted((a, b, c))
        return shortest + middle > longest

    def perimeter(self):
        """Sum of the three side lengths."""
        return self._a + self._b + self._c

    def area(self):
        """Area computed via Heron's formula."""
        s = self.perimeter() / 2
        product = s
        for side in (self._a, self._b, self._c):
            product *= s - side
        return sqrt(product)
def main():
    """Demo: validate side lengths, then print the perimeter and area."""
    a, b, c = 3, 4, 5
    # Static methods (like class methods) are invoked by sending a message
    # to the class itself.
    if Triangle.is_valid(a, b, c):
        t = Triangle(a, b, c)
        print(t.perimeter())
        # Instance methods can also be called through the class, passing
        # the receiving object explicitly as the first argument:
        # print(Triangle.perimeter(t))
        print(t.area())
        # print(Triangle.area(t))
    else:
        print('无法构成三角形.')
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
b6db4caaa1b3f409974642244d2e45e58bea2b74
|
d94d22ab20a11ab6c473d8aba4038c97f75806c4
|
/python小栗子/t57.py
|
c34766c279355c2457734c45293ae01587fccbaf
|
[] |
no_license
|
songszw/python
|
a1d0419b995df13aee5997d24c09dccab91ac9e0
|
5135a3efcdcc2a37f7321ae19271c9315f48bcaf
|
refs/heads/master
| 2020-02-26T16:24:28.411919 | 2017-04-26T09:08:41 | 2017-04-26T09:08:41 | 71,195,225 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,392 |
py
|
# Simple interactive phone book: loops on a numeric menu until option 4.
print('|--- 欢迎进入宋氏通讯录 ---|')
print('|--- 1:查询联系人资料 ---|')
print('|--- 2:插入新的联系人 ---|')
print('|--- 3:删除已有联系人 ---|')
print('|--- 4:退出通讯录程序 ---|')
# name -> phone number; lives only for this session (nothing is persisted).
contacts = dict()
while 1:
    num = int(input('please enter the number you want to do: '))
    # 1: look up a contact by name.
    if num==1:
        name = input('please enter the name you waht to check: ')
        if name in contacts:
            print(name+':'+contacts[name])
        else:
            print('sorry,the man who wasn\'t here')
    # 2: add a contact, asking before overwriting an existing entry.
    if num==2:
        name = input('please enter your name:')
        if name in contacts:
            print('sorry, the man is already in the contacts -->>',end=' ')
            print(name+":"+contacts[name])
            if input('do you want to change the name ?[YES/NO]:')=='YES':
                contacts[name]=input('please enter the phone number:')
            else:
                # NOTE(review): the 'NO' branch still overwrites the stored
                # number — confirm this is intended.
                contacts[name] =input('please enter the phone number:')
        else:
            contacts[name]=input('please enter the phone number:')
    # 3: delete a contact by name.
    if num==3:
        name = input('please enter the name who you want to delete:')
        if name in contacts:
            contacts.pop(name)
        else:
            print('sorry, the man who wasn\'t here')
    # 4: leave the menu loop.
    if num==4:
        break
print('|--- 感谢使用通讯录程序 ---|')
|
[
"[email protected]"
] | |
509b043958ecf41f0f38c5f2c9c22a9f3826f34b
|
074279d6b63c9cd25c1353624710ed1fb422b30f
|
/j2ee模式-前端控制器模式.py
|
53e5ab14544ddcb6c8ff3233c365a535f8179b88
|
[] |
no_license
|
qqizai/python36patterns
|
edd106f496a1aa7eda5d9070a6d82f142a808621
|
39052df13db9a54cb8322d87edbc2dbe6ff06a07
|
refs/heads/master
| 2022-11-12T14:01:32.341802 | 2020-06-29T02:23:46 | 2020-06-29T02:23:46 | 281,970,231 | 0 | 1 | null | 2020-07-23T14:13:31 | 2020-07-23T14:13:30 | null |
UTF-8
|
Python
| false | false | 2,562 |
py
|
# -*- coding: utf-8 -*-
# @Author : ydf
# @Time : 2019/10/9 0009 15:32
"""
前端控制器模式(Front Controller Pattern)是用来提供一个集中的请求处理机制,所有的请求都将由一个单一的处理程序处理。该处理程序可以做认证/授权/记录日志,或者跟踪请求,然后把请求传给相应的处理程序。以下是这种设计模式的实体。
前端控制器(Front Controller) - 处理应用程序所有类型请求的单个处理程序,应用程序可以是基于 web 的应用程序,也可以是基于桌面的应用程序。
调度器(Dispatcher) - 前端控制器可能使用一个调度器对象来调度请求到相应的具体处理程序。
视图(View) - 视图是为请求而创建的对象。
从java转化来,命名规范懒得改了。
"""
from abc import ABCMeta, abstractmethod
from monkey_print2 import print
class HomeView:
    """Renders the Home page (the fallback view)."""
    def show(self):
        print('显示 Home 页面')
class StudentView:
    """Renders the Student page."""
    def show(self):
        print('显示 Student 页面')
class Dispatcher:
    """Routes a request name to the view responsible for rendering it."""

    def __init__(self):
        self.student_view = StudentView()
        self.home_view = HomeView()

    def dispatch(self, request: str):
        """Show the Student page for 'student' (any case); home otherwise."""
        view = self.student_view if request.upper() == 'STUDENT' else self.home_view
        view.show()
class FrontController:
    """Single entry point: authenticates, logs, then delegates to Dispatcher."""
    def __init__(self):
        self.__dispatcher = Dispatcher()

    def is_authentic_user(self):
        # Stub authentication: always succeeds.
        print("用户鉴权成功")
        return True

    def track_request(self, request):
        # Audit log of every incoming request.
        print("被请求页面: " + request)

    def dispatch_request(self, request):
        """Log the request and route it once the user is authenticated."""
        self.track_request(request)
        if self.is_authentic_user():
            self.__dispatcher.dispatch(request)
if __name__ == '__main__':
    # Demo: send one request to each view through the single front controller.
    front_controller = FrontController()
    front_controller.dispatch_request("HOME")
    front_controller.dispatch_request("STUDENT")
"""
"D:/coding2/python36patterns/j2ee模式-前端控制器模式.py:49" 16:54:03 被请求页面: HOME
"D:/coding2/python36patterns/j2ee模式-前端控制器模式.py:45" 16:54:03 用户鉴权成功
"D:/coding2/python36patterns/j2ee模式-前端控制器模式.py:20" 16:54:03 显示 Home 页面
"D:/coding2/python36patterns/j2ee模式-前端控制器模式.py:49" 16:54:03 被请求页面: STUDENT
"D:/coding2/python36patterns/j2ee模式-前端控制器模式.py:45" 16:54:03 用户鉴权成功
"D:/coding2/python36patterns/j2ee模式-前端控制器模式.py:25" 16:54:03 显示 Student 页面
"""
|
[
"[email protected]"
] | |
d81ca3d2f986e4c7af9c64432aef10385266e46b
|
8cc30a27835e205a3476783106ca1605a6a85c48
|
/amy/workshops/migrations/0066_merge.py
|
ef0455252831a8b0cfaf3e51343f4267be07ade1
|
[
"MIT"
] |
permissive
|
gaybro8777/amy
|
d968edc78bbd3f63f3353450334721628dbbc0f4
|
3cf99aed58a0f0acf83d2645a30d8408208ccea9
|
refs/heads/develop
| 2023-03-07T22:08:28.692700 | 2021-02-23T18:06:06 | 2021-02-23T18:06:06 | 341,930,505 | 0 | 0 |
MIT
| 2021-02-24T17:22:08 | 2021-02-24T14:40:43 | null |
UTF-8
|
Python
| false | false | 304 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Merge migration: joins the 0064 and 0065 branches; performs no schema changes."""
    dependencies = [
        ('workshops', '0064_dc_instructor_badge'),
        ('workshops', '0065_multiple_memberships'),
    ]
    operations = [
    ]
|
[
"[email protected]"
] | |
f9023a1da5efba1124204d1d8a76778d9c15c29d
|
d18f74c0683fa412833fc7b68f737226dcf0f5df
|
/setup.py
|
70e68c224d914b125b04f0aa01c8f602ff39fa0f
|
[] |
no_license
|
phymhan/gomoku
|
ab22b19c2f59ea63aba3015f2b3ce53bf1b440e5
|
e48e215fe24236ccccfa5edb0709a22bed4624b9
|
refs/heads/master
| 2021-08-28T23:06:50.620937 | 2017-12-13T07:49:45 | 2017-12-13T07:49:45 | 114,087,358 | 5 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,361 |
py
|
import cx_Freeze

# Build script: freeze the game entry point into a standalone executable.
# Run with: python setup.py build
executables = [cx_Freeze.Executable("fivechessonline21.py")]

cx_Freeze.setup(
    name="Five-Chess",
    options={
        "build_exe": {
            # Bundle pygame with the frozen app.
            "packages": ["pygame"],
            # Game assets copied next to the executable; paths are relative
            # to this setup script.
            "include_files": [
                "./sources/pics/board.png",
                "./sources/pics/cp_k_29.png",
                "./sources/pics/cp_w_29.png",
                "./sources/pics/panel.png",
                "./sources/pics/catsmall.png",
                "./sources/music/BackgroundMusic.ogg",
                "./sources/music/Snd_click.ogg",
            ],
        }
    },
    executables=executables,
)
# Removed: a stale commented-out duplicate of this setup() call that listed
# the old flat asset paths — dead code that no longer matched the layout.
|
[
"[email protected]"
] | |
54fdb90defd17f79a01648b7ef2c33d62cb46d3b
|
c4b8e1e09dedbccd37ca008ecaaca4438610bbaf
|
/cpmpy/sliding_sum.py
|
01b48796c45a4a65f16a0e45cf1d93b7cf1cdcf1
|
[
"MIT"
] |
permissive
|
hakank/hakank
|
4806598b98cb36dd51b24b0ab688f52dadfe9626
|
c337aaf8187f15dcdc4d5b09cd2ed0dbdb2e72c2
|
refs/heads/master
| 2023-08-15T00:21:52.750270 | 2023-07-27T16:21:40 | 2023-07-27T16:21:40 | 11,933,517 | 336 | 97 |
MIT
| 2023-07-27T11:19:42 | 2013-08-06T20:12:10 |
JavaScript
|
UTF-8
|
Python
| false | false | 1,355 |
py
|
"""
Sliding sum constraint in cpmpy.
From Global Constraint Catalogue
http://www.emn.fr/x-info/sdemasse/gccat/Csliding_sum.html
'''
sliding_sum(LOW,UP,SEQ,VARIABLES)
Purpose
Constrains all sequences of SEQ consecutive variables of the collection VARIABLES so that the
sum of the variables belongs to interval [LOW, UP].
Example
(
3, 7, 4,<1, 4, 2, 0, 0, 3, 4>
)
The example considers all sliding sequences of SEQ=4 consecutive values of <1, 4, 2, 0,0,3, 4>
collection and constraints the sum to be in [LOW,UP] = [3, 7]. The sliding_sum constraint holds
since the sum associated with the corresponding subsequences 1 4 2 0, 4 2 0 0, 2 0 0 3, and
0 0 3 4 are respectively 7, 6, 5 and 7.
'''
This cpmpy model was written by Hakan Kjellerstrand ([email protected])
See also my cpmpy page: http://hakank.org/cpmpy/
"""
from cpmpy import *
import cpmpy.solvers
import numpy as np
from cpmpy_hakank import *
def sliding_sum_test(n=7,seq=4,low=3,up=7):
    """Enumerate and print all x in {0..4}^n in which every window of `seq`
    consecutive elements sums to a value within [low, up]."""
    x = intvar(0,4,shape=n,name="x")
    # low = intvar(0,10,name="low")
    # up = intvar(0,10,name="up")
    model = Model(sliding_sum(low,up,seq,x))
    ss = CPM_ortools(model)
    # NOTE(review): presolve/probing levels are zeroed, presumably so that
    # solveAll enumerates the complete solution set — confirm against cpmpy docs.
    ss.ort_solver.parameters.linearization_level = 0
    ss.ort_solver.parameters.cp_model_probing_level = 0
    num_solutions = ss.solveAll(display=x)
    print("num_solutions:", num_solutions)
sliding_sum_test()
|
[
"[email protected]"
] | |
c73953e48af931827b1da62eb65e647668cfd10d
|
5e45f1d1d9f58aa1456777b0d75334d6efd43840
|
/challenges/hackerrank/algorithms/dynamic/max_subarray/python/max_subarray.py
|
7a4bd11931255b0775dd3b4438356b773e6b06e1
|
[] |
no_license
|
missingdays/nerdy.school
|
604953dc9b3c38a0f71793f066ce2707aa980dae
|
051673e0ebc54bc2f7e96a6477697d1d528dc45c
|
refs/heads/master
| 2021-01-17T08:10:19.558851 | 2016-06-06T15:29:01 | 2016-06-06T15:29:01 | 59,897,184 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,187 |
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2015 missingdays <missingdays@missingdays>
#
# Distributed under terms of the MIT license.
"""
Maximum subarray problem solution
"""
def max_subarray(array):
    """Return a maximum-sum contiguous subarray of *array* (Kadane's algorithm).

    Fixes the original all-negative bug: the old scan reset its running sum
    to 0 on every negative value, so with no positive prefix it always
    returned array[0:1] even when a later single element was larger
    (e.g. [-5, -1, -3] returned [-5] instead of [-1]).

    Returns [] for empty input.
    """
    if not array:
        return []
    best_sum = curr_sum = array[0]
    best_start = best_end = curr_start = 0
    for i in range(1, len(array)):
        if curr_sum < 0:
            # A negative running sum can only hurt — restart the window at i.
            curr_sum = array[i]
            curr_start = i
        else:
            curr_sum += array[i]
        if curr_sum > best_sum:
            best_sum = curr_sum
            best_start = curr_start
            best_end = i
    return array[best_start:best_end + 1]
def sum_positive(array):
    """Return the sum of the strictly positive entries of *array*; if that
    sum is zero (no positive entries), return the largest element instead."""
    total = 0
    for value in array:
        if value > 0:
            total += value
    if total != 0:
        return total
    # No positive contribution: fall back to the maximum element.
    largest = array[0]
    for value in array[1:]:
        if value > largest:
            largest = value
    return largest
# Driver: first input line is the number of test cases.  For each case the
# length line is consumed but unused; the array follows on the next line.
# Prints the max contiguous subarray sum, then the max non-contiguous sum.
for i in range(int(input())):
    n = input()
    inp = list(map(int, input().split()))
    print(sum(max_subarray(inp)), end=" ")
    print(sum_positive(inp))
|
[
"[email protected]"
] | |
878bdb34e11bc1501de1b6b0dfd2018dfcf3aa4a
|
63191be7f688591af69263972d68423d76fb5f74
|
/geekshop/adminapp/views/categories.py
|
b42b65dd0c4181601279fe52418c7aef8c7ee7a5
|
[] |
no_license
|
IliaNiyazof/Django
|
5eee4c226a1f06178fdbb5626444fff406886de7
|
052cb4f3f142c4224454ebac9fb27f63de9cbc47
|
refs/heads/master
| 2021-07-19T05:52:56.620026 | 2020-06-05T16:17:47 | 2020-06-05T16:17:47 | 166,776,966 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,332 |
py
|
from django.contrib.auth.decorators import user_passes_test
from django.shortcuts import render, HttpResponseRedirect, reverse, get_object_or_404
from mainapp.models import ProductCategory
from adminapp.models.categories import ProductCategoryEditForm
@user_passes_test(lambda u: u.is_superuser)
def categories(request):
    """Admin page listing every product category (superusers only)."""
    context = {
        'title': 'админка/категории',
        'objects': ProductCategory.objects.all(),
    }
    return render(request, 'adminapp/categories/read.html', context)
@user_passes_test(lambda u: u.is_superuser)
def category_create(request):
    """Create a product category; re-renders the bound form on validation errors."""
    if request.method == 'POST':
        form = ProductCategoryEditForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('admin:categories'))
    else:
        form = ProductCategoryEditForm()
    context = {'title': 'категории/создание', 'update_form': form}
    return render(request, 'adminapp/categories/update.html', context)
@user_passes_test(lambda u: u.is_superuser)
def category_update(request, pk):
    """Edit the category identified by *pk*; on success, redirect back to its edit page."""
    category = get_object_or_404(ProductCategory, pk=pk)
    if request.method == 'POST':
        form = ProductCategoryEditForm(request.POST, request.FILES, instance=category)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('admin:category_update', args=[category.pk]))
    else:
        form = ProductCategoryEditForm(instance=category)
    context = {'title': 'категории/редактирование', 'update_form': form}
    return render(request, 'adminapp/categories/update.html', context)
@user_passes_test(lambda u: u.is_superuser)
def category_delete(request, pk):
    """Soft-delete: a POST marks the category inactive instead of removing the row."""
    category = get_object_or_404(ProductCategory, pk=pk)
    if request.method == 'POST':
        category.is_active = False
        category.save()
        return HttpResponseRedirect(reverse('admin:categories'))
    context = {'title': 'категории/удаление', 'category_to_delete': category}
    return render(request, 'adminapp/categories/delete.html', context)
|
[
"[email protected]"
] | |
91feb4ba59077254f4d6a9ed644bd5d3663554bf
|
60bb3ef7dd8a147761918f1fa021918d6898202d
|
/.history/main_20210623141903.py
|
eee65d95a67254bb6a9d60f7a4da85315eba9d6c
|
[] |
no_license
|
sanjayMamidipaka/bbtautomationscripts
|
c1d29d9ea5c0fa982a53895b10db50b66e475c8f
|
12c35a3459cb0ead71ae616b2efad21c555cf8a0
|
refs/heads/master
| 2023-06-06T06:25:09.152797 | 2021-06-23T18:21:34 | 2021-06-23T18:21:34 | 362,836,875 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,370 |
py
|
import pandas as pd
import numpy as np
import csv
import xlsxwriter
from timeprofile import timeprofile
from masterdata import masterdata
from virtualReference import virtualReference
from keyfigures import keyfigures
from planninglevels import planninglevels
from attributesaskf import attributesaskf
from attributes import attributes
#Steps:
# create class
# add import
# change writer
#change this line to the desired output path
output_path = '/Users/sanjaymamidipaka/Downloads/Energizer_Latest_output1.xlsx'
writer = pd.ExcelWriter(output_path, engine='xlsxwriter')
paths = []
# Prompt for each export CSV path.  Bug fix: the original prompts were
# copy-pasted and every one of them said "masterdata path"; each prompt now
# names the file it actually asks for.  (input() already returns str, so the
# redundant str() wrappers are dropped.)
# NOTE(review): these variables are currently unused below — hard-coded path
# lists are passed to the report classes instead.  Confirm whether they were
# meant to feed the *_instance constructors.
masterdatapath = input('Enter the masterdata path: ')
plevelspath = input('Enter the planning levels path: ')
keyfigurespath = input('Enter the key figures path: ')
attributesaskfpath = input('Enter the attributes-as-key-figure path: ')
timeprofilepath = input('Enter the time profile path: ')
paa = input('Enter the planning area attributes path: ')
energizerpaths = ['/Users/sanjaymamidipaka/Downloads/Energizer Production_ZSAPIBP1C_2021-05-04_21_01/ZSAPIBP1C_MASTERDATATYPES_2021-05-04_21_01.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer Production_ZSAPIBP1C_2021-05-04_21_01/ZSAPIBP1C_PLEVELS_ATTRS_2021-05-04_21_01.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer Production_ZSAPIBP1C_2021-05-04_21_01/ZSAPIBP1C_KEYFIGURES_2021-05-04_21_01.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer Production_ZSAPIBP1C_2021-05-04_21_01/ZSAPIBP1C_ATTRIBUTES_AS_KEYFIGURE_2021-05-04_21_01.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer Production_ZSAPIBP1C_2021-05-04_21_01/ZSAPIBP1C_TIMEPROFILE_2021-05-04_21_01.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer Production_ZSAPIBP1C_2021-05-04_21_01/ZSAPIBP1C_PA_ATTRIBUTES_2021-05-04_21_01.csv']
natureswaypaths = ['/Users/sanjaymamidipaka/Downloads/natureswaydata/CFGSNA2_MASTERDATATYPES_2020-12-02_15_09.csv',
'/Users/sanjaymamidipaka/Downloads/natureswaydata/CFGSNA2_PLEVELS_ATTRS_2020-12-02_15_09.csv',
'/Users/sanjaymamidipaka/Downloads/natureswaydata/CFGSNA2_KEYFIGURES_2020-12-02_15_09.csv',
'/Users/sanjaymamidipaka/Downloads/natureswaydata/CFGSNA2_ATTRIBUTES_AS_KEYFIGURE_2020-12-02_15_09.csv',
'/Users/sanjaymamidipaka/Downloads/natureswaydata/CFGSNA2_TIMEPROFILE_2020-12-02_15_09.csv',
'/Users/sanjaymamidipaka/Downloads/natureswaydata/CFGSNA2_PA_ATTRIBUTES_2020-12-02_15_09.csv']
energizertestpaths = ['/Users/sanjaymamidipaka/Downloads/Energizer_2021-05-05_21_29_Test/ZSAPIBP1C_MASTERDATATYPES_2021-05-05_21_29.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer_2021-05-05_21_29_Test/ZSAPIBP1C_PLEVELS_ATTRS_2021-05-05_21_29.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer_2021-05-05_21_29_Test/ZSAPIBP1C_KEYFIGURES_2021-05-05_21_29.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer_2021-05-05_21_29_Test/ZSAPIBP1C_ATTRIBUTES_AS_KEYFIGURE_2021-05-05_21_29.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer_2021-05-05_21_29_Test/ZSAPIBP1C_TIMEPROFILE_2021-05-05_21_29.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer_2021-05-05_21_29_Test/ZSAPIBP1C_PA_ATTRIBUTES_2021-05-05_21_29.csv']
energizerproductionspaths = ['/Users/sanjaymamidipaka/Downloads/Energizer_2021-05-05_21_32_Production/ZSAPIBP1C_MASTERDATATYPES_2021-05-05_21_32.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer_2021-05-05_21_32_Production/ZSAPIBP1C_PLEVELS_ATTRS_2021-05-05_21_32.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer_2021-05-05_21_32_Production/ZSAPIBP1C_KEYFIGURES_2021-05-05_21_32.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer_2021-05-05_21_32_Production/ZSAPIBP1C_ATTRIBUTES_AS_KEYFIGURE_2021-05-05_21_32.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer_2021-05-05_21_32_Production/ZSAPIBP1C_TIMEPROFILE_2021-05-05_21_32.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer_2021-05-05_21_32_Production/ZSAPIBP1C_PA_ATTRIBUTES_2021-05-05_21_32.csv']
energizerlatestpaths = ['/Users/sanjaymamidipaka/Downloads/Energizer Latest/ZSAPIBP1C_MASTERDATATYPES_2021-05-05_23_58.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer Latest/ZSAPIBP1C_PLEVELS_ATTRS_2021-05-05_23_58.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer Latest/ZSAPIBP1C_KEYFIGURES_2021-05-05_23_58.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer Latest/ZSAPIBP1C_ATTRIBUTES_AS_KEYFIGURE_2021-05-05_23_58.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer Latest/ZSAPIBP1C_TIMEPROFILE_2021-05-05_23_58.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer Latest/ZSAPIBP1C_PA_ATTRIBUTES_2021-05-05_23_58.csv']
# Run each report generator in sequence; every .run() appends its sheet(s)
# to the shared Excel writer.
timeprofile_instance = timeprofile(writer, energizerlatestpaths)
timeprofile_instance.run()
masterdata_instance = masterdata(writer, energizerlatestpaths)
masterdata_instance.run()
virtualReference_instance = virtualReference(writer, energizerlatestpaths)
virtualReference_instance.run()
attributes_instance = attributes(writer, energizerlatestpaths)
attributes_instance.run()
planninglevels_instance = planninglevels(writer, energizerlatestpaths)
planninglevels_instance.run()
keyfigures_instance = keyfigures(writer, energizerlatestpaths)
keyfigures_instance.run()
attributesaskf_instance = attributesaskf(writer, energizerlatestpaths)
attributesaskf_instance.run()
writer.book.close() # close the workbook, flushing all sheets to disk
|
[
"[email protected]"
] | |
ad56100aae986b9d39225d2009c1864556132f8f
|
5a7a3447d434a458a7bb63f2aa11b64c284d5492
|
/test/image_load.py
|
834165562d2d100c68a6bd98d20ca2faaea7dd90
|
[] |
no_license
|
woshimayi/mypython
|
35792e12036a7a05f12d3ef7006637b2b03f0e2e
|
7f1eb38e8585bf6d2f21d3ad0f64dace61425875
|
refs/heads/master
| 2023-09-01T08:59:12.301836 | 2023-08-30T05:30:54 | 2023-08-30T05:30:54 | 130,017,052 | 4 | 0 | null | 2018-12-02T16:18:14 | 2018-04-18T06:50:36 |
HTML
|
UTF-8
|
Python
| false | false | 622 |
py
|
#coding=utf-8
import urllib
import re
def getHtml(url):
    """Fetch *url* and return the raw response body."""
    # NOTE(review): Python 2 API — on Python 3 this is urllib.request.urlopen.
    page = urllib.urlopen(url)
    html = page.read()
    return html
def getImg(html):
    """Download every .jpg matched in *html*, saving them as 0.jpg, 1.jpg, ...

    Returns None (the function only has the download side effect).
    """
    # Pattern targets image markup of the form src="....jpg" pic_ext.
    reg = r'src="(.+?\.jpg)" pic_ext'
    imgre = re.compile(reg)
    imglist = re.findall(imgre,html)
    x = 0
    for imgurl in imglist:
        # NOTE(review): Python 2 API — urllib.request.urlretrieve on Python 3.
        urllib.urlretrieve(imgurl,'%s.jpg' % x)
        x+=1
# NOTE(review): this file is Python 2 (print statement below, urllib.urlopen
# above) and will not run under Python 3 without porting.
html = getHtml("http://cn.bing.com/images/search?q=%E8%8B%B1%E5%9B%BD%E8%AE%AE%E4%BC%9A%E5%A4%A7%E5%8E%A6%E6%81%90%E6%80%96%E8%A2%AD%E5%87%BB&FORM=ISTRTH&id=F1E1C03F7EB1F290F78351F68318CB06438FD2B9&cat=%E4%BB%8A%E6%97%A5%E7%83%AD%E5%9B%BE&lpversion=")
print getImg(html)
|
[
"[email protected]"
] | |
ef9743d94d29deebd30fc55ae31439a2db8b093d
|
f87f51ec4d9353bc3836e22ac4a944951f9c45c0
|
/.history/HW06_20210715222321.py
|
fcf2188b6928a2756355ea80e53ded7f525f6620
|
[] |
no_license
|
sanjayMamidipaka/cs1301
|
deaffee3847519eb85030d1bd82ae11e734bc1b7
|
9ddb66596497382d807673eba96853a17884d67b
|
refs/heads/main
| 2023-06-25T04:52:28.153535 | 2021-07-26T16:42:44 | 2021-07-26T16:42:44 | 389,703,530 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,763 |
py
|
"""
Georgia Institute of Technology - CS1301
HW06 - Text Files & CSV
Collaboration Statement:
"""
#########################################
"""
Function Name: findCuisine()
Parameters: filename (str), cuisine (str)
Returns: list of restaurants (list)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def findCuisine(filename, cuisine):
    """Return the names of restaurants whose cuisine line equals *cuisine*.

    The file stores one restaurant per 4-line record (name, cuisine, group,
    blank line); the restaurant name sits on the line just above the
    matching cuisine line.
    """
    with open(filename, 'r') as source:
        lines = source.readlines()
    matches = []
    for index, line in enumerate(lines):
        if line.strip() == cuisine:
            matches.append(lines[index - 1].strip())
    return matches
"""
Function Name: restaurantFilter()
Parameters: filename (str)
Returns: dictionary that maps cuisine type (str)
to a list of restaurants of the same cuisine type (list)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def restaurantFilter(filename):
    """Map each cuisine type to the list of restaurant names of that cuisine.

    The file stores one restaurant per 4-line record: name, cuisine, group,
    blank line.  Cuisines appear in the dict in first-seen order, and each
    list keeps file order — same as the original two-pass implementation.

    Bug fix: the original opened the file and never closed it; the handle is
    now managed with a `with` block.
    """
    mapping = {}
    with open(filename, 'r') as source:
        lines = source.readlines()
    for i in range(0, len(lines), 4):
        name = lines[i].strip()
        cuisine = lines[i + 1].strip()
        mapping.setdefault(cuisine, []).append(name)
    return mapping
"""
Function Name: createDirectory()
Parameters: filename (str), output filename (str)
Returns: None (NoneType)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def createDirectory(filename, outputFilename):
    """Write a numbered restaurant directory, grouped Fast Food then Sit-down.

    Input records are 4 lines each: name, cuisine, group, blank line.
    Output format (matching the original exactly):
        Restaurant Directory
        Fast Food
        1. <name> - <cuisine>
        ...
        Sit-down
        1. <name> - <cuisine>     <- last line has no trailing newline
    Bug fix: the original never closed either file handle, so the output
    buffer was only flushed by garbage collection; both files now use `with`.
    """
    with open(filename, 'r') as source:
        lines = source.readlines()
    fast_food = []
    sit_down = []
    for i in range(0, len(lines), 4):
        name = lines[i].strip()
        cuisine = lines[i + 1].strip()
        group = lines[i + 2].strip()
        # Each group keeps its own 1-based numbering.
        bucket = fast_food if group == 'Fast Food' else sit_down
        bucket.append('{}. {} - {}'.format(len(bucket) + 1, name, cuisine))
    with open(outputFilename, 'w') as out:
        out.write('Restaurant Directory' + '\n')
        out.write('Fast Food' + '\n')
        for entry in fast_food:
            out.write(entry + '\n')
        out.write('Sit-down' + '\n')
        # Entries joined with newlines; the final line stays unterminated.
        out.write('\n'.join(sit_down))
"""
Function Name: extraHours()
Parameters: filename (str), hour (int)
Returns: list of (person, extra money) tuples (tuple)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def extraHours(filename, hour):
    """Return (name, extra pay) tuples for employees who worked more than *hour* hours.

    CSV columns (after a header row): name, age, wage, year hired, hours worked.
    Extra pay = (hours worked - hour) * hourly wage.

    Bug fix: the original opened the file and never closed it; the handle is
    now managed with a `with` block.
    """
    overtime = []
    with open(filename, 'r') as source:
        source.readline()  # skip the header row
        for raw in source:
            fields = raw.strip().split(',')
            name = fields[0]
            wage = int(fields[2])
            hours_worked = int(fields[4])
            if hours_worked > hour:
                overtime.append((name, (hours_worked - hour) * wage))
    return overtime
"""
Function Name: seniorStaffAverage()
Parameters: filename (str), year (int)
Returns: average age of senior staff members (float)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def seniorStaffAverage(filename, year):
    """Return the average age (float) of staff hired strictly before *year*.

    CSV columns (after a header row): name, age, wage, year hired, hours worked.
    Returns 0.0 when no employee qualifies, to avoid dividing by zero.

    Bug fix: the original body was a broken copy-paste of extraHours — it
    referenced undefined names (`hour`, `wage`, `overtime`) and never
    returned anything.  "Senior staff" is interpreted here as employees whose
    hire year precedes *year* — TODO confirm against the assignment spec.
    """
    ages = []
    with open(filename, 'r') as source:
        source.readline()  # skip the header row
        for raw in source:
            fields = raw.strip().split(',')
            age = int(fields[1])
            year_hired = int(fields[3])
            if year_hired < year:
                ages.append(age)
    if not ages:
        return 0.0
    return sum(ages) / len(ages)
"""
Function Name: ageDict()
Parameters: filename (str), list of age ranges represented by strings (list)
Returns: dictionary (dict) that maps each age range (str) to a list of employees (list)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
# print(findCuisine('restaurants.txt', 'Mexican'))
#print(restaurantFilter('restaurants.txt'))
#print(createDirectory('restaurants.txt','output.txt'))
# print(extraHours('employees.csv', 40))
|
[
"[email protected]"
] | |
1a17c0e753532ecf7f5f5d0b99fb308e5ec83ca9
|
bdcab42a9124d7a3878a904076170bd4bff7451f
|
/src/hessian/random_sample_points.py
|
44047bd5934ab4c7ec808b9b9c3a87972695717a
|
[] |
no_license
|
hwang595/data_augmentation_playground
|
aa30685213083bb271ae56996d8aff831ef975ab
|
5b11a5d5c2d9254b5ffa293eebf8e3e6269edd69
|
refs/heads/master
| 2021-01-25T09:14:30.059368 | 2020-03-01T21:33:06 | 2020-03-01T21:33:06 | 93,801,194 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,705 |
py
|
import numpy as np
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
DIST_ = 20
def rand_point_generator(point_num=None):
    '''
    Generate two labelled 2-D point clouds, scaled by the module-level DIST_.

    Positive class: x in [-1.0, -0.1)*DIST_, y in [0.6, 1.4)*DIST_, label +1.
    Negative class: x in [0.5, 1.5)*DIST_,  y in [-1.0, -0.2)*DIST_, label -1.

    return:
        (pos, neg) numpy arrays with at least point_num rows of [x, y, label]
    '''
    pos_data_points = []
    neg_data_points = []
    while len(pos_data_points) < point_num or len(neg_data_points) < point_num:
        x_pos_ = np.random.randint(low=-1000, high=-100) / float(1000) * DIST_
        y_pos_ = np.random.randint(low=600, high=1400) / float(1000) * DIST_
        x_neg_ = np.random.randint(low=500, high=1500) / float(1000) * DIST_
        y_neg_ = np.random.randint(low=-1000, high=-200) / float(1000) * DIST_
        # Bug fix: the original tested `[x, y] not in list` while the list
        # stores 3-element [x, y, label] rows, so the uniqueness check never
        # matched and duplicates slipped through.  Compare the full row.
        if [x_pos_, y_pos_, 1] not in pos_data_points:
            pos_data_points.append([x_pos_, y_pos_, 1])
        if [x_neg_, y_neg_, -1] not in neg_data_points:
            neg_data_points.append([x_neg_, y_neg_, -1])
    return np.array(pos_data_points), np.array(neg_data_points)
def find_point_with_distance(center_point_0=None, center_point_1=None, distance=None):
    """Return the point at *distance* from center_point_0 along the unit
    vector pointing toward center_point_1."""
    delta = center_point_1 - center_point_0
    unit_direction = delta / float(np.linalg.norm(delta))
    return center_point_0 + distance * unit_direction
def rand_point_generator_high_dim(point_num=None, dim=None, dist=None):
    '''
    Generate two labelled point clouds in `dim` dimensions.

    param: point_num: number of data points for each of the pos/neg datasets
    param: dim: dimensionality of each data point
    param: dist: scale factor applied to every coordinate of the active
                 sampling path (the commented-out Gaussian variant below
                 used it as an explicit center-to-center distance instead)
    return: (pos_points, neg_points, pos_labels, neg_labels) numpy arrays;
            labels are +1 for the positive cloud, -1 for the negative one
    '''
    # Fixed seed: every call returns the same clouds.
    np.random.seed(seed=42)
    POS_HIGH_ = -200
    POS_LOW_ = -1200
    NEG_HIGH_ = 1800
    NEG_LOW_ = 400
    sigma_ = 0.1  # only used by the commented-out Gaussian sampling variant
    pos_data_points = []
    neg_data_points = []
    pos_labels = []
    neg_labels = []
    tmp_pos_ = np.zeros(dim)
    tmp_neg_ = np.zeros(dim)
    # we randomly generate two data points first, then based on them, we further generate more
    # data points
    # NOTE(review): tmp_pos_/tmp_neg_ feed only the commented-out branch below;
    # the active code ignores them (they still consume 2*dim random draws,
    # which shifts the RNG stream — confirm before removing).
    for i in range(dim):
        tmp_pos_[i] = np.random.randint(low=POS_LOW_, high=POS_HIGH_) / float(1000)
        tmp_neg_[i] = np.random.randint(low=NEG_LOW_, high=NEG_HIGH_) / float(1000)
    # we generate another center by one center and distance predefined
    while len(pos_data_points) < point_num or len(neg_data_points) < point_num:
        pos_data_point = np.zeros(dim)
        neg_data_point = np.zeros(dim)
        for i in range(dim):
            pos_data_point[i] = np.random.randint(low=POS_LOW_, high=POS_HIGH_) / float(1000) * dist
            neg_data_point[i] = np.random.randint(low=NEG_LOW_, high=NEG_HIGH_) / float(1000) * dist
        pos_data_points.append(pos_data_point)
        neg_data_points.append(neg_data_point)
        pos_labels.append(1)
        neg_labels.append(-1)
    '''
    pos = tmp_pos_
    new_neg = find_point_with_distance(tmp_pos_, tmp_neg_, distance=dist)
    while len(pos_data_points) < point_num or len(neg_data_points) < point_num:
        pos_data_point = np.zeros(dim)
        neg_data_point = np.zeros(dim)
        for i in range(dim):
            pos_data_point[i] = np.random.normal(pos[i], sigma_)
            neg_data_point[i] = np.random.normal(new_neg[i], sigma_)
        pos_data_points.append(pos_data_point)
        neg_data_points.append(neg_data_point)
        pos_labels.append(1)
        neg_labels.append(-1)
    '''
    return np.array(pos_data_points), np.array(neg_data_points), np.array(pos_labels), np.array(neg_labels)
def get_transformation(angle=None):
    '''
    Return the 2x2 anti-clockwise rotation matrix for *angle* degrees,
    as a numpy ndarray.

    Improvement: the original built the matrix by formatting the floats into
    a string and parsing it with np.matrix (a deprecated class), then
    converted back with np.array.  Constructing the ndarray directly is
    equivalent and avoids both the string round-trip and the deprecated API.
    '''
    theta = np.radians(angle)
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s], [s, c]])
if __name__ == "__main__":
np.random.seed(seed=42)
X_pos, X_neg, y_pos, y_neg = rand_point_generator_high_dim(point_num=50, dim=6, dist=0.5)
X = np.concatenate((X_pos, X_neg), axis=0)
#plt.show()
'''
pca_pos = PCA(n_components=2)
pca_neg = PCA(n_components=2)
X_decomp_pos=pca_pos.fit_transform(X_pos)
X_decomp_neg=pca_neg.fit_transform(X_neg)
'''
pca = PCA(n_components=2)
X_decomp = pca.fit_transform(X)
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.scatter(X_pos[:, 0], X_pos[:, 1], X_pos[:, 2], c='r', marker='^')
# ax.scatter(X_neg[:, 0], X_neg[:, 1], X_neg[:, 2], c='b', marker='s')
# plt.show()
#print(X_decomp_pos.shape)
#print(X_decomp_neg.shape)
plt.figure(2)
plt.hold(True)
for i in range(X_decomp.shape[0]):
if i < X_decomp.shape[0] / 2:
plt.plot(X_decomp[i, 0], X_decomp[i, 1], '^r')
else:
plt.plot(X_decomp[i, 0], X_decomp[i, 1], '^b')
#plt.plot(X_decomp_neg[:, 0], X_decomp_neg[:, 1], 'sb')
plt.show()
#print(np.linalg.norm(tmp_pos-new_neg))
#print(tmp_pos.shape)
#print(new_neg.shape)
'''
pos_data_points, neg_data_points=rand_point_generator(point_num=50)
dataset = np.concatenate((pos_data_points, neg_data_points), axis=0)
rotation_matrix = get_transformation(angle=60)
pos_transformed = np.dot(pos_data_points[:,0:2], rotation_matrix)
neg_transformed = np.dot(neg_data_points[:,0:2], rotation_matrix)
fig = plt.figure(1)
plt.scatter([x[0] for x in pos_data_points], [x[1] for x in pos_data_points], c='r')
plt.scatter([x[0] for x in neg_data_points], [x[1] for x in neg_data_points], c='b')
#fig_2 = plt.figure(2)
plt.scatter([x[0] for x in pos_transformed], [x[1] for x in pos_transformed], c='r', marker='^')
plt.scatter([x[0] for x in neg_transformed], [x[1] for x in neg_transformed], c='b', marker='^')
plt.show()
'''
|
[
"[email protected]"
] | |
54aa25a623bcd141ceb60503e4862c6560334415
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_340/ch79_2020_04_08_17_16_37_430613.py
|
3692e00bb61b220fb835ac8e529d71a5ac2851ad
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 133 |
py
|
def monta_dicionario(lista1, lista2):
    """Return a dict mapping lista1[i] -> lista2[i] for every index of lista1.

    Bug fix: the original built the dict but never returned it, so every
    call produced None.  The index loop is kept (rather than zip) so a
    too-short lista2 still raises IndexError as before.
    """
    dicionario = {}
    for i in range(len(lista1)):
        dicionario[lista1[i]] = lista2[i]
    return dicionario
|
[
"[email protected]"
] | |
ff93f81a89b9f25fa80f463b60f894e744aea0dd
|
69a36ca23409b994a31759bad58971b197cad236
|
/config/settings.py
|
3445021fab5cfbc2c6ca87cdbd98f719463686c2
|
[] |
no_license
|
matt700395/awesome_repo
|
56601cf817106df0e210e78c7bb1f11af1e60c3a
|
d49b1e55b6ade24f1f2058319ac6859b45e511bc
|
refs/heads/master
| 2023-08-27T16:45:15.570115 | 2021-11-11T14:16:06 | 2021-11-11T14:16:06 | 427,026,282 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,346 |
py
|
"""
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.0.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed to source control — load it from an
# environment variable before any production deployment.
SECRET_KEY = "cc)*5=(s+i2-&9x7&&&o+y7$g5!db3tvu85ykok#mwxf#6gir2"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
# Apps are grouped so project-local apps stay easy to spot.
DJANGO_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
]
PROJECT_APPS = [
    "core.apps.CoreConfig",
    "users.apps.UsersConfig",
    "rooms.apps.RoomsConfig",
]
THIRD_PARTY_APPS = []
INSTALLED_APPS = DJANGO_APPS + PROJECT_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "config.urls"
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]
WSGI_APPLICATION = "config.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# Dev-only SQLite database stored alongside the project.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        "NAME": os.path.join(BASE_DIR, "db.sqlite3"),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
    },
    {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
    {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
    {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = "/static/"
# User uploads are written to the local "uploads" directory.
MEDIA_ROOT = os.path.join(BASE_DIR, "uploads")
MEDIA_URL = "/media/"
# Auth
# Custom user model defined in the users app.
AUTH_USER_MODEL = "users.User"
|
[
"[email protected]"
] | |
c474ddcdc642369145b11ba23644182f63331500
|
116a4a2fcd3e9c3d216f96103006c707daa6001a
|
/HelloDjango/apps/awards/migrations/0017_auto_20200726_0254.py
|
1dccd60f9a05a0237dcea616506c43eae765cb60
|
[] |
no_license
|
Eldar1988/a_white_birds
|
22d743ed1fa651062f070c0e81b7ac665be7a72a
|
0430d5322b3a55b6f55e9541675d6670f5d8a518
|
refs/heads/master
| 2022-12-18T20:23:26.293059 | 2020-09-15T04:27:59 | 2020-09-15T04:27:59 | 283,169,602 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,212 |
py
|
# Generated by Django 3.0.6 on 2020-07-25 20:54
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drops the Jury and Promocode models and moves the jury-only fields
    (interview, preview, professional) onto Profile."""
    dependencies = [
        ('awards', '0016_juryapproved_project'),
    ]
    operations = [
        # Jury.user must be removed before the Jury model itself is deleted.
        migrations.RemoveField(
            model_name='jury',
            name='user',
        ),
        migrations.DeleteModel(
            name='Promocode',
        ),
        migrations.AddField(
            model_name='profile',
            name='interview',
            field=models.URLField(null=True, verbose_name='Ссылка на интервью (только для жюри)'),
        ),
        migrations.AddField(
            model_name='profile',
            name='preview',
            field=models.TextField(max_length=500, null=True, verbose_name='Краткая информация - один абзац (только для жюри)'),
        ),
        migrations.AddField(
            model_name='profile',
            name='professional',
            field=models.CharField(max_length=200, null=True, verbose_name='Профессия (только для жюри)'),
        ),
        migrations.DeleteModel(
            name='Jury',
        ),
    ]
|
[
"[email protected]"
] | |
ec6e3a87299b3f0b27c39ebb22357a57cd9e2f35
|
04afb34356de112445c3e5733fd2b773d92372ef
|
/Sem1/FP/S13/venv/Scripts/pip-script.py
|
ecfdd60747e705166efa7dda1830c8ac7fb753a9
|
[] |
no_license
|
AndreeaCimpean/Uni
|
a4e48e5e1dcecbc0c28ad45ddd3b0989ff7985c8
|
27df09339e4f8141be3c22ae93c4c063ffd2b172
|
refs/heads/master
| 2020-08-21T19:12:49.840044 | 2020-05-15T17:22:50 | 2020-05-15T17:22:50 | 216,222,647 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 405 |
py
|
#!D:\Facultate\UniRepo\Sem1\FP\S13\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
# NOTE: auto-generated by setuptools (easy-install) for this virtualenv —
# do not edit by hand; it is regenerated on (re)installation.
__requires__ = 'pip==19.0.3'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the "-script.py"/".exe" wrapper suffix so pip sees its canonical
    # program name in sys.argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
    )
|
[
"[email protected]"
] | |
07ee5ca8244bc40fdcfdffc0e184e8d66225d837
|
91d13f45f8527c368ebc6e44c75142a043f0583b
|
/test_zappa_cookiecutter/users/tests/test_drf_urls.py
|
5c5a28e94cb0566c442fdcd429e5dbf1a914a39c
|
[
"MIT"
] |
permissive
|
Andrew-Chen-Wang/cookiecutter-django-lambda
|
6beed03d82eeecf95281c7f03a279c9c8b2ca85c
|
c4c64e174f653205c399ffa683918141f2f058d7
|
refs/heads/master
| 2022-11-16T12:20:00.589856 | 2020-07-19T20:19:41 | 2020-07-19T20:19:41 | 280,943,511 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 676 |
py
|
import pytest
from django.urls import resolve, reverse
from test_zappa_cookiecutter.users.models import User
pytestmark = pytest.mark.django_db
def test_user_detail(user: User):
    """URL name api:user-detail maps both ways to /api/users/<username>/."""
    assert (
        reverse("api:user-detail", kwargs={"username": user.username})
        == f"/api/users/{user.username}/"
    )
    assert resolve(f"/api/users/{user.username}/").view_name == "api:user-detail"
def test_user_list():
    """URL name api:user-list maps both ways to /api/users/."""
    assert reverse("api:user-list") == "/api/users/"
    assert resolve("/api/users/").view_name == "api:user-list"
def test_user_me():
    """URL name api:user-me maps both ways to /api/users/me/."""
    assert reverse("api:user-me") == "/api/users/me/"
    assert resolve("/api/users/me/").view_name == "api:user-me"
|
[
"[email protected]"
] | |
221eabeb7855ab26b445ce0626620cf82ea4dd10
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/series/a713022194c640d79ae14ee2e504dd88.py
|
eb7a127a4563a635852c50f164844820a748ca91
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null |
UTF-8
|
Python
| false | false | 221 |
py
|
def slices( N, size ):
    """Return every contiguous window of `size` digits from the string N.

    Raises ValueError for an empty window on a non-empty string, or for a
    window longer than the string.
    """
    if (size == 0 and len(N) > 0) or (len(N) < size):
        raise ValueError('Bad input!')
    windows = []
    for start in range(len(N) - size + 1):
        windows.append([int(ch) for ch in N[start:start + size]])
    return windows
|
[
"[email protected]"
] | |
deca411428980e2f3479946e16bec2cf5d7bc3c3
|
516932b326f58f9dc7c008e379f80cafd820acc0
|
/src/helixtariff/test/logic/test_user_tariff.py
|
efde456f49fbfc657f1b000a05a4043a1fc4b16b
|
[] |
no_license
|
sand8080/helixtariff
|
ffa4021fac16876bbbad8a4a8f1c53a9e4fd71d7
|
0bb56ad9e954509961db6bf636bce3a541709b93
|
refs/heads/master
| 2020-12-24T14:57:01.276045 | 2012-07-12T14:59:56 | 2012-07-12T14:59:56 | 1,605,281 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,923 |
py
|
import unittest
from helixcore.error import RequestProcessingError
from helixtariff.test.logic.actor_logic_test import ActorLogicTestCase
class UserTariffTestCase(ActorLogicTestCase):
    """Integration tests for attaching/detaching tariffs to a user."""

    # Arbitrary fixed user id shared by all test cases.
    u_id = 22
    def test_add_user_tariff(self):
        # Happy path: an existing tariff can be attached to a user.
        t_id = self._add_tariff('tariff one', currency='RUB')
        self._add_user_tariff(t_id, self.u_id)
    def test_add_user_tariff_duplication(self):
        # Attaching the same tariff twice must be rejected.
        name = 'tariff one'
        t_id = self._add_tariff(name, currency='RUB')
        self._add_user_tariff(t_id, self.u_id)
        self.assertRaises(RequestProcessingError, self._add_user_tariff, t_id, self.u_id)
    def test_add_wrong_tariff(self):
        # A non-existent tariff id (555) must be rejected.
        self.assertRaises(RequestProcessingError, self._add_user_tariff, 555, self.u_id)
    def test_delete_user_tariff(self):
        # Attach, verify, then delete and verify nothing remains.
        t_id = self._add_tariff('t', currency='RUB')
        self._add_user_tariff(t_id, self.u_id)
        user_tariffs = self._get_user_tariffs([self.u_id])
        self.assertEquals([t_id], user_tariffs[0]['tariff_ids'])
        sess = self.login_actor()
        req = {'session_id': sess.session_id, 'user_id': self.u_id,
            'tariff_ids': [t_id]}
        resp = self.delete_user_tariffs(**req)
        self.check_response_ok(resp)
        user_tariffs = self._get_user_tariffs([self.u_id])
        self.assertEquals(0, len(user_tariffs))
    def test_get_user_tariffs(self):
        # Only tariffs actually attached to the user are reported.
        self._add_tariff('t0', currency='RUB')
        t_id_1 = self._add_tariff('t1', currency='RUB')
        user_tariffs = self._get_user_tariffs([self.u_id])
        self.assertEquals(0, len(user_tariffs))
        self._add_user_tariff(t_id_1, self.u_id)
        user_tariffs = self._get_user_tariffs([self.u_id])
        self.assertEquals(1, len(user_tariffs))
        self.assertEquals(self.u_id, user_tariffs[0]['user_id'])
        self.assertEquals([t_id_1], user_tariffs[0]['tariff_ids'])
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
[
"[email protected]"
] | |
0e512d5cc3c40a98f88773bb04257a5009284703
|
a8062308fb3bf6c8952257504a50c3e97d801294
|
/test/test_1680_concatenation_of_consecutive_binary_numbers.py
|
2e973fa95d840dec8ee1b362d393d6690776c76f
|
[] |
no_license
|
wan-catherine/Leetcode
|
650d697a873ad23c0b64d08ad525bf9fcdb62b1b
|
238995bd23c8a6c40c6035890e94baa2473d4bbc
|
refs/heads/master
| 2023-09-01T00:56:27.677230 | 2023-08-31T00:49:31 | 2023-08-31T00:49:31 | 143,770,000 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 460 |
py
|
from unittest import TestCase
from problems.N1680_Concatenation_Of_Consecutive_Binary_Numbers import Solution
class TestSolution(TestCase):
    """LeetCode 1680: concatenate binary of 1..n, result modulo 1e9+7."""
    def test_concatenatedBinary(self):
        # n = 1 -> "1" -> 1
        self.assertEqual(1, Solution().concatenatedBinary(1))
    def test_concatenatedBinary_1(self):
        # n = 3 -> "1"+"10"+"11" = "11011" -> 27
        self.assertEqual(27, Solution().concatenatedBinary(3))
    def test_concatenatedBinary_2(self):
        # Larger case; expected value already reduced modulo 10**9 + 7.
        self.assertEqual(505379714, Solution().concatenatedBinary(12))
|
[
"[email protected]"
] | |
20a59d30363f13db08a271bd7d4156a4795b5037
|
9fa71d5834dae1c8900b3444f564b11326374d36
|
/packages/ipm_cloud_postgresql/folha/rotinas_envio/tipo-afastamento.py
|
81f76c9ccfb467f9f87b432e8845eb17d8d9c18f
|
[] |
no_license
|
JoaoPauloLeal/toolbox
|
a85e726cfeb74603cb64d73c4af64757a9a60db7
|
924c063ba81395aeddc039a51f8365c02e527963
|
refs/heads/master
| 2023-06-07T02:17:42.069985 | 2021-06-28T19:06:40 | 2021-06-28T19:06:40 | 381,128,045 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,411 |
py
|
import packages.ipm_cloud_postgresql.model as model
import bth.interacao_cloud as interacao_cloud
import json
import logging
from datetime import datetime
tipo_registro = 'tipo-afastamento'
sistema = 300
limite_lote = 500
url = "https://pessoal.cloud.betha.com.br/service-layer/v1/api/tipo-afastamento"
def iniciar_processo_envio(params_exec, *args, **kwargs):
    """Full pipeline for this record type: query rows, pre-validate, send.

    When params_exec['somente_pre_validar'] is truthy, stops after the
    validation step (dry-run mode).
    """
    dados_assunto = coletar_dados(params_exec)
    dados_enviar = pre_validar(params_exec, dados_assunto)
    if not params_exec.get('somente_pre_validar'):
        iniciar_envio(params_exec, dados_enviar, 'POST')
        # Reconcile the lots that were just sent against the control tables.
        model.valida_lotes_enviados(params_exec, tipo_registro=tipo_registro)
def coletar_dados(params_exec):
    """Run the record-type SQL query and return a DataFrame (None on error)."""
    print('- Iniciando a consulta dos dados a enviar.')
    df = None
    try:
        # SQL file is resolved by convention: "<tipo_registro>.sql".
        query = model.get_consulta(params_exec, tipo_registro + '.sql')
        pgcnn = model.PostgreSQLConnection()
        df = pgcnn.exec_sql(query, index_col='id')
        print(f'- Consulta finalizada. {len(df.index)} registro(s) encontrado(s).')
    except Exception as error:
        # Best-effort: report and fall through, returning None.
        print(f'Erro ao executar função {tipo_registro}. {error}')
    finally:
        # NOTE(review): `return` inside `finally` also swallows any exception
        # not caught above — confirm this is intentional.
        return df
def pre_validar(params_exec, dados):
    """Validate queried rows before sending; return the accepted records.

    `dados` is a DataFrame; the result is a list of row dicts. The rule set
    is currently a placeholder that accepts every record.
    """
    print('- Iniciando pré-validação dos registros.')
    aceitos = []
    com_advertencia = []
    try:
        for registro in dados.to_dict('records'):
            # Placeholder validation: every record passes for now.
            eh_valido = True
            if eh_valido:
                aceitos.append(registro)
        print(f'- Pré-validação finalizada. Registros validados com sucesso: '
              f'{len(aceitos)} | Registros com advertência: {len(com_advertencia)}')
    except Exception as error:
        logging.error(f'Erro ao executar função "pre_validar". {error}')
    finally:
        return aceitos
def iniciar_envio(params_exec, dados, metodo, *args, **kwargs):
    """Build the cloud API payloads from validated rows and send them in lots.

    Also records each payload in the local migration-control tables so a
    later reconciliation can match server ids back to source keys.
    NOTE(review): `metodo` and `hoje` are currently unused — confirm.
    """
    print('- Iniciando envio dos dados.')
    lista_dados_enviar = []
    lista_controle_migracao = []
    hoje = datetime.now().strftime("%Y-%m-%d")
    token = params_exec['token']
    contador = 0
    for item in dados:
        # Deterministic integration id derived from the source natural keys.
        hash_chaves = model.gerar_hash_chaves(sistema, tipo_registro, item['id_entidade'], item['codigo'])
        # Map source columns to the API payload; absent keys become None.
        dict_dados = {
            'idIntegracao': hash_chaves,
            'conteudo': {
                'descricao': None if 'descricao' not in item else item['descricao'],
                'classificacao': None if 'classificacao' not in item else item['classificacao'],
                'tipoMovimentacaoPessoal': None if 'tipomovimentacaopessoal' not in item else item['tipomovimentacaopessoal'],
                'diasPrevistos': None if 'diasprevistos' not in item else item['diasprevistos'],
                'perdeTempoServico': None if 'perdetemposervico' not in item else item['perdetemposervico'],
                'consideraVencimento': None if 'consideravencimento' not in item else item['consideravencimento'],
                'reduz13Salario': None if 'reduz13salario' not in item else item['reduz13salario'],
                'reduzFerias': None if 'reduzferias' not in item else item['reduzferias'],
                'justificado': None if 'justificado' not in item else item['justificado'],
                'reduzFgts': None if 'reduzfgts' not in item else item['reduzfgts']
            }
        }
        contador += 1
        print(f'Dados gerados ({contador}): ', dict_dados)
        lista_dados_enviar.append(dict_dados)
        # Control-table row linking the hash back to the source keys.
        lista_controle_migracao.append({
            'sistema': sistema,
            'tipo_registro': tipo_registro,
            'hash_chave_dsk': hash_chaves,
            'descricao_tipo_registro': 'Cadastro de Tipo de Afastamento',
            'id_gerado': None,
            'i_chave_dsk1': item['id_entidade'],
            'i_chave_dsk2': item['codigo']
        })
    # NOTE(review): `if True:` looks like a disabled feature toggle — confirm.
    if True:
        model.insere_tabela_controle_migracao_registro2(params_exec, lista_req=lista_controle_migracao)
        # Send in lots of `limite_lote`; the responses are stored for auditing.
        req_res = interacao_cloud.preparar_requisicao(lista_dados=lista_dados_enviar,
                                                      token=token,
                                                      url=url,
                                                      tipo_registro=tipo_registro,
                                                      tamanho_lote=limite_lote)
        model.insere_tabela_controle_lote(req_res)
        print('- Envio de dados finalizado.')
|
[
"[email protected]"
] | |
c3a9262abc44ac5508726e238bdcacc3f8454599
|
24cee07743790afde5040c38ef95bb940451e2f6
|
/acode/abc284/e/update.py
|
cbe323dede2e63602d87336c493cc58525a7c3eb
|
[] |
no_license
|
tinaba96/coding
|
fe903fb8740d115cf5a7f4ff5af73c7d16b9bce1
|
d999bf5620e52fabce4e564c73b9f186e493b070
|
refs/heads/master
| 2023-09-01T02:24:33.476364 | 2023-08-30T15:01:47 | 2023-08-30T15:01:47 | 227,594,153 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,006 |
py
|
import sys
sys.setrecursionlimit(500005)
#sys.setrecursionlimit(10**9)
#import pypyjit # this is for solving slow issue for pypy when using recursion but python will not need this (test will fail but submit works)
#pypyjit.set_param('max_unroll_recursion=-1')
# Read the graph: N vertices, M undirected edges, then build an adjacency list.
N, M = list(map(int, input().split()))
mp = [[] for n in range(N+1)]
for i in range(M):
    u, v = list(map(int, input().split()))
    mp[u].append(v)
    mp[v].append(u)
# `al` holds every distinct simple path seen so far, encoded as a string of
# vertex labels; `cnt` counts them, capped at 10**6 per the problem statement.
al = set()
cnt = 0
def dfs(p, e):
    # p: path encoded as concatenated vertex labels; e: current vertex.
    # NOTE(review): concatenating labels without a separator is ambiguous for
    # vertices >= 10 ("1"+"23" vs "12"+"3") — see the discussion notes below.
    global cnt
    if p not in al:
        al.add(p)
        cnt += 1
        if len(al) > 10**6:
            # Answer is capped at 10**6; stop immediately once exceeded.
            print(10**6)
            exit()
    for n in mp[e]:
        # Skip vertices already on the path (substring test, see note above).
        if str(n) in p:
            continue
        dfs(p+str(n), n)
    return
dfs('1', 1)
print(cnt)
# WA: 全探索ができていない?
# TLE: len(al)やstr(n) in p に時間を要している? それともpythonの再帰だから? -> len(al) is O(1), str(n) in p is almopst O(NlogN) (this is the cause of TLE)
# len(al) can costs almost 10**6 specially at the end. -> this is wrong see below
# str(n) in p costs O(len(p)) which is O(N) at maximum -> almost O(NlogN)
'''
ask question in LINE
ME
ABC284Eなのですが、このように実装して提出した結果、AC: 21 WA: 9 TLE: 3というような結果になってしまいました。
TLEになる原因は、len(al)やstr(n) in p だと思うのですが、WAになる原因が分かりません。パスを文字列として、setに格納していく実装なのですが、WAの原因分かる方いらっしゃいますでしょうか。
answer1
p = '1'+'2'のときに12も行ったことになるとか?
path graph (一直線のグラフ)だとalに入る文字数がO(n^2)になって大変なことになりませんか
ME
そうですね!確かにこれだと0-9までの頂点しか機能しないですね!
ありがとうございます!
ans2
dfs(p+‘$’+str(n), n)
とかってしたらこの問題は解決できそうですね
ME
al.add(p)のpの(文字列の)長さlen(p)がO(n^2)なるということでしょうか。(for ans1)
確かに頭に文字列をつければ、探索する際も特定できますね!ありがとうございます!(for ans2)
ans1
alに入っている文字列の合計の長さです
単純グラフなので、DFSする限りでは毎回必ず違ったpになるので、個数だけ管理しておけばよいです
ME
確かにそうなりますね!気づきませんでした、、
これは単純にメモリ制限的に引っかかるという考え方で良いのでしょうか。
勉強になります!
ans1
基本的にそのはず…賢い言語実装だとメモリ節約してくれるのもあった気がしますが
ME
ありがとうございます!
ちなみに、dfsの部分はO(N+M)だと思っているのですが、
それに加え、len(al)やstr(n) in p の部分がさらにO(N)かかり、全体的にO(N(N+M))ではないかと考えたのですが、考え方はあっているのでしょうか。
len(al)やstr(n) in pの部分はそれぞれalとpの長さの分計算コストかかると思っているのですが、それぞれの長さがNくらいになるのは最後の方だけだと思います。全体としてO(N(N+M)と考えて良いのでしょうか。
len(al)やstr(n) in pの部分は、ならし計算量でもO(1)にならないと思うので、ならし計算量でO(1)にならなければ、O(N)と考えれば良いのでしょうか?
asn3
(余計なお世話かもしれませんがnを文字列で表した時の長さはO(log n)なのでalに含まれる文字列の長さの合計にもlogが付くと思います)
ans4
len は定数時間じゃないですか?
ME
ありがとうございます!
これは、グラフの分岐があるためlogがつくということでしょうか。
一直線のグラフなどの最悪ケースでO(n^2)になるという理解で良いでしょうか? (for ans3)
pythonは長さを別で用意していて、len()はO(1)のようでした。
ご指摘ありがとうございます!(for ans4)
ans3
nを文字列で表そうとすると、その桁数分の文字が必要で、その桁数というのがO(log n)なので文字列の長さ、つまり文字の個数の合計にlogが付くという話です
例えば1や3は1桁なので1文字で良いですが、100000は6桁なので6文字必要です
ans5
その問題、再帰関数を用いたdfsが一般的だと思うのですが、スタックを用いたdfs で実装するのは厳しそうですかね?
ME
そういうことですね!理解できました。ありがとうございます!(for ans3)
となると、TLEの原因はstr(n) in pの部分でpの長さ分コストがかかるという理解で良いのでしょうか。pは最大N回文字列が足され、それぞれ足される文字列の長さがO(logN)と考えるとpの長さは O (NlogN)という感じでしょうか。
実装まではしていないのですが、pythonの再帰処理が苦手であることを考えるとスタックによる実装の方が早くなるとは思います。
ただこれがTLEの原因なのでしょうか。それとも上記のstr(n) in pがボトルネックになっているのでしょうか。(for ans5)
ans3
正しいと思います
TLEの原因がこれで、もしTLが無限であった場合今度はalのメモリが原因でMLEになると思います
ans4
+str(n) も PyPy だと遅そうなのと、なんか "123" か 1 → 2 → 3 なのか 1 → 23 なのかの曖昧性があって壊れませんか?
後者が WA になってそうで、例えば
1 → 23 → 2 のときに、2 が踏めないと判断されそうです
あ、既に指摘されてましたごめんなさい
ME
ありがとうございます!非常に納得がいき、勉強になりました!(for ans3)
いえいえ!ありがとうございます!
具体例も非常に勉強になりました!(for ans4)
'''
|
[
"[email protected]"
] | |
00e61e3359148ae5195cff96ee8a1f87917fa3ba
|
6f05f7d5a67b6bb87956a22b988067ec772ba966
|
/data/test/python/e0a24819976e888969becc8f9ec8d2f0e7e377efurls.py
|
e0a24819976e888969becc8f9ec8d2f0e7e377ef
|
[
"MIT"
] |
permissive
|
harshp8l/deep-learning-lang-detection
|
93b6d24a38081597c610ecf9b1f3b92c7d669be5
|
2a54293181c1c2b1a2b840ddee4d4d80177efb33
|
refs/heads/master
| 2020-04-07T18:07:00.697994 | 2018-11-29T23:21:23 | 2018-11-29T23:21:23 | 158,597,498 | 0 | 0 |
MIT
| 2018-11-21T19:36:42 | 2018-11-21T19:36:41 | null |
UTF-8
|
Python
| false | false | 591 |
py
|
#coding:utf-8
from controller.index import Index
from controller.article import Article
from controller.signin import Signin
from controller.write import Write
from controller.signout import Signout
from controller.page import About,Type
from controller.api import Article as ART,Comment as com
# URL routing table: maps request path patterns to controller handlers.
urls = [
    # Home page
    (r'/', Index),
    # Article detail
    (r'/article/([^\n]*)',Article),
    # Sign in
    (r'/signin',Signin),
    # Publish a post
    (r'/write',Write),
    # Article / comment API
    (r'/api/article/([^\n]*)',ART),
    (r'/api/comment',com),
    # Sign out
    (r'/signout',Signout),
    # About page
    (r'/about',About),
    # Category listing
    (r'/type',Type)
]
|
[
"[email protected]"
] | |
626c922de9219080952e7221d26a8a4a2740ad29
|
6e8b606bca1eaddd8858fffc0fdeda039a438af5
|
/source/precipitation/precip_stats_for_central_arctic_to_timeseries.py
|
3af2eff5c86951f4471ed3fd8fddbaeec12bb877
|
[] |
no_license
|
andypbarrett/SnowOnSeaIce
|
1f93a0523933fff0bfdd89fc87ad32b371bae359
|
b8fe84a23bf790eb8efc43f4b89725fb7ba7d73c
|
refs/heads/master
| 2023-01-20T11:53:30.835890 | 2023-01-18T17:43:19 | 2023-01-18T17:43:19 | 137,275,118 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,836 |
py
|
#----------------------------------------------------------------------
# Calculates mean of precipitation stats for Arctic Ocean excluding
# Barents and Kara seas. This region conforms to the regions with
# data from the NP drifting stations.
#----------------------------------------------------------------------
import pandas as pd
import os
import utilities as util
from constants import arctic_mask_region as region
from constants import accumulation_period_filepath
def make_outfilepath(fili):
    """Return the output path: `fili` with its extension replaced by
    '.npsnow_region.csv'."""
    extension = os.path.splitext(fili)[1]
    return fili.replace(extension, '.npsnow_region.csv')
def precip_stats_for_central_arctic_to_time_series(reanalysis, verbose=False):
    """Average annual precip stats over the NPSNOW-comparable Arctic region
    (central Arctic + Beaufort + Chukchi + Laptev + East Siberian, i.e.
    excluding Barents/Kara) and write the resulting time series to CSV.

    NOTE(review): `verbose` is currently unused — confirm before relying on it.
    """
    ds = util.load_annual_accumulation(reanalysis)
    # Drizzle = total precipitation minus precipitation on wet days.
    ds['drizzle'] = ds['precTot'] - ds['wetdayTot']
    # Make mask for central Arctic excluding Barents and Kara seas
    mask = util.read_region_mask()
    newmask = (mask == region['CENTRAL_ARCTIC']) | \
              (mask == region['BEAUFORT']) | \
              (mask == region['CHUKCHI']) | \
              (mask == region['LAPTEV']) | \
              (mask == region['EAST_SIBERIAN'])
    # Spatial mean over the masked grid -> one row per time step.
    region_mean = ds.where(newmask).mean(dim=['x','y']).to_dataframe()
    filo = make_outfilepath(accumulation_period_filepath[reanalysis])
    #annual_accumulation_filepath[reanalysis].replace('.nc','.RegionSeries.csv')
    print (f'Writing time series to {filo}')
    region_mean.to_csv(filo)
    return
# Command-line entry point: process a single reanalysis product.
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Calculates time series of average precip stats for NPSNOW Arctic region")
    parser.add_argument('reanalysis', type=str, help='Reanalysis to process')
    args = parser.parse_args()
    precip_stats_for_central_arctic_to_time_series(args.reanalysis, verbose=True)
|
[
"[email protected]"
] | |
b586bc73c8abf2ab0858af5c05cb97731f7d31fa
|
a366db0f2a117e0a8cf923e9b4de5c643155e047
|
/bench/state.py
|
08dad6e8fdc6e4fca68111ef1035197012e312ac
|
[] |
no_license
|
genome/nessy-server
|
d2ff6aa7bb692f50e5cabb435a380670be75b2b9
|
f8207310d33bf259130df806b4d759ef1a883e56
|
refs/heads/master
| 2021-01-10T18:59:38.910186 | 2014-12-29T22:11:16 | 2014-12-29T22:11:16 | 15,785,645 | 0 | 0 | null | 2014-12-29T22:11:16 | 2014-01-10T01:57:38 |
Python
|
UTF-8
|
Python
| false | false | 2,115 |
py
|
import collections
import datetime
class State(object):
    """Tracks resource lifecycle states and per-tag request timings for a
    benchmark run."""

    # Sentinel meaning "claim_url argument was not supplied".
    UNSET = object()

    def __init__(self, resource_names):
        # state name -> set of resources currently in that state
        self._state_index = collections.defaultdict(set)
        self._state_index['released'].update(resource_names)
        # resource -> current state name; every resource starts 'released'
        self._resource_index = {r: 'released' for r in resource_names}
        # resource -> claim URL (present only while claimed)
        self._claim_urls = {}
        self.transition_count = 0
        # request tag -> list of observed durations in seconds
        self._request_times = collections.defaultdict(list)

    def get_claim_url(self, resource):
        """Return the claim URL stored for `resource` (KeyError if none)."""
        return self._claim_urls[resource]

    def resources_in_states(self, *states):
        """Return the union of resources currently in any of `states`."""
        blah = [self._state_index[s] for s in states]
        return set.union(*blah)

    def set_resource_state(self, resource, state, claim_url=UNSET):
        """Move `resource` to `state`, optionally updating its claim URL.

        Passing claim_url=None clears any stored URL; omitting the argument
        leaves the stored URL untouched.
        """
        self.transition_count += 1
        old_state = self._resource_index.pop(resource)
        self._resource_index[resource] = state
        self._state_index[old_state].remove(resource)
        self._state_index[state].add(resource)
        if claim_url is not self.UNSET:
            if claim_url is None and resource in self._claim_urls:
                self._claim_urls.pop(resource)
            else:
                self._claim_urls[resource] = claim_url

    def noop(self):
        """Count a transition that changes no state."""
        self.transition_count += 1

    def start_timer(self):
        """Mark the start of the measured run."""
        self._begin_time = datetime.datetime.now()

    def stop_timer(self):
        """Mark the end of the measured run."""
        self._end_time = datetime.datetime.now()

    @property
    def _total_runtime(self):
        # Wall-clock seconds between start_timer() and stop_timer().
        return (self._end_time - self._begin_time).total_seconds()

    def report(self):
        """Return overall and per-tag throughput statistics as a dict."""
        tag_times = {
            tag: {
                'mean': sum(times) / len(times),
                'number': len(times),
                'rps': len(times) / sum(times),
            }
            # BUG FIX: was `.iteritems()`, which does not exist on Python 3;
            # `.items()` is equivalent here on both Python 2 and 3.
            for tag, times in self._request_times.items()
        }
        return {
            'total_requests': self.transition_count,
            'total_runtime': self._total_runtime,
            'rps': self.transition_count / self._total_runtime,
            'times': tag_times,
        }

    def register_request(self, tag, seconds):
        """Record one request duration (seconds) under `tag`."""
        self._request_times[tag].append(seconds)
|
[
"[email protected]"
] | |
fad0a9d402c2a9c652ef1ffc6eb8328b5bf559c7
|
5257652fc34ec87fe45d390ba49b15b238860104
|
/nn_interpretation/nn_unique/get_nn_unique.py
|
0aefadbf1cc44379399634748c270b52f7fc9a45
|
[] |
no_license
|
thekingofall/alzheimers_parkinsons
|
cd247fa2520c989e8dd853ed22b58a9bff564391
|
4ceae6ea3eb4c58919ff41aed8803855bca240c8
|
refs/heads/master
| 2022-11-30T22:36:37.201334 | 2020-08-12T01:23:55 | 2020-08-12T01:23:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 233 |
py
|
# Compute the hits unique to the neural net: names flagged by the NN
# ("buddies") that the SVM did not mark as significant, one per line.
buddies_nn=set(open('buddies_nn.txt','r').read().strip().split('\n'))
sig_svm=set(open('sig_svm.txt','r').read().strip().split('\n'))
nn_unique=buddies_nn-sig_svm
outf=open('nn_unique.txt','w')
outf.write('\n'.join(nn_unique)+'\n')
|
[
"[email protected]"
] | |
184bfebb357383b520e0be4fda111faf8a4b9ffa
|
e4fcd551a9d83e37a2cd6d5a2b53a3cc397ccb10
|
/codes/eval_metrics/writing/mmocr/tools/dataset_converters/textdet/synthtext_converter.py
|
811b1cc0e669b8dd185dbcf8156595002713a850
|
[
"Apache-2.0"
] |
permissive
|
eslambakr/HRS_benchmark
|
20f32458a47c6e1032285b44e70cf041a64f842c
|
9f153d8c71d1119e4b5c926b899bb556a6eb8a59
|
refs/heads/main
| 2023-08-08T11:57:26.094578 | 2023-07-22T12:24:51 | 2023-07-22T12:24:51 | 597,550,499 | 33 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,177 |
py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import json
import os.path as osp
import time
import lmdb
import mmcv
import mmengine
import numpy as np
from scipy.io import loadmat
from shapely.geometry import Polygon
from mmocr.utils import check_argument
def trace_boundary(char_boxes):
    """Trace the boundary point of text.

    Args:
        char_boxes (list[ndarray]): The char boxes for one text. Each element
            is a 4x2 ndarray (corners ordered TL, TR, BR, BL).

    Returns:
        boundary (ndarray): The boundary point sets with size nx2.
    """
    assert check_argument.is_type_list(char_boxes, np.ndarray)
    # from top left to top right: the two top corners of each char box
    p_top = [box[0:2] for box in char_boxes]
    # from bottom right to bottom left: bottom corners, in reverse char order
    p_bottom = [
        char_boxes[idx][[2, 3], :]
        for idx in range(len(char_boxes) - 1, -1, -1)
    ]
    # Top edge (left->right) followed by bottom edge (right->left) closes
    # the polygon around the whole word.
    p = p_top + p_bottom
    boundary = np.concatenate(p).astype(int)
    return boundary
def match_bbox_char_str(bboxes, char_bboxes, strs):
    """match the bboxes, char bboxes, and strs.

    Args:
        bboxes (ndarray): The text boxes of size (2, 4, num_box).
        char_bboxes (ndarray): The char boxes of size (2, 4, num_char_box).
        strs (ndarray): The string of size (num_strs,)

    Returns:
        tuple: (poly_list, poly_box_list, poly_boundary_list,
            poly_charbox_list, poly_char_idx_list, poly_char_list), all
            indexed by word-box number.
    """
    assert isinstance(bboxes, np.ndarray)
    assert isinstance(char_bboxes, np.ndarray)
    assert isinstance(strs, np.ndarray)
    bboxes = bboxes.astype(np.int32)
    char_bboxes = char_bboxes.astype(np.int32)
    # Normalise to shape (num_box, 4, 2): add the missing box axis for a
    # single box, then move it to the front.
    if len(char_bboxes.shape) == 2:
        char_bboxes = np.expand_dims(char_bboxes, axis=2)
    char_bboxes = np.transpose(char_bboxes, (2, 1, 0))
    if len(bboxes.shape) == 2:
        bboxes = np.expand_dims(bboxes, axis=2)
    bboxes = np.transpose(bboxes, (2, 1, 0))
    # All characters with whitespace removed; aligns 1:1 with char_bboxes.
    chars = ''.join(strs).replace('\n', '').replace(' ', '')
    num_boxes = bboxes.shape[0]
    # NOTE(review): `iter` shadows the builtin here (harmless in these
    # comprehensions, but worth renaming eventually).
    poly_list = [Polygon(bboxes[iter]) for iter in range(num_boxes)]
    poly_box_list = [bboxes[iter] for iter in range(num_boxes)]
    poly_char_list = [[] for iter in range(num_boxes)]
    poly_char_idx_list = [[] for iter in range(num_boxes)]
    poly_charbox_list = [[] for iter in range(num_boxes)]
    # Split into words; cumulative word lengths give each word's char span
    # inside `chars`.
    words = []
    for s in strs:
        words += s.split()
    words_len = [len(w) for w in words]
    words_end_inx = np.cumsum(words_len)
    start_inx = 0
    for word_inx, end_inx in enumerate(words_end_inx):
        for char_inx in range(start_inx, end_inx):
            poly_char_idx_list[word_inx].append(char_inx)
            poly_char_list[word_inx].append(chars[char_inx])
            poly_charbox_list[word_inx].append(char_bboxes[char_inx])
        start_inx = end_inx
    # Every word box must have received at least one character box.
    for box_inx in range(num_boxes):
        assert len(poly_charbox_list[box_inx]) > 0
    # Trace a closed boundary polygon per word from its char boxes.
    poly_boundary_list = []
    for item in poly_charbox_list:
        boundary = np.ndarray((0, 2))
        if len(item) > 0:
            boundary = trace_boundary(item)
        poly_boundary_list.append(boundary)
    return (poly_list, poly_box_list, poly_boundary_list, poly_charbox_list,
            poly_char_idx_list, poly_char_list)
def convert_annotations(root_path, gt_name, lmdb_name):
    """Convert the annotation into lmdb dataset.

    Each lmdb record maps str(img_id) to a JSON document with the image's
    file name, size and word-level bbox/segmentation annotations; the key
    b'total_number' stores the image count.

    Args:
        root_path (str): The root path of dataset.
        gt_name (str): The ground truth filename (SynthText gt.mat).
        lmdb_name (str): The output lmdb filename.
    """
    assert isinstance(root_path, str)
    assert isinstance(gt_name, str)
    assert isinstance(lmdb_name, str)
    start_time = time.time()
    gt = loadmat(gt_name)
    img_num = len(gt['imnames'][0])
    # 40 GB map size: SynthText annotations are large.
    env = lmdb.open(lmdb_name, map_size=int(1e9 * 40))
    with env.begin(write=True) as txn:
        for img_id in range(img_num):
            # Progress/ETA report every 1000 images.
            if img_id % 1000 == 0 and img_id > 0:
                total_time_sec = time.time() - start_time
                avg_time_sec = total_time_sec / img_id
                eta_mins = (avg_time_sec * (img_num - img_id)) / 60
                print(f'\ncurrent_img/total_imgs {img_id}/{img_num} | '
                      f'eta: {eta_mins:.3f} mins')
            # for each img
            img_file = osp.join(root_path, 'imgs', gt['imnames'][0][img_id][0])
            img = mmcv.imread(img_file, 'unchanged')
            height, width = img.shape[0:2]
            img_json = {}
            img_json['file_name'] = gt['imnames'][0][img_id][0]
            img_json['height'] = height
            img_json['width'] = width
            img_json['annotations'] = []
            wordBB = gt['wordBB'][0][img_id]
            charBB = gt['charBB'][0][img_id]
            txt = gt['txt'][0][img_id]
            # Word polygons and their traced boundaries from char boxes.
            poly_list, _, poly_boundary_list, _, _, _ = match_bbox_char_str(
                wordBB, charBB, txt)
            for poly_inx in range(len(poly_list)):
                polygon = poly_list[poly_inx]
                # Axis-aligned bbox in COCO (x, y, w, h) form.
                min_x, min_y, max_x, max_y = polygon.bounds
                bbox = [min_x, min_y, max_x - min_x, max_y - min_y]
                anno_info = dict()
                anno_info['iscrowd'] = 0
                anno_info['category_id'] = 1
                anno_info['bbox'] = bbox
                anno_info['segmentation'] = [
                    poly_boundary_list[poly_inx].flatten().tolist()
                ]
                img_json['annotations'].append(anno_info)
            string = json.dumps(img_json)
            txn.put(str(img_id).encode('utf8'), string.encode('utf8'))
        # Record the dataset size under a fixed key.
        key = b'total_number'
        value = str(img_num).encode('utf8')
        txn.put(key, value)
def parse_args():
    """Parse command-line options for the SynthText -> lmdb conversion."""
    parser = argparse.ArgumentParser(
        description='Convert synthtext to lmdb dataset')
    parser.add_argument('synthtext_path', help='synthetic root path')
    parser.add_argument('-o', '--out-dir', help='output path')
    args = parser.parse_args()
    return args
# TODO: Refactor synthtext
def main():
    """Entry point: convert <synthtext_path>/gt.mat into synthtext.lmdb."""
    args = parse_args()
    synthtext_path = args.synthtext_path
    # Default the output directory to the dataset root itself.
    out_dir = args.out_dir if args.out_dir else synthtext_path
    mmengine.mkdir_or_exist(out_dir)
    gt_name = osp.join(synthtext_path, 'gt.mat')
    lmdb_name = 'synthtext.lmdb'
    convert_annotations(synthtext_path, gt_name, osp.join(out_dir, lmdb_name))
# Script entry point.
if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
dba9826fd017a5155e4aeb88ce6828001cac6adb
|
f4a4c9a68a4ead50c0882832f3f73b9cb29271f6
|
/backend/cardgameapp_22189/settings.py
|
5d1e2e15e8ab4ff8efd7b0549a39e0e1e23558cb
|
[] |
no_license
|
crowdbotics-apps/cardgameapp-22189
|
276e0c18661a3e1ae474f2deb11b6fc32b66eb38
|
200ca6880781d3d832be39f44b8aa290db481ec2
|
refs/heads/master
| 2023-01-11T11:45:35.488695 | 2020-11-01T17:50:20 | 2020-11-01T17:50:20 | 309,153,974 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,111 |
py
|
"""
Django settings for cardgameapp_22189 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
"course",
]
LOCAL_APPS = [
"home",
"users.apps.UsersConfig",
]
THIRD_PARTY_APPS = [
"rest_framework",
"rest_framework.authtoken",
"rest_auth",
"rest_auth.registration",
"bootstrap4",
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.google",
"django_extensions",
"drf_yasg",
# start fcm_django push notifications
"fcm_django",
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "cardgameapp_22189.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "cardgameapp_22189.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {"default": env.db()}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning(
"You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails."
)
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
[
"[email protected]"
] | |
e1286fa2a637e5aa1f0465a38d82e1bd3905c8d1
|
659a7a65c877f2eb0adbb6001a1f85f063d01acd
|
/mscreen/autodocktools_prepare_py3k/AutoDockTools/VisionInterface/Adt/Input/PublicServerLigandDB.py
|
26991dad68bfc2d248eec6fec64dacb18f2d6a6b
|
[
"MIT"
] |
permissive
|
e-mayo/mscreen
|
da59771be250ebe341feb102e0cbf41aab70de43
|
a50f0b2f7104007c730baa51b4ec65c891008c47
|
refs/heads/main
| 2023-06-21T17:47:06.519307 | 2021-08-09T16:06:29 | 2021-08-09T16:06:29 | 345,008,321 | 10 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,110 |
py
|
#########################################################################
#
# Date: Nov 2001 Authors: Michel Sanner
#
# [email protected]
#
# The Scripps Research Institute (TSRI)
# Molecular Graphics Lab
# La Jolla, CA 92037, USA
#
# Copyright: Michel Sanner and TSRI
#
#########################################################################
from NetworkEditor.items import NetworkNode
from AutoDockTools.VisionInterface.Adt.LigandDB import LigandDB
from mglutil.util.packageFilePath import getResourceFolderWithVersion
import os
import time
import urllib.request, urllib.error, urllib.parse
class PublicServerLigandDB(NetworkNode):
    """
    List of available public libraries on the virtual screening server.
    A description of the ligand libraries can be found on
    http://nbcr.sdsc.edu/pub/wiki/index.php?title=Virtual_Screening_Libraries

    Input:  a public ligand library name (chosen from a combo box)
    Output: LigandDB object describing the chosen library
    """

    def __init__(self, name='PublicServerLigandDB', **kw):
        kw['name'] = name
        # Fix: the base-class constructor was invoked twice via duplicated
        # copy-paste lines; initialize the node exactly once.
        NetworkNode.__init__(*(self,), **kw)

        ip = self.inputPortsDescr
        ip.append(datatype='string', name='server_lib', required=True, )

        fqdn = "kryptonite.nbcr.net"
        url = "http://" + fqdn + "/pub_ligand_libs.txt"

        # The library list is cached under the per-user resource folder so
        # the node still works when the server is unreachable.
        publibdir = os.path.join(getResourceFolderWithVersion(), 'ws')
        if not (os.path.exists(publibdir)):
            os.mkdir(publibdir)

        publiblocal = os.path.join(publibdir, 'publibs.txt')
        lock = publiblocal + '.lock'

        # Discard a stale lock file (older than 15 s) left by a crashed fetch.
        if os.path.exists(lock) and time.time() - os.path.getmtime(lock) > 15:
            os.remove(lock)

        try:
            if not (os.path.exists(lock)):
                open(lock, 'w').close()
                # Fix: urlopen(...).read() returns *bytes* under Python 3;
                # writing bytes to a text-mode file raised TypeError, which
                # the old bare `except:` silently swallowed, so the cache
                # was never refreshed.  Write the cache in binary mode.
                publibweb = urllib.request.urlopen(url)
                with open(publiblocal, 'wb') as outfile:
                    outfile.write(publibweb.read())
                os.remove(lock)
        except Exception:
            # Network failure (or a concurrent fetch holding the lock):
            # fall back to the cached copy below.
            print("[INFO]: Getting list of public server libs from cache")

        try:
            f = open(publiblocal, 'r')
            self.choices = f.read().split()
            f.close()
        except Exception:
            self.choices = []
            print("[ERROR]: Unable to public server libs from the web and from cache")

        self.widgetDescr['server_lib'] = {
            'class':'NEComboBox', 'master':'node',
            'choices':self.choices,
            'fixedChoices':True,
            'entryfield_entry_width':18,
            'labelGridCfg':{'sticky':'w'},
            'widgetGridCfg':{'sticky':'w'},
            'labelCfg':{'text':'Server Libraries:'}}

        op = self.outputPortsDescr
        op.append(datatype='LigandDB', name='ligDB')

        code = """def doit(self, server_lib):
    ligDB = LigandDB(server_lib=server_lib)
    self.outputData(ligDB=ligDB)
"""
        self.setFunction(code)
|
[
"[email protected]"
] | |
a625b979deaf6a06f61b88dd43ac56027f5f5322
|
c59d6587ed5d7e7c4f4cbad2e4c8188eee741ad9
|
/conftest.py
|
134e36c3a4f3b5465cce6e8c54ef587ba3565484
|
[] |
no_license
|
n1k0din/kekino-api
|
921d83b1be0c50e7dfb0b2411ba63fd9f3dc8039
|
4fef4b1c6bdec970fae1b599be4c719eee06e999
|
refs/heads/master
| 2023-08-27T18:21:16.640031 | 2021-11-07T11:13:01 | 2021-11-07T11:13:01 | 425,223,294 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 228 |
py
|
import pytest
from django.conf import settings
@pytest.fixture(scope='session')
def django_db_setup():
    """Point Django's default test database at the local SQLite file."""
    db_config = {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'db.sqlite3',
    }
    settings.DATABASES['default'] = db_config
|
[
"[email protected]"
] | |
d564c8aa72b8618e3d89a78ea6866c695c94cd74
|
7462f315c3f011f50dc0d1ce89cf3d5f2eb024db
|
/tramp/likelihoods/abs_likelihood.py
|
8ad31af49a0340c934ae371dcc2c870f70851570
|
[
"MIT"
] |
permissive
|
Artaxerces/tramp
|
060bcceb50f59ad5de96ab4eba8aa322651d90cf
|
e5351e65676f2e9a1b90d0f4eaf11d8259b548ef
|
refs/heads/master
| 2023-04-03T04:49:14.345162 | 2021-04-08T08:55:54 | 2021-04-08T08:55:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,252 |
py
|
import numpy as np
from scipy.stats import norm
from .base_likelihood import Likelihood
from ..utils.integration import gaussian_measure_2d
class AbsLikelihood(Likelihood):
    """Likelihood for the absolute-value channel y = |z|.

    Each observation y has the two preimages +y and -y (see ``measure``),
    which is what produces the tanh expressions in the posterior below.
    """
    def __init__(self, y, y_name="y"):
        # y: observed |z| values; y_name labels the variable in the model.
        self.y_name = y_name
        self.size = self.get_size(y)
        self.repr_init()
        self.y = y
    def sample(self, X):
        """Forward sampling: y = |X|."""
        return np.abs(X)
    def math(self):
        """LaTeX label used when rendering the factor graph."""
        return r"$\mathrm{abs}$"
    def compute_backward_posterior(self, az, bz, y):
        """Return posterior mean rz and scalar mean variance vz of z."""
        rz = y * np.tanh(bz * y)
        # 1 / cosh**2 leads to overflow
        v = (y**2) * (1 - np.tanh(bz * y)**2)
        vz = np.mean(v)
        return rz, vz
    def beliefs_measure(self, az, tau_z, f):
        "NB: Assumes that f(bz, y) pair in y."
        # Effective variance of bz; clipped at 0 to stay real-valued.
        u_eff = np.maximum(0, az * tau_z - 1)
        sz_eff = np.sqrt(az * u_eff)
        def f_scaled(xi_b, xi_y):
            # Change of variables from standard-normal (xi_b, xi_y) to (bz, y).
            bz = sz_eff * xi_b
            y = bz / az + xi_y / np.sqrt(az)
            return f(bz, y)
        mu = gaussian_measure_2d(0, 1, 0, 1, f_scaled)
        return mu
    def measure(self, y, f):
        # Sum f over the two preimages +y and -y of the abs channel.
        return f(+y) + f(-y)
    def compute_log_partition(self, az, bz, y):
        """Log partition; logaddexp(bz*y, -bz*y) == log(2*cosh(bz*y)), overflow-safe."""
        logZ = np.sum(
            -0.5*az*(y**2) + np.logaddexp(bz*y, -bz*y)
        )
        return logZ
|
[
"[email protected]"
] | |
f357eb496bccb34a809712c97c9517ac6f0fdd70
|
8ed3d2d285bb7255209b56a5ff9ec83bb4b8f430
|
/setup.py
|
6a083b3b06d7ee5d3ed16d73aacfe015edf07f6e
|
[] |
no_license
|
MarkLuro/requests-html
|
f4af9211353e09908f254a9edc0965c084c59a36
|
f43f3241f0c63cd50bb4286edffcc1f8ee5ae7bd
|
refs/heads/master
| 2021-01-24T02:11:25.628019 | 2018-02-25T13:23:40 | 2018-02-25T13:23:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,023 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pip install twine
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = 'requests-html'
DESCRIPTION = 'HTML Parsing for Humans.'
# NOTE(review): URL points at the `requests` repo, not a requests-html repo --
# confirm this is intentional before publishing.
URL = 'https://github.com/requests/requests'
EMAIL = '[email protected]'
AUTHOR = 'Kenneth Reitz'
VERSION = '0.1.0'
# What packages are required for this module to be executed?
REQUIRED = [
    'requests', 'pyquery', 'html2text', 'fake-useragent', 'parse'
]
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.rst' is present in your MANIFEST.in file!
with io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = '\n' + f.read()
class UploadCommand(Command):
    """Custom ``setup.py upload`` command: build sdist/wheel and push to PyPI."""

    description = 'Build and publish the package.'
    user_options = []

    @staticmethod
    def status(s):
        """Print *s* in bold."""
        print('\033[1m{0}\033[0m'.format(s))

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        # Best-effort cleanup: a missing dist/ directory is not an error.
        try:
            self.status('Removing previous builds…')
            rmtree(os.path.join(here, 'dist'))
        except OSError:
            pass

        self.status('Building Source and Wheel (universal) distribution…')
        build_cmd = '{0} setup.py sdist bdist_wheel --universal'.format(sys.executable)
        os.system(build_cmd)

        self.status('Uploading the package to PyPi via Twine…')
        os.system('twine upload dist/*')

        sys.exit()
# Where the magic happens:
setup(
    name=NAME,
    version=VERSION,
    description=DESCRIPTION,
    long_description=long_description,
    author=AUTHOR,
    author_email=EMAIL,
    url=URL,
    # If your package is a single module, use this instead of 'packages':
    py_modules=['requests_html'],
    # entry_points={
    #     'console_scripts': ['mycli=mymodule:cli'],
    # },
    install_requires=REQUIRED,
    include_package_data=True,
    license='MIT',
    classifiers=[
        # Trove classifiers
        # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy'
    ],
    # $ setup.py publish support.
    # Registers `python setup.py upload` to run the UploadCommand class above.
    cmdclass={
        'upload': UploadCommand,
    },
)
|
[
"[email protected]"
] | |
233da41e7bd6d8bc26423d834ec30979432da47b
|
77f65ea86ebc544c3f3e66c0152086e45669068c
|
/ch09-objects/e42b2_recent_dict.py
|
6449ace10c6bb5805d30a84d5cf9f40f10adaedd
|
[] |
no_license
|
Cptgreenjeans/python-workout
|
e403f48b0694ff4db32fe5fc3f87f02f48a1a68e
|
b9c68520d572bf70eff8e554a8ee9c8702c88e6e
|
refs/heads/master
| 2023-07-16T21:49:14.198660 | 2021-08-29T13:49:12 | 2021-08-29T13:49:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 374 |
py
|
#!/usr/bin/env python3
"""Solution to chapter 9, exercise 42, beyond 2: recent_dict"""
class RecentDict(dict):
    """A dict that keeps at most ``maxsize`` most-recently-inserted keys.

    Keys are coerced to ``str`` on insertion; when capacity is exceeded,
    the oldest-inserted key is discarded.
    """

    def __init__(self, maxsize):
        super().__init__()
        self.maxsize = maxsize

    def __setitem__(self, key, value):
        super().__setitem__(str(key), value)
        if len(self) > self.maxsize:
            oldest = next(iter(self))
            del self[oldest]
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.