blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9dfd42487ccf8e73d4d4d17ab1e7e7ba183ef477
|
9a6323fd69286f2ddce8a755612fb235a4b85956
|
/cruzdb/sequence.py
|
30e04e8a8cfcd79bf5f6b0eedf6e1ed94650416c
|
[
"MIT"
] |
permissive
|
lardenoije/cruzdb-1
|
44b544ede144e11462b33967c6ce63cf7a7cecb0
|
7858dc4da7c2574751ccace41b3a525c11e629dc
|
refs/heads/master
| 2022-12-14T13:06:37.660233 | 2020-08-27T11:55:14 | 2020-08-27T11:55:14 | 289,082,044 | 0 | 0 |
MIT
| 2020-08-20T18:37:47 | 2020-08-20T18:37:45 | null |
UTF-8
|
Python
| false | false | 782 |
py
|
import urllib as U
__all__ = ('sequence', )
def _seq_from_xml(xml):
start = xml.find(">", xml.find("<DNA")) + 1
end = xml.rfind("</DNA>")
return xml[start:end].replace(' ', '').replace('\n', '').strip()
def sequence(db, chrom, start, end):
    """
    return the sequence for a region using the UCSC DAS
    server. note the start is 1-based
    each feature will have its own .sequence method which sends
    the correct start and end to this function.
    >>> sequence('hg18', 'chr2', 2223, 2230)
    'caacttag'
    """
    # Build the DAS URL in two steps: the database name is interpolated
    # now, the segment (chrom:start,end) at request time below.
    url = "http://genome.ucsc.edu/cgi-bin/das/%s" % db
    url += "/dna?segment=%s:%i,%i"
    # NOTE(review): U.urlopen is the Python 2 urllib API (module imported
    # as U above); under Python 3 this would be urllib.request.urlopen.
    # Blocking HTTP request with no timeout or error handling.
    xml = U.urlopen(url % (chrom, start, end)).read()
    return _seq_from_xml(xml)
if __name__ == "__main__":
    # Running the module directly executes the embedded doctests (the
    # example in sequence() requires network access to UCSC).
    import doctest
    doctest.testmod()
|
[
"[email protected]"
] | |
549cb424af0757bf36fd12680c9129b25d719bc8
|
1a95cda55573a6836ac631bc6e16ec312e07759b
|
/server.py
|
494512434222bc9a674e78746f4547874003cbfc
|
[] |
no_license
|
olajowon/dormer-web
|
b021c01694f72b31b8fc498811ec54da99786daa
|
d7a06a7c2be5ca6d34bedff26fbf70cd9e969ae8
|
refs/heads/master
| 2023-03-13T10:43:04.280104 | 2021-03-11T08:39:23 | 2021-03-11T08:39:23 | 320,578,280 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,451 |
py
|
# Created by zhouwang on 2020/11/13.
import os
import sys
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
VENV_DIR = os.path.join(PROJECT_DIR, 'venv')
def start():
    """Deploy front-end assets, install requirements and launch uwsgi.

    Each step shells out via os.system; only the final uwsgi launch has
    its exit status checked, which drives the success/fail message.
    """
    print('#### start\n')
    # \cp bypasses any shell alias (e.g. `cp -i`) so the copy never
    # prompts.  Fix: the backslash must be escaped as \\ -- a bare `\c`
    # is an invalid escape sequence that Python only tolerates with a
    # DeprecationWarning today and will reject in a future release.
    # The resulting runtime command string is unchanged.
    os.system(f'rm -rf {PROJECT_DIR}/static && \\cp -rf {PROJECT_DIR}/ui/dist/static {PROJECT_DIR}/static')
    os.system(f'\\cp -rf {PROJECT_DIR}/ui/dist/index.html {PROJECT_DIR}/templates/index.html')
    # NOTE(review): `source` is a bash builtin; os.system runs /bin/sh,
    # which on dash-based systems lacks it -- confirm deployment shell.
    os.system(f'source {VENV_DIR}/bin/activate && '
              f'pip install -r {PROJECT_DIR}/requirements.txt')
    status = os.system(f'source {VENV_DIR}/bin/activate && cd {PROJECT_DIR} &&'
                       f'uwsgi --ini {PROJECT_DIR}/uwsgi_socket.ini')
    print('#### start %s\n' % ('successful' if status == 0 else 'fail'))
def restart():
    """Redeploy front-end assets, reinstall requirements and reload uwsgi.

    Same asset/requirements steps as start(); the running uwsgi master is
    then reloaded via its pid file instead of being launched fresh.
    """
    print('#### restart\n')
    # Fix: escape the backslash in \cp (alias-bypassing copy) -- a bare
    # `\c` is an invalid escape sequence (deprecated, future error); the
    # runtime command string is byte-identical.
    os.system(f'rm -rf {PROJECT_DIR}/static && \\cp -rf {PROJECT_DIR}/ui/dist/static {PROJECT_DIR}/static')
    os.system(f'\\cp -rf {PROJECT_DIR}/ui/dist/index.html {PROJECT_DIR}/templates/index.html')
    # NOTE(review): `source` requires bash; os.system uses /bin/sh.
    os.system(f'source {VENV_DIR}/bin/activate && '
              f'pip install -r {PROJECT_DIR}/requirements.txt')
    status = os.system(f'source {VENV_DIR}/bin/activate && uwsgi --reload /var/run/dormer-web-uwsgi.pid')
    print('#### restart %s\n' % ('successful' if status == 0 else 'fail'))
def stop():
    """Stop the running uwsgi master identified by its pid file."""
    print('#### stop\n')
    command = f'source {VENV_DIR}/bin/activate && uwsgi --stop /var/run/dormer-web-uwsgi.pid'
    exit_code = os.system(command)
    outcome = 'successful' if exit_code == 0 else 'fail'
    print('#### stop %s\n' % outcome)
def build():
    """Build the front-end bundle with npm inside the ui/ directory."""
    print('#### build\n')
    exit_code = os.system(f'cd {PROJECT_DIR}/ui && npm install && npm run build')
    outcome = 'successful' if exit_code == 0 else 'fail'
    print('#### build %s\n' % outcome)
def migrate():
    """Apply Django database migrations inside the virtualenv."""
    print('#### migrate\n')
    command = f'source {VENV_DIR}/bin/activate && python {PROJECT_DIR}/manage.py migrate'
    exit_code = os.system(command)
    outcome = 'successful' if exit_code == 0 else 'fail'
    print('#### migrate %s\n' % outcome)
def venv():
    """Create ./venv with `python3 -m venv` unless it already exists."""
    if os.path.isdir(VENV_DIR):
        # Nothing to do -- and, as in the original, no message either.
        return
    exit_code = os.system(f'cd {PROJECT_DIR} && python3 -m venv venv')
    print('#### venv %s\n' % ('successful' if exit_code == 0 else 'fail'))
def init():
    # Environment preparation run before every command; currently this
    # only ensures the virtualenv exists.
    venv()
def main():
    """Dispatch the first CLI argument to the matching command.

    Fix: the original indexed sys.argv[1] unconditionally (IndexError when
    run with no argument) and silently ignored unknown commands; both now
    fall through to a usage message instead.
    """
    init()
    # Sub-command name -> handler function.
    commands = {
        'start': start,
        'stop': stop,
        'restart': restart,
        'build': build,
        'migrate': migrate,
    }
    argv = sys.argv
    if len(argv) < 2 or argv[1] not in commands:
        print('usage: python server.py {%s}' % '|'.join(commands))
        return
    commands[argv[1]]()
if __name__ == '__main__':
    # CLI entry point: python server.py {start|stop|restart|build|migrate}
    main()
|
[
"[email protected]"
] | |
e78a3b00445a74f43bada0cdbef0f2f5aeff4fcd
|
9eb972f4623e6eb424238dd747ea0a480efd0e8a
|
/futura_ui/app/ui/dialogs/transfer_production.py
|
012029c15e6f9aad783b6cb7d84ef193cf8d2005
|
[
"BSD-3-Clause"
] |
permissive
|
pjamesjoyce/futura
|
727136dedeebed7fbf464612192f0472c31d1f6d
|
c0f129b4964f2b01a2853828b83d57ed53eabcb8
|
refs/heads/master
| 2023-04-13T02:09:03.476138 | 2023-03-16T10:32:50 | 2023-03-16T10:32:50 | 218,276,474 | 9 | 2 |
BSD-3-Clause
| 2020-01-15T10:05:07 | 2019-10-29T11:59:27 |
Python
|
UTF-8
|
Python
| false | false | 1,163 |
py
|
from PySide2 import QtWidgets, QtGui
from ..utils import load_ui_file
import os
class TransferProductionDialog(QtWidgets.QDialog):
    """Dialog asking how much production to transfer.

    The value can be entered either as a percentage (default) or as an
    absolute amount; two radio buttons toggle the mode, relabelling the
    field and swapping its input mask.
    """

    def __init__(self, parent=None):
        super(TransferProductionDialog, self).__init__(parent)
        # Load the Qt Designer layout that defines the widgets referenced
        # below (newValueLineEdit, transferLabel, the two radio buttons).
        ui_path = 'transfer_production.ui'
        ui_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), ui_path)
        load_ui_file(ui_path, self)
        # Whole numbers only in the value field (0 decimal places).
        validator = QtGui.QDoubleValidator()
        validator.setDecimals(0)
        self.newValueLineEdit.setValidator(validator)
        # Start in percentage mode: up to three digits followed by '%'.
        self.transferLabel.setText('Percentage:')
        self.newValueLineEdit.setInputMask("009%")
        self.percentageRadioButton.clicked.connect(self.radio_button_change)
        self.amountRadioButton.clicked.connect(self.radio_button_change)

    def radio_button_change(self):
        """Relabel the input field and swap its mask to match the mode."""
        if self.percentageRadioButton.isChecked():
            self.transferLabel.setText('Percentage:')
            self.newValueLineEdit.setInputMask("009%")
        elif self.amountRadioButton.isChecked():
            self.transferLabel.setText('Amount:')
            # Fix: setInputMask(None) raises TypeError in PySide2 -- the
            # API takes a str, and the documented way to clear a mask is
            # the empty string.  (Also dropped a leftover debug print.)
            self.newValueLineEdit.setInputMask("")
|
[
"[email protected]"
] | |
6d04867690361f9c47fb28ebc97f304c557ec557
|
ff6248be9573caec94bea0fa2b1e4b6bf0aa682b
|
/output/StudentProblem/10.21.9.62/1/1569576134.py
|
10eaa60b5f53e410b34cd127f6fbef1654f01b12
|
[] |
no_license
|
LennartElbe/codeEvo
|
0e41b1a7705204e934ef71a5a28c047366c10f71
|
e89b329bc9edd37d5d9986f07ca8a63d50686882
|
refs/heads/master
| 2020-12-21T17:28:25.150352 | 2020-03-26T10:22:35 | 2020-03-26T10:22:35 | 236,498,032 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,671 |
py
|
============================= test session starts ==============================
platform darwin -- Python 3.7.4, pytest-5.4.1, py-1.8.1, pluggy-0.13.1
rootdir: /tmp
collected 0 items / 1 error
==================================== ERRORS ====================================
________________________ ERROR collecting test session _________________________
../../../Library/Python/3.7/lib/python/site-packages/_pytest/python.py:513: in _importtestmodule
mod = self.fspath.pyimport(ensuresyspath=importmode)
../../../Library/Python/3.7/lib/python/site-packages/py/_path/local.py:701: in pyimport
__import__(modname)
<frozen importlib._bootstrap>:983: in _find_and_load
???
<frozen importlib._bootstrap>:967: in _find_and_load_unlocked
???
<frozen importlib._bootstrap>:677: in _load_unlocked
???
../../../Library/Python/3.7/lib/python/site-packages/_pytest/assertion/rewrite.py:143: in exec_module
source_stat, co = _rewrite_test(fn, self.config)
../../../Library/Python/3.7/lib/python/site-packages/_pytest/assertion/rewrite.py:328: in _rewrite_test
tree = ast.parse(source, filename=fn)
/usr/local/Cellar/python/3.7.4_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/ast.py:35: in parse
return compile(source, filename, mode, PyCF_ONLY_AST)
E File "/private/tmp/blabla.py", line 17
E elif x is ""_
E ^
E SyntaxError: invalid syntax
=========================== short test summary info ============================
ERROR ../../../../../tmp
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.17s ===============================
|
[
"[email protected]"
] | |
6f7c62cb236795bd7f112d1fae1ae534b8839097
|
a7ea1fa36385301db3c8abdf2281916f09006a2a
|
/057.py
|
e1348e00e7c99c5286e46d67667aa931c9422298
|
[] |
no_license
|
sanand0/euler
|
a3995c155e32dd8415cf31a1558674f9f0f8e425
|
6ae583d94f968e27102fa5bb7216bb0645dcc42c
|
refs/heads/master
| 2020-05-07T09:31:50.356639 | 2017-08-05T04:43:23 | 2017-08-05T04:43:23 | 1,225,954 | 9 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 887 |
py
|
'''
It is possible to show that the square root of two can be expressed as an infinite continued fraction.
sqrt(2) = 1 + 1/(2 + 1/(2 + 1/(2 + ... ))) = 1.414213...
By expanding this for the first four iterations, we get:
1 + 1/2 = 3/2 = 1.5
1 + 1/(2 + 1/2) = 7/5 = 1.4
1 + 1/(2 + 1/(2 + 1/2)) = 17/12 = 1.41666...
1 + 1/(2 + 1/(2 + 1/(2 + 1/2))) = 41/29 = 1.41379...
The next three expansions are 99/70, 239/169, and 577/408, but the eighth expansion, 1393/985, is the first example where the number of digits in the numerator exceeds the number of digits in the denominator.
In the first one-thousand expansions, how many fractions contain a numerator with more digits than denominator?
'''
# Track the numerator/denominator of each expansion of the continued
# fraction for sqrt(2): from n/d the next convergent is (n+2d)/(n+d),
# starting from the first expansion 3/2.
num, den, count = 3, 2, 0
# range()/print() replace the Python-2-only xrange/print statement so the
# script runs on both interpreters; the loop variable no longer shadows
# the builtin iter().
for _ in range(1000):
    num, den = num + den + den, num + den
    if len(str(num)) > len(str(den)):
        count += 1
print(count)
|
[
"[email protected]"
] | |
5e7b99cc19ea8b6717e0e650adb3a97b3ef95fb0
|
6e932aa6ec9424ae0238c559112fdd0214c52be6
|
/ffawp/ch03/9_excel_value_meets_condition_all_worksheets.py
|
381e78707679567f29dfa8a3629e04d9d439b903
|
[] |
no_license
|
LingChenBill/python_first_introduce
|
d1c780dcd3653ef4cda39cc4a0c631a99071f088
|
32ff4a16fe10505fcb49e4762fc573f5f1c62167
|
refs/heads/master
| 2020-07-29T13:03:15.447728 | 2020-06-09T13:39:07 | 2020-06-09T13:39:07 | 209,813,590 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,202 |
py
|
# Date:2020/5/25
# Author:Lingchen
# Mark: Using plain Python (xlrd/xlwt, no pandas), filter every worksheet
#       and keep all rows whose sales amount exceeds $2000.00.
# Usage:
# python 9_excel_value_meets_condition_all_worksheets.py data/sales_2013.xlsx data/output/9_output.xlsx
import sys
from datetime import date
from xlrd import open_workbook, xldate_as_tuple
from xlwt import Workbook
input_file = sys.argv[1]
output_file = sys.argv[2]
output_workbook = Workbook()
output_worksheet = output_workbook.add_sheet('filtered_rows_all_worksheets')
# Zero-based index of the sales-amount column
sales_column_index = 3
# Sales threshold: only rows strictly above this value are kept
threshold = 2000.0
# Flag: are we still on the first worksheet?
first_worksheet = True
with open_workbook(input_file) as workbook:
    # Accumulates the header row plus every matching data row
    data = []
    for worksheet in workbook.sheets():
        # Copy the header row from the first sheet only
        if first_worksheet:
            header_row = worksheet.row_values(0)
            data.append(header_row)
            first_worksheet = False
        # Process the data rows (everything below the header)
        for row_index in range(1, worksheet.nrows):
            # Converted cell values of the current row
            row_list = []
            sale_amount = worksheet.cell_value(row_index, sales_column_index)
            # Keep the row only when its sales amount beats the threshold
            if sale_amount > threshold:
                for column_index in range(worksheet.ncols):
                    cell_value = worksheet.cell_value(row_index, column_index)
                    cell_type = worksheet.cell_type(row_index, column_index)
                    # Cell type 3 is xlrd's date type: convert the Excel
                    # serial number to an MM/DD/YYYY string
                    if cell_type == 3:
                        date_cell = xldate_as_tuple(cell_value, workbook.datemode)
                        date_cell = date(*date_cell[0:3]).strftime('%m/%d/%Y')
                        row_list.append(date_cell)
                    else:
                        row_list.append(cell_value)
            if row_list:
                data.append(row_list)
    # Write all collected rows into the new workbook and save it
    for list_index, output_list in enumerate(data):
        for element_index, element in enumerate(output_list):
            output_worksheet.write(list_index, element_index, element)
    output_workbook.save(output_file)
|
[
"[email protected]"
] | |
807223eb8782e42786914cfe301694dc05950c8c
|
c47340ae6bcac6002961cc2c6d2fecb353c1e502
|
/test/test_ctmvar_get_result_item.py
|
468eb96cb02f551c4285294922b0852f7b500fd4
|
[
"MIT"
] |
permissive
|
rafaeldelrey/controlm_py
|
6d9f56b8b6e72750f329d85b932ace6c41002cbd
|
ed1eb648d1d23e587321227217cbfcc5065535ab
|
refs/heads/main
| 2023-04-23T09:01:32.024725 | 2021-05-19T00:25:53 | 2021-05-19T00:25:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 947 |
py
|
# coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.115
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import controlm_py
from controlm_py.models.ctmvar_get_result_item import CtmvarGetResultItem # noqa: E501
from controlm_py.rest import ApiException
class TestCtmvarGetResultItem(unittest.TestCase):
    """Generated unit-test stubs for the CtmvarGetResultItem model."""

    def setUp(self):
        """No fixture is needed for these stubs."""

    def tearDown(self):
        """Nothing to tear down."""

    def testCtmvarGetResultItem(self):
        """Smoke-test placeholder for CtmvarGetResultItem construction."""
        # FIXME: construct object with mandatory attributes with example values
        # model = controlm_py.models.ctmvar_get_result_item.CtmvarGetResultItem()  # noqa: E501
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
|
[
"[email protected]"
] | |
c0d47f8307c3798e6edc8b33f7c824987feb7e12
|
c848015268e430b10f1bc39a2fd5a6f7a8cda44d
|
/bin/Compare_unique/Sum_unique.py
|
b751b593c4924b7a4001cba2afeabe8f2079e20b
|
[] |
no_license
|
wangpanqiao/Transposition
|
36b87d2f9032170112fce993f17454b6562bb108
|
e102de63df2bcd5f7b41075a447eb937ee753832
|
refs/heads/master
| 2020-08-27T00:07:00.923134 | 2015-12-01T22:30:45 | 2015-12-01T22:30:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,987 |
py
|
#!/opt/Python/2.7.3/bin/python
import sys
from collections import defaultdict
import numpy as np
import re
import os
import argparse
from Bio import SeqIO
def usage():
    """Print the command-line usage string for this script."""
    # Dropped the unused local `test` from the original; it served no
    # purpose.  The print() call form is valid on Python 2 (single arg)
    # and Python 3 alike, unlike the original print statement.
    message='''
python Sum_unique.py --input RIL275_RelocaTEi.CombinedGFF.characterized.unique_mPing.gff
    '''
    print(message)
def fasta_id(fastafile):
    """Return a dict keyed by every record id found in *fastafile*.

    Values are always 1; callers use the dict purely as a membership set.
    The defaultdict(str) default mirrors the original implementation.
    """
    ids_seen = defaultdict(str)
    for record in SeqIO.parse(fastafile, "fasta"):
        ids_seen[record.id] = 1
    return ids_seen
#split unit[8] of gff
def gff_attr8(unit8):
    """Parse a GFF attribute string ("k1=v1;k2=v2;...") into a dict.

    Missing keys look up as '' because the result is a defaultdict(str).
    A non-empty chunk without exactly one '=' raises ValueError, matching
    the original unpacking behaviour.
    """
    parsed = defaultdict(str)
    for chunk in unit8.split(';'):
        if chunk == '':
            continue
        key, value = chunk.split('=')
        parsed[key] = value
    return parsed
#correct mping index for these not accurate calls
def readtable_ril_mping_correct(infile):
    """Build a correction map for inaccurately-called mPing positions.

    Reads the ``*.overlap_ril`` table (two joined GFF records per line,
    columns 0-8 and 9-17).  When the left-hand call's TSD is unreliable
    ('supporting_junction' or not exactly 3 bases) and the right-hand
    call's TSD is reliable, map the left "chr:start-end" key to the right
    position.  Only the first reliable partner per position is kept.
    """
    mping_correct = defaultdict(lambda : str())
    with open (infile, 'r') as filehd:
        for line in filehd:
            line = line.rstrip()
            if len(line) > 2:
                unit = re.split(r'\t',line)
                # Only lines joining records from two different strains.
                if not unit[1] == unit[10]:
                    mping= '%s:%s-%s' %(unit[0], unit[3], unit[4])
                    attrs1 = gff_attr8(unit[8])
                    attrs2 = gff_attr8(unit[17])
                    if attrs1['TSD'] == 'supporting_junction' or not len(attrs1['TSD']) == 3:
                        # Fix: `in` replaces dict.has_key(), which was
                        # removed in Python 3.
                        if mping not in mping_correct:
                            if not attrs2['TSD'] == 'supporting_junction' and len(attrs2['TSD']) == 3:
                                mping_correct[mping] = '%s:%s-%s' %(unit[9], unit[12], unit[13])
    return mping_correct
##overlap with ril
#Chr1 RIL231_0 transposable_element_attribute 4228091 4228092 + . . ID=Chr1.4228092.spanners;Strain=RIL231_0;avg_flankers=6;spanners=0;type=homozygous;TE=mping;TSD=TT Chr1 RIL231_0
#some of mPing insertion sites are not accurate. we create a dict to store correct index of this mping using their overlap.
#the resulted allele frequency should have correct position for all the mping
def readtable_ril(infile, mping_correct):
    """Collect, per mPing position, the RIL numbers supporting it.

    Each qualifying line joins two GFF records from different strains;
    both positions are recorded, after substituting corrected positions
    from *mping_correct* where available.  Returns a nested dict
    data[position][ril] = 1 and echoes each pair to stdout.
    """
    data = defaultdict(lambda : defaultdict(lambda : int()))
    r = re.compile(r'RIL(\d+)')
    with open (infile, 'r') as filehd:
        for line in filehd:
            line = line.rstrip()
            if len(line) > 2:
                unit = re.split(r'\t',line)
                if not unit[1] == unit[10]:
                    mping1= '%s:%s-%s' %(unit[0], unit[3], unit[4])
                    mping2= '%s:%s-%s' %(unit[9], unit[12], unit[13])
                    ril1 = r.search(unit[1]).groups(0)[0] if r.search(unit[1]) else 'NA'
                    ril2 = r.search(unit[10]).groups(0)[0] if r.search(unit[10]) else 'NA'
                    # Fix: `in` replaces Python-2-only dict.has_key().
                    if mping1 in mping_correct:
                        mping1 = mping_correct[mping1]
                    if mping2 in mping_correct:
                        mping2 = mping_correct[mping2]
                    # Fix: print() call form works on Python 2 and 3;
                    # the original print statement is a py3 SyntaxError.
                    print('%s\t%s\t%s\t%s' % (mping1, ril1, mping2, ril2))
                    data[mping1][ril1] = 1
                    data[mping2][ril2] = 1
    return data
##overlap with HEG4
def readtable_nonref(infile, mping_correct):
    """Collect, per (corrected) mPing position, RILs sharing it with HEG4.

    Reads the ``*.overlap_ref`` table (one GFF record per line) and
    returns a nested dict data[position][ril] = 1, substituting corrected
    positions from *mping_correct* where available.
    """
    data = defaultdict(lambda : defaultdict(lambda : int()))
    r = re.compile(r'RIL(\d+)')
    with open (infile, 'r') as filehd:
        for line in filehd:
            line = line.rstrip()
            if len(line) > 2:
                unit = re.split(r'\t',line)
                mping= '%s:%s-%s' %(unit[0], unit[3], unit[4])
                ril = r.search(unit[1]).groups(0)[0] if r.search(unit[1]) else 'NA'
                # Fix: `in` replaces dict.has_key(), removed in Python 3.
                if mping in mping_correct:
                    mping = mping_correct[mping]
                data[mping][ril] = 1
    return data
##unique mping
def readtable(infile):
    """Count, per RIL number, the unique-mPing rows present in *infile*.

    The RIL number is pulled from column 2 via the RIL(\\d+) pattern;
    rows whose second column does not match are counted under 'NA'.
    """
    counts = defaultdict(lambda : int())
    ril_pat = re.compile(r'RIL(\d+)')
    with open(infile, 'r') as handle:
        for raw in handle:
            raw = raw.rstrip()
            if len(raw) <= 2:
                continue
            fields = re.split(r'\t', raw)
            match = ril_pat.search(fields[1])
            ril = match.groups(0)[0] if match else 'NA'
            counts[ril] += 1
    return counts
def main():
    """Summarise unique/shared mPing insertions across RILs (Python 2).

    Reads <prefix>.overlap_ril and <prefix>.overlap_ref derived from the
    --input GFF, then writes allele-frequency and shared/unique tables.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input')
    parser.add_argument('-o', '--output')
    parser.add_argument('-v', dest='verbose', action='store_true')
    args = parser.parse_args()
    # len(None) raises TypeError when --input is absent; that exception is
    # how the script falls through to usage() -- intentional, if unusual.
    try:
        len(args.input) > 0
    except:
        usage()
        sys.exit(2)
    # Strip two extensions (e.g. ".unique_mPing.gff") to get the prefix.
    prefix = os.path.splitext(os.path.splitext(args.input)[0])[0]
    #mping_correct_index = defaultdict(lambda : str())
    mping_correct_index = readtable_ril_mping_correct('%s.overlap_ril' %(prefix))
    mping_ovlp_rils = readtable_ril('%s.overlap_ril' %(prefix), mping_correct_index)
    mping_ovlp_heg4 = readtable_nonref('%s.overlap_ref' %(prefix), mping_correct_index)
    # Parses "chrom:start-end" position keys back into their parts.
    r = re.compile(r'(\w+):(\d+)-(\d+)')
    ##mPing_allele_frequency
    ofile = open('%s.mping.ril.frequency' %(prefix), 'w')
    ofile1 = open('%s.mping.ril.list' %(prefix), 'w')
    for mping in mping_ovlp_rils.keys():
        m = r.search(mping)
        chro, start, end = ['', 0, 0]
        if m:
            chro = m.groups(0)[0]
            start = m.groups(0)[1]
            end = m.groups(0)[2]
        count = len(mping_ovlp_rils[mping].keys())
        print >> ofile1, '%s\t%s' %(mping, ','.join(map(str, mping_ovlp_rils[mping].keys())))
        # Frequency denominator 275 = number of RILs in this population
        # (cf. the RIL275 input file named in usage()).
        print >> ofile, '%s\t%s\t%s\t%s\t%s\t%s\t%s' %(chro, start, end, mping, '+', count, float(count)/275)
    ofile.close()
    ofile1.close()
    ##RILs shared and unique mPing
    #shared with ril
    ril_mping_count = defaultdict(lambda : int())
    for mping in mping_ovlp_rils.keys():
        for ril in mping_ovlp_rils[mping].keys():
            # Count only mPings this RIL does NOT also share with HEG4.
            # (Reading the defaultdict inserts missing keys as a side
            # effect; harmless here since only > 0 entries matter later.)
            if mping_ovlp_heg4[mping][ril] == 0:
                ril_mping_count[ril] += 1
    #shared with heg4
    heg4_mping_count = defaultdict(lambda : int())
    for mping in mping_ovlp_heg4.keys():
        for ril in mping_ovlp_heg4[mping].keys():
            if mping_ovlp_heg4[mping][ril] > 0:
                heg4_mping_count[ril] += 1
    #unique
    unique_mping = readtable(args.input)
    #output table
    ofile = open('%s.mping.shared_unique_table.txt' %(prefix), 'w')
    print >> ofile, 'Sample\tShared_HEG4\tShared_RILs\tShared\tUnique'
    # Sort RIL ids numerically (keys are digit strings like '231').
    for ril in sorted(heg4_mping_count.keys(), key=int):
        shared_heg4 = heg4_mping_count[ril]
        shared_rils = ril_mping_count[ril]
        shared = int(shared_heg4) + int(shared_rils)
        unique = unique_mping[ril]
        print >> ofile, 'RIL%s\t%s\t%s\t%s\t%s' %(ril, shared_heg4, shared_rils, shared, unique)
    ofile.close()
    print 'Sample\tUnique_mPing'
    #unique_mping = readtable(args.input)
    for ril in sorted(unique_mping.keys(), key=int):
        print 'RIL%s\t%s' %(ril, unique_mping[ril])
if __name__ == '__main__':
    # Script entry point.
    main()
|
[
"[email protected]"
] | |
5c39feec3f3cde848840efa165bb4fa1ed38e075
|
4bc5f6cc69448d54a7d0fd3be19317613999ceb3
|
/authentication-with-flask-login/app/forms.py
|
05908ee49f2e2d0610662672ff5bbe608e021d63
|
[] |
no_license
|
ThiaguinhoLS/flask-repo
|
9aee2b6ff7bf9d61001ee3e3cbea1478caf108cd
|
81e68c85ee8a1560a33bbaf78cbcb581c351ebb9
|
refs/heads/master
| 2020-04-26T16:24:32.721289 | 2019-03-12T06:26:27 | 2019-03-12T06:26:27 | 173,677,188 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 711 |
py
|
# -*- coding: utf-8 -*-
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import DataRequired, EqualTo
class RegisterForm(FlaskForm):
    """Registration form: username plus a password entered twice."""
    username = StringField("Username", validators=[DataRequired()])
    password = PasswordField("Password", validators=[DataRequired()])
    # EqualTo("password") makes the confirmation match the field above.
    confirm_password = PasswordField(
        "Confirm Password",
        validators=[DataRequired(), EqualTo("password")]
    )
    submit = SubmitField("Register")
class LoginForm(FlaskForm):
    """Login form: username and password, both required."""
    username = StringField("Username", validators=[DataRequired()])
    password = PasswordField("Password", validators=[DataRequired()])
    submit = SubmitField("Login")
|
[
"[email protected]"
] | |
9f635f6fa32ef161386cd71ad13f0caeb9e69192
|
419d6346d722589ecff72a33f2431775f9bf3dfa
|
/sampleCode/sample8/cities.py
|
8453e6fb5a1129f1be656645813d88e7dec5d11d
|
[] |
no_license
|
wzmf038827/pythonFlask
|
dc414c37322ace036a1b9858ce5626a59dcbda4e
|
216db3c846ecc7a49c7f3cc7d1b15d6d3be7905a
|
refs/heads/master
| 2023-04-11T17:44:52.093854 | 2021-05-03T06:06:11 | 2021-05-03T06:06:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,123 |
py
|
import os
import sqlite3
from flask_sqlalchemy import SQLAlchemy
from main import app
# Resolve paths relative to this file so the script works from any CWD.
basePath = os.path.abspath(os.path.dirname(__file__))
cityPath = os.path.join(basePath, 'citys.db')
# Raw sqlite3 sanity check: open citys.db and read the whole `city`
# table at import time.  (The print strings are user-facing Chinese
# status messages: "database opened successfully" / "select succeeded".)
conn = sqlite3.connect(cityPath)
print('開啟資料庫成功')
c = conn.cursor()
cursor = c.execute("select * from city")
print(cursor.__class__)
# Materialise the rows before closing the connection.
citys = list(cursor)
print("select 成功")
conn.close()
# NOTE(review): Flask-SQLAlchemy is pointed at a different file
# (citys.sqlite) than the raw check above (citys.db) -- confirm that
# this split is intentional.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basePath,'citys.sqlite')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
class City(db.Model):
    """ORM model mapping one row of the `city` table."""
    __tablename__ = 'city'
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # City name; must be unique across the table.
    cityName = db.Column(db.String(64), unique=True)
    continent = db.Column(db.String(64), nullable=False)
    country = db.Column(db.String(64), nullable=False)
    # Image reference used by the UI (presumably a file name/path --
    # confirm against the templates).
    image = db.Column(db.String(64), nullable=False)
    description = db.Column(db.Text)
    # Geographic coordinates; units presumably decimal degrees -- confirm.
    lat = db.Column(db.Float, nullable=False)
    lon = db.Column(db.Float, nullable=False)
    url = db.Column(db.String(256))
def getAllCities():
    """Return every City row from the database.

    Fix: the original queried and printed the list but fell off the end,
    so callers always received None; the list is now returned.  The
    debug print is kept to preserve existing console output.
    """
    cityList = City.query.all()
    print(cityList)
    return cityList
|
[
"[email protected]"
] | |
a7c6313e7d96e4efb70302abea076553c2bc4427
|
d9c1890cf137489fa24bf0419d565b1e1edbd2cd
|
/circus/web/server.py
|
6d6a07d58bb2fade819c4a6daa5b3e13ab132590
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
magopian/circus
|
47f7b6fcab833eaec19af6e9822d769bc9dd5050
|
e2eef7f008050c30e43d1a10d615dd920fb6583a
|
refs/heads/master
| 2021-01-18T04:54:23.720743 | 2012-11-11T19:07:39 | 2012-11-11T19:07:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,207 |
py
|
import socket
from bottle import ServerAdapter
class SocketIOServer(ServerAdapter):
    """Bottle server adapter that serves the app via gevent-socketio.

    Accepts an optional ``fd`` config entry: when present, the server
    binds to that already-open socket file descriptor (pre-bound by the
    supervisor) instead of (host, port).
    """
    def __init__(self, host='127.0.0.1', port=8080, **config):
        super(SocketIOServer, self).__init__(host, port, **config)
        # Optional inherited socket fd; normalised to int when provided.
        self.fd = config.get('fd')
        if self.fd is not None:
            self.fd = int(self.fd)
    def run(self, handler):
        # Imported lazily so gevent_socketio is only required when
        # actually serving.  The local name shadows this class, which is
        # harmless inside this method.
        try:
            from socketio.server import SocketIOServer
        except ImportError:
            raise ImportError('You need to install gevent_socketio')
        from gevent import monkey
        from gevent_zeromq import monkey_patch
        # Monkey-patch the stdlib and zmq before any sockets are created.
        monkey.patch_all()
        monkey_patch()
        # NOTE(review): self.options is presumably populated from
        # **config by bottle's ServerAdapter -- confirm for the bottle
        # version in use.
        namespace = self.options.get('namespace', 'socket.io')
        policy_server = self.options.get('policy_server', False)
        if self.fd is not None:
            sock = socket.fromfd(self.fd, socket.AF_INET, socket.SOCK_STREAM)
        else:
            sock = (self.host, self.port)
        socket_server = SocketIOServer(sock, handler,
                                       namespace=namespace,
                                       policy_server=policy_server)
        # Expose the server object on the handler so the application can
        # reach it, then block serving forever.
        handler.socket_server = socket_server
        socket_server.serve_forever()
|
[
"[email protected]"
] | |
4d952b651224199a592af47dc19fd9166eb94aa9
|
6e12c2e6d453ea1caf64c0eafaf3410b30f434e0
|
/shop/migrations/0011_auto_20160607_1347.py
|
b6dc57869b17135e8c679bb3ac3d6d5cf83edca0
|
[] |
no_license
|
andreynovikov/django-shop
|
43b66ec639037473cd72f7480f83811d911104fb
|
8f843b0b15354903a335c324daa65714bfb2f8cc
|
refs/heads/master
| 2023-04-28T01:26:16.938227 | 2023-04-22T15:33:29 | 2023-04-22T15:33:29 | 43,815,663 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,046 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: introduces the Supplier model and
    # adds a required manufacturer FK to Product.  Only comments should
    # ever be edited here -- the operations are replayed verbatim.
    dependencies = [
        ('shop', '0010_manufacturer'),
    ]
    operations = [
        # New Supplier reference table (id + localized name).
        migrations.CreateModel(
            name='Supplier',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
                ('name', models.CharField(verbose_name='название', max_length=100)),
            ],
            options={
                'verbose_name_plural': 'поставщики',
                'verbose_name': 'поставщик',
            },
        ),
        # Product.manufacturer -> Manufacturer, PROTECT on delete.
        # default=1 only backfills existing rows during this migration
        # (preserve_default=False drops it from the field afterwards).
        migrations.AddField(
            model_name='product',
            name='manufacturer',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='shop.Manufacturer', verbose_name='Производитель', default=1),
            preserve_default=False,
        ),
    ]
|
[
"[email protected]"
] | |
0c4bb10e987777fae9cc78ed90940998e95d1024
|
6805b5299355005536d7408a2092db5cdf7f78d3
|
/utils/saver.py
|
94ca9f01d939bba00e341c74d18af9c619879727
|
[] |
no_license
|
harisris/draw-mnist
|
050609c9bcc3a1690836467179660af186d544a9
|
7cfaa76336714ec4c290d84243115b5184142768
|
refs/heads/master
| 2021-01-12T10:03:39.163704 | 2016-06-08T09:19:53 | 2016-06-08T09:19:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,863 |
py
|
import cslab_environ
import fnmatch
import logger
import os
import yaml
import tensorflow as tf
kModelOptFilename = 'model_opt.yaml'
kDatasetOptFilename = 'dataset_opt.yaml'
kMaxToKeep = 2
class Saver():
    """Saves/restores TensorFlow checkpoints together with model and
    dataset option YAML files, all kept in a single folder."""

    def __init__(self, folder, model_opt=None, data_opt=None):
        """Create *folder* if needed and persist any provided options.

        Args:
            folder: checkpoint directory.
            model_opt: optional dict dumped to model_opt.yaml.
            data_opt: optional dict dumped to dataset_opt.yaml.
        """
        if not os.path.exists(folder):
            os.makedirs(folder)
        self.folder = folder
        self.log = logger.get()
        # Built lazily on first save()/restore() so variables created
        # after construction are still captured.
        self.tf_saver = None
        if model_opt is not None:
            self.save_opt(os.path.join(folder, kModelOptFilename), model_opt)
        if data_opt is not None:
            self.save_opt(os.path.join(folder, kDatasetOptFilename), data_opt)

    def save(self, sess, global_step=None):
        """Save a checkpoint of all variables in *sess*.

        Args:
            sess: active TF session.
            global_step: optional step number appended to the file name.
        """
        if self.tf_saver is None:
            self.tf_saver = tf.train.Saver(
                tf.all_variables(), max_to_keep=kMaxToKeep)
        ckpt_path = os.path.join(self.folder, 'model.ckpt')
        self.log.info('Saving checkpoint to {}'.format(ckpt_path))
        self.tf_saver.save(sess, ckpt_path, global_step=global_step)

    def save_opt(self, fname, opt):
        """Dump an options dict to *fname* as YAML."""
        with open(fname, 'w') as f:
            yaml.dump(opt, f, default_flow_style=False)

    def get_latest_ckpt(self):
        """Get the latest checkpoint filename in a folder.

        Returns:
            (ckpt_path, meta_graph_path, step) for the highest step found.
        Raises:
            Exception: if no checkpoint file exists in the folder.
        """
        ckpt_fname_pattern = os.path.join(self.folder, 'model.ckpt-*')
        ckpt_fname_list = []
        for fname in os.listdir(self.folder):
            fullname = os.path.join(self.folder, fname)
            if fnmatch.fnmatch(fullname, ckpt_fname_pattern):
                if not fullname.endswith('.meta'):
                    ckpt_fname_list.append(fullname)
        if len(ckpt_fname_list) == 0:
            raise Exception('No checkpoint file found.')
        # The step is the numeric suffix after the last '-'.
        ckpt_fname_step = [int(fn.split('-')[-1]) for fn in ckpt_fname_list]
        latest_step = max(ckpt_fname_step)
        latest_ckpt = os.path.join(self.folder,
                                   'model.ckpt-{}'.format(latest_step))
        latest_graph = os.path.join(self.folder,
                                    'model.ckpt-{}.meta'.format(latest_step))
        return (latest_ckpt, latest_graph, latest_step)

    def get_ckpt_info(self):
        """Get info of the latest checkpoint (paths, options, step, id)."""
        if not os.path.exists(self.folder):
            raise Exception('Folder "{}" does not exist'.format(self.folder))
        model_id = os.path.basename(self.folder.rstrip('/'))
        self.log.info('Restoring from {}'.format(self.folder))
        model_opt_fname = os.path.join(self.folder, kModelOptFilename)
        data_opt_fname = os.path.join(self.folder, kDatasetOptFilename)
        if os.path.exists(model_opt_fname):
            with open(model_opt_fname) as f_opt:
                # Fix: yaml.load without an explicit Loader is an error on
                # PyYAML >= 6 and unsafe on older versions; these files
                # hold plain option dicts, so safe_load is sufficient.
                model_opt = yaml.safe_load(f_opt)
        else:
            model_opt = None
        self.log.info('Model options: {}'.format(model_opt))
        if os.path.exists(data_opt_fname):
            with open(data_opt_fname) as f_opt:
                data_opt = yaml.safe_load(f_opt)
        else:
            data_opt = None
        ckpt_fname, graph_fname, latest_step = self.get_latest_ckpt()
        self.log.info('Restoring at step {}'.format(latest_step))
        return {
            'ckpt_fname': ckpt_fname,
            'graph_fname': graph_fname,
            'model_opt': model_opt,
            'data_opt': data_opt,
            'step': latest_step,
            'model_id': model_id
        }

    def restore(self, sess, ckpt_fname=None):
        """Restore the checkpoint file (latest one when not specified)."""
        if ckpt_fname is None:
            ckpt_fname = self.get_latest_ckpt()[0]
        if self.tf_saver is None:
            self.tf_saver = tf.train.Saver(tf.all_variables())
        self.tf_saver.restore(sess, ckpt_fname)
|
[
"[email protected]"
] | |
aa00df2b260e6c6ab031c83ba1170fc89fa50eee
|
f6814281de06f6d6eff1cc9b40f9596274455ece
|
/segmentation/local_maxima.py
|
7ba208efda2582ce2001beca2c44eabccf246bdb
|
[
"MIT"
] |
permissive
|
file-campuran/scikit-image-clustering-scripts
|
806ad366202f3054bf0f602e414c083428372177
|
2197f23b904463b358421bc8a8bd85a3cb4cc2f1
|
refs/heads/master
| 2022-10-07T23:17:18.814705 | 2020-06-02T18:00:37 | 2020-06-02T18:00:37 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,000 |
py
|
#!/usr/bin/env python
# http://stackoverflow.com/questions/9111711/get-coordinates-of-local-maxima-in-2d-array-above-certain-value
import numpy as np
from osgeo import gdal
import scipy.ndimage as ndimage
import scipy.ndimage.filters as filters
# initialize driver
# Shared GTiff driver handle used by write_image() below.
driver = gdal.GetDriverByName('GTiff')
def write_image(img, filename):
    """
    Write img array to a file with the given filename

    Args:
        img (Band)
        filename (str)
    """
    # GDAL's Create() takes (name, x_size, y_size): columns then rows.
    n_cols = img.shape[1]
    n_rows = img.shape[0]
    out_ds = driver.Create(filename, n_cols, n_rows)
    out_ds.GetRasterBand(1).WriteArray(img)
# load original image
dataset = gdal.Open('img/mozambique-after-subset.tif')
band = dataset.GetRasterBand(1)
img = band.ReadAsArray().astype(np.uint8)
# position of local maxima: a pixel is a candidate when it equals the
# maximum of its 5x5 neighbourhood
data_max = filters.maximum_filter(img, 5)
maxima = (img == data_max)
data_min = filters.minimum_filter(img, 5)
# keep only candidates with enough local contrast (max - min > 150);
# flat plateaus and low-contrast regions are zeroed out
diff = ((data_max - data_min) > 150)
maxima[diff == 0] = 0
write_image(maxima, 'img/maxima.tif')
|
[
"[email protected]"
] | |
6ba44a2283a1c3e70f0210af98521fbb9c634623
|
294c35259125e1c55cfdd5247cee651a07b3cc01
|
/src/map/migrations/0001_initial.py
|
52a0310446ae9821e95ec290c04c7762db920978
|
[
"MIT"
] |
permissive
|
andgein/sis-2016-winter-olymp
|
a82557d61b7748c6813e31779bcc74e92874a96c
|
70962d861b3cf69e982949878ae4dccc2818b618
|
refs/heads/master
| 2021-08-30T15:00:10.691639 | 2017-12-18T11:05:27 | 2017-12-18T11:05:27 | 114,627,656 | 2 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,224 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-31 05:13
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import djchoices.choices
import relativefilepathfield.fields
class Migration(migrations.Migration):
    """Auto-generated initial schema for the ``map`` app.

    Creates the tile hierarchy — AbstractTile (with a content-type pointer in
    the django-polymorphic style), Problem, AbstractBonus and the concrete
    bonus tiles — plus per-user TileStatus rows.  Avoid hand-editing a
    migration that has already been applied: Django compares this file
    against recorded migration history.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('contenttypes', '0002_remove_content_type_name'),
    ]
    operations = [
        # Base tile: grid position, ejudge problem binding, statement PDF,
        # and an optional auto-open delay in minutes (NULL = never auto-open).
        migrations.CreateModel(
            name='AbstractTile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('row', models.PositiveIntegerField(help_text='Номер строки')),
                ('column', models.PositiveIntegerField(help_text='Номер колонки')),
                ('ejudge_short_name', models.CharField(db_index=True, max_length=255)),
                ('name', models.CharField(max_length=255)),
                ('statement_file_name', relativefilepathfield.fields.RelativeFilePathField(match='.*\\.pdf', path=settings.PROBLEMS_STATEMENTS_DIR)),
                ('automatic_open_time', models.PositiveIntegerField(blank=True, default=None, help_text='Время в минутах, после которого задача откроется автоматически. Если NULL, то не откроется', null=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        # Per-user progress on a tile: Closed/Opened/Read/Tried/Solved.
        migrations.CreateModel(
            name='TileStatus',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status', models.PositiveIntegerField(choices=[(0, 'Closed'), (1, 'Opened'), (2, 'Read'), (3, 'Tried'), (4, 'Solved')], db_index=True, validators=[djchoices.choices.ChoicesValidator({0: 'Closed', 1: 'Opened', 2: 'Read', 3: 'Tried', 4: 'Solved'})])),
            ],
            options={
                'ordering': ['status'],
            },
        ),
        # Multi-table-inheritance child of AbstractTile: base class for bonuses.
        migrations.CreateModel(
            name='AbstractBonus',
            fields=[
                ('abstracttile_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='map.AbstractTile')),
            ],
            options={
                'abstract': False,
            },
            bases=('map.abstracttile',),
        ),
        # Problem tile: scoring parameters on top of the base tile.
        migrations.CreateModel(
            name='Problem',
            fields=[
                ('abstracttile_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='map.AbstractTile')),
                ('solved_award', models.PositiveIntegerField(help_text='Приз за правильное решение задачи')),
                ('wrong_penalty', models.PositiveIntegerField(help_text='Штраф за неправильную попытку')),
            ],
            options={
                'abstract': False,
            },
            bases=('map.abstracttile',),
        ),
        migrations.AddField(
            model_name='tilestatus',
            name='tile',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statuses', to='map.AbstractTile'),
        ),
        migrations.AddField(
            model_name='tilestatus',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tiles_statuses', to=settings.AUTH_USER_MODEL),
        ),
        # Content-type pointer letting a base-class queryset yield subclass
        # instances (django-polymorphic convention).
        migrations.AddField(
            model_name='abstracttile',
            name='polymorphic_ctype',
            field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='polymorphic_map.abstracttile_set+', to='contenttypes.ContentType'),
        ),
        # Concrete bonus tiles: each is an empty subclass distinguished only
        # by its type (behaviour lives in application code).
        migrations.CreateModel(
            name='CallMasterBonus',
            fields=[
                ('abstractbonus_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='map.AbstractBonus')),
            ],
            options={
                'abstract': False,
            },
            bases=('map.abstractbonus',),
        ),
        migrations.CreateModel(
            name='GetAnyTestBonus',
            fields=[
                ('abstractbonus_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='map.AbstractBonus')),
            ],
            options={
                'abstract': False,
            },
            bases=('map.abstractbonus',),
        ),
        migrations.CreateModel(
            name='GetTangerinesBonus',
            fields=[
                ('abstractbonus_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='map.AbstractBonus')),
            ],
            options={
                'abstract': False,
            },
            bases=('map.abstractbonus',),
        ),
        migrations.CreateModel(
            name='OpenAnyTileBonus',
            fields=[
                ('abstractbonus_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='map.AbstractBonus')),
            ],
            options={
                'abstract': False,
            },
            bases=('map.abstractbonus',),
        ),
        migrations.CreateModel(
            name='OpenWideLocalityBonus',
            fields=[
                ('abstractbonus_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='map.AbstractBonus')),
            ],
            options={
                'abstract': False,
            },
            bases=('map.abstractbonus',),
        ),
    ]
|
[
"[email protected]"
] | |
02a32ea594f73011395598d554f22b7b5b9e2724
|
d52a193fbcc4fda5dddef59c5bd691933c81fe0b
|
/isomaticAppWeb/preguntaDiez.py
|
762c49090454dde7f39501730cea10e8dbf6c5c9
|
[] |
no_license
|
solprmat/solpromatcore
|
69c299357bb6f307b2822d985692b06db6f4c73c
|
56dabd4f0189831215578bec5a623e99aeea143f
|
refs/heads/master
| 2022-12-10T16:56:46.629211 | 2019-11-09T20:58:08 | 2019-11-09T20:58:08 | 205,091,159 | 0 | 0 | null | 2022-12-08T06:05:07 | 2019-08-29T05:57:38 |
Python
|
UTF-8
|
Python
| false | false | 412 |
py
|
from django import forms
class PreguntanDiez(forms.Form):
    """Quiz form for question 10: one multiple-choice answer rendered as
    radio buttons, plus the timestamp at which the answer was recorded."""

    # (value, label) pairs for the radio choices.
    RESPUESTA_PREGUNTA_DIEZ = (
        ('a', 'a. Ana'),
        ('b', 'b. Jana'),
    )
    respuesta = forms.TypedChoiceField(
        # label='preubas',
        choices=RESPUESTA_PREGUNTA_DIEZ,
        widget=forms.RadioSelect(attrs={
            'class': 'custom-control-indicator',
        })
    )
    # Bug fix: the original line assigned the field *class*
    # (``forms.DateTimeField`` without parentheses), so Django never saw it
    # as a bound form field. It must be instantiated.
    fecha_registro_P010 = forms.DateTimeField()
|
[
"[email protected]"
] | |
b5612e469ab15c7cfa726e1f6d7ef51c8a9253ec
|
fe4073028c22079c9908bba9d1f558256b3d3a73
|
/app.py
|
d3eb02e6315a5462f6f5a86462442f07596f4069
|
[] |
no_license
|
romanannaev/BlogFlaskFinish
|
fb35c8fbe3a77753f0664641215cc44f4617b98f
|
3aaecab0b1c3694aa584229963d7c521a301c33e
|
refs/heads/master
| 2021-10-22T08:45:07.250060 | 2020-01-08T14:29:54 | 2020-01-08T14:29:54 | 231,746,001 | 0 | 0 | null | 2021-03-20T02:37:58 | 2020-01-04T10:37:40 |
Python
|
UTF-8
|
Python
| false | false | 2,169 |
py
|
import os
from flask import Flask, request
from flask_sqlalchemy import SQLAlchemy
#create admin
from flask_admin import Admin
from flask_admin.contrib.sqla import ModelView
#flask security, the storage of users, roles
from flask_security import SQLAlchemyUserDatastore
from flask_security import Security
# Application factory-less setup: configuration comes from the class named in
# the APP_SETTINGS environment variable.
app = Flask(__name__)
app.config.from_object(os.environ['APP_SETTINGS'])
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
import view
from posts.blueprint import posts
app.register_blueprint(posts, url_prefix='/blog') # register the blog blueprint under /blog
### Admin ####
from models import *
# Integration between Flask-Admin and Flask-Security:
from flask_security import current_user
from flask import redirect, url_for, request
class AdminMixin:
    # Only users holding the 'admin' role may see admin views.
    def is_accessible(self):
        return current_user.has_role('admin')
    def inaccessible_callback(self, name, **kwargs):
        # Unauthorised access to /admin/ redirects to the login page,
        # preserving the originally requested URL in ?next=.
        return redirect(url_for('security.login', next=request.url))
# Base model view: regenerate the slug whenever a Post/Tag is created or
# edited from the admin panel.
class BaseModelView(ModelView):
    def on_model_change(self, form, model , is_created):
        model.generate_slug()
        return super(BaseModelView, self).on_model_change(form, model, is_created)
# Admin-restricted view for plain models.
class AdminView(AdminMixin, ModelView):
    pass
# Admin-restricted view for the admin index page itself.
from flask_admin import AdminIndexView
class HomeAdminView(AdminMixin, AdminIndexView):
    pass
class PostAdminView(AdminMixin, BaseModelView):
    form_columns = ['title', 'body', 'tags', 'image']
class TagAdminView(AdminMixin, BaseModelView):
    form_columns = ['name', 'posts']
admin = Admin(app, 'FlaskApp', url='/', index_view=HomeAdminView(name='Home'))
admin.add_view(PostAdminView(Post, db.session)) # was ModelView, now the slug-aware PostAdminView
admin.add_view(TagAdminView(Tag, db.session))
## flask security ##
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
security = Security(app, user_datastore)
if __name__ == '__main__':
    app.run()
|
[
"[email protected]"
] | |
e34ddccd94638c82612a301dcb783977751ee558
|
685038d4be188fa72e9dba1d2213a47ee3aa00a2
|
/ECOS2021/Demands/Inputs/Surveys/A/S4/Jul_S4_A.py
|
39db06fef88624a9f7eead973c903d14f3b922fc
|
[] |
no_license
|
CIE-UMSS/Tradeoff-between-Installed-Capacity-and-Unserved-Energy
|
e5599e4e4ac60b97f0c4c57c5de95e493b1b5ac4
|
459f31552e3ab57a2e52167ab82f8f48558e173c
|
refs/heads/master
| 2023-06-01T18:09:29.839747 | 2021-06-19T15:56:26 | 2021-06-19T15:56:26 | 343,720,452 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,963 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 31 14:33:07 2020
@author: alejandrosoto
Script for 2 class of household in Raqaypampa.
"""
# -*- coding: utf-8 -*-
"""
@author: Alejandro Soto
"""
from core import User, np
User_list = []
#User classes definition
HI = User("high income",1)
User_list.append(HI)
LI = User("low income",0)
User_list.append(LI)
'''
Base scenario (BSA): Indoor bulb (3), outdoor bulb (1), radio (1), tv (1), phone charger (2), Water Heater (1), Mixer (1)
Base scenario (B): Indoor bulb (3), outdoor bulb (1), radio (1), tv (1), phone charger (2)
A
Scenario 1: BSA + Fridge (1) + Freezer* (1).
Scenario 2: BSA + Fridge (1).
Scenario 3: BSA + Fridge (1)*.
Scenario 4: BSA + Freezer (1).
Scenario 5: BSA + Wheler (1).
Scerario 6: BSA + Grinder (1).
Scanerio 7: Add + Dryer (1),
Scenario 9: All
B
Scenario 8: BSB + Water Heater** (1).
Scenario 10: BSA + Pump Water (1).
Scenario 11: BSA + DVD (1).
Scenario 12: BSA + Blender (1).
Scenario 13: BSA + Iron (1).
Scerario 14: BSA + Mill (1).
* With seasonal variation
** Occasional use
Cold Months: May-Aug Std Cycle 8:00-18:00 Above 10 degrees
Warm Months: Jan-Apr Std Cycle 0:00-23:59 Above 10 degrees
Hot Nonths: Sep-Dec Std Cycle 0:00-10:00; 15:01-23:59 Above 10 degrees
Int Cycle 10:01-15:00
'''
#High-Income
#indoor bulb
HI_indoor_bulb = HI.Appliance(HI,3,7,1,320,0.6,190)
HI_indoor_bulb.windows([1080,1440],[0,0])
#outdoor bulb
HI_outdoor_bulb = HI.Appliance(HI,1,13,1,340,0.1,300)
HI_outdoor_bulb.windows([1100,1440],[0,0])
HI_Radio = HI.Appliance(HI,1,7,1,280,0.3,110)
HI_Radio.windows([420,708],[0,0])
#tv
HI_TV = HI.Appliance(HI,1,60,3,300,0.38,114)
HI_TV.windows([1140,1440],[651,1139],0.35,[300,650])
#phone charger
HI_Phone_charger = HI.Appliance(HI,2,5,3,250,0.4,95)
HI_Phone_charger.windows([1190,1440],[0,420],0.35,[421,1189])
#water_heater
HI_Water_heater = HI.Appliance(HI,1,150,1,60,0.05,30)
HI_Water_heater.windows([0,1440],[0,0])
#mixer
HI_Mixer = HI.Appliance(HI,1,50,1,10,0.5,5,occasional_use = 0.3)
HI_Mixer.windows([420,560],[0,0])
#freezer
HI_Freezer = HI.Appliance(HI,1,200,1,1440,0,30,'yes',3)
HI_Freezer.windows([0,1440],[0,0])
HI_Freezer.specific_cycle_1(200,20,5,10)
HI_Freezer.specific_cycle_2(200,15,5,15)
HI_Freezer.specific_cycle_3(200,10,5,20)
HI_Freezer.cycle_behaviour([600,900],[0,0],[0,0],[0,0],[0,599],[901,1440])
#Lower Income
#indoor bulb
LI_indoor_bulb = LI.Appliance(LI,3,7,2,287,0.4,124)
LI_indoor_bulb.windows([1153,1440],[0,300],0.5)
#outdoor bulb
LI_outdoor_bulb = LI.Appliance(LI,1,13,1,243,0.3,71)
LI_outdoor_bulb.windows([1197,1440],[0,0])
#radio
LI_Radio = LI.Appliance(LI,1,7,2,160,0.3,49)
LI_Radio.windows([480,840],[841,1200],0.5)
#TV
LI_TV = LI.Appliance(LI,1,100,3,250,0.3,74)
LI_TV.windows([1170,1420],[551,1169],0.3,[300,550])
#phone charger
LI_Phone_charger = LI.Appliance(LI,2,5,3,200,0.4,82)
LI_Phone_charger.windows([1020,1440],[0,420],0.3,[720,1019])
|
[
"[email protected]"
] | |
e25e5edb0703725283c9e674fc6085ad431c3b52
|
ed4587c16e0708f0b618d8703b0ea9e86f5c3237
|
/Layers/HiddenLayer.py
|
c0b7f6809f033afd968f23563e71d82e0f8955df
|
[] |
no_license
|
CaoDuyThanh/NN_Autoencoder
|
b85256e4c4a1a71072c876c45098606244966cf8
|
0a0fc8e23e39249c6562249cf538e3b5898037f5
|
refs/heads/master
| 2021-01-23T22:15:06.380045 | 2017-02-26T13:33:50 | 2017-02-26T13:33:50 | 83,121,739 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,858 |
py
|
import theano
import numpy
import cPickle
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
class HiddenLayer:
    """Fully-connected (dense) Theano layer with optional input corruption,
    usable as one half of a denoising autoencoder.

    Weights are initialised uniformly in +/- sqrt(6 / (numIn + numOut))
    unless explicit shared variables are supplied. Python 2 code (cPickle).
    """
    def __init__(self,
                 rng,                    # numpy RandomState used for init and corruption
                 input,                  # symbolic data input (Theano tensor)
                 numIn,                  # number of input neurons
                 numOut,                 # number of output neurons
                 activation = T.tanh,    # activation function (None = linear)
                 W = None,
                 b = None,
                 corruption = None
                 ):
        # Set parameters
        self.Rng = rng;
        self.Input = input
        self.NumIn = numIn
        self.NumOut = numOut
        self.Activation = activation
        self.Corruption = corruption
        # Create shared parameters for hidden layer
        if W is None:
            """ We create random weights (uniform distribution) """
            # Create boundary for uniform generation
            wBound = numpy.sqrt(6.0 / (self.NumIn + self.NumOut))
            self.W = theano.shared(
                numpy.asarray(
                    rng.uniform(
                        low=-wBound,
                        high=wBound,
                        size=(self.NumIn, self.NumOut)
                    ),
                    dtype=theano.config.floatX
                ),
                borrow=True
            )
        else:
            """ Or simply set weights from parameter """
            self.W = W
        if b is None:
            """ We create zeros bias """
            # Create bias
            self.b = theano.shared(
                numpy.zeros(
                    shape = (self.NumOut, ),
                    dtype=theano.config.floatX
                ),
                borrow=True
            )
        else:
            """ Or simply set bias from parameter """
            self.b = b
    def getCorruptedInput(self, input, corruptionLevel):
        # Binomial (dropout-style) masking: each element survives with
        # probability 1 - corruptionLevel.
        theano_rng = RandomStreams(self.Rng.randint(2 ** 30))
        return theano_rng.binomial(size=input.shape, n=1,
                                   p=1 - corruptionLevel,
                                   dtype=theano.config.floatX) * input
    def Output(self):
        # NOTE(review): the dot product below uses the *pre-corruption*
        # `input` captured here; corruption only overwrites self.Input.
        # Verify this is intended — a denoising layer would normally feed
        # the corrupted tensor into the dot product.
        input = self.Input
        if self.Corruption is not None:
            self.Input = self.getCorruptedInput(self.Input, self.Corruption)
        output = T.dot(input, self.W) + self.b
        if self.Activation is None:
            return output
        else:
            return self.Activation(output)
    '''
    Return transpose of weight matrix
    '''
    def WTranspose(self):
        return self.W.T
    def Params(self):
        # Trainable parameters, in the order expected by the optimiser.
        return [self.W, self.b]
    def LoadModel(self, file):
        # Restore W then b; must match the order they were pickled in.
        self.W.set_value(cPickle.load(file), borrow = True)
        self.b.set_value(cPickle.load(file), borrow = True)
|
[
"[email protected]"
] | |
ea5aa679209b5f87452309f4ae5d2b6780c1cbc6
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5634697451274240_0/Python/elsw/revenge_pancakes.py
|
5b7acca8e7d41f5d660ecf7e2208da6c469f5d79
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 546 |
py
|
#/usr/bin/env python
# Google Code Jam Qualification Round 2016
# B. Revenge of the Pancakes
# https://code.google.com/codejam/contest/6254486/dashboard#s=p1
def happy(stack):
    """Return the minimum number of flips to make every pancake '+'.

    Appending a sentinel '+' to the stack, the answer equals the number of
    adjacent positions whose characters differ (each run boundary costs
    exactly one flip).
    """
    padded = stack + '+'
    return sum(1 for prev_ch, ch in zip(padded, padded[1:]) if ch != prev_ch)
# Read T test cases (one stack string per line) and write one
# "Case #i: flips" line per case. Python 2 script (xrange).
with open('B-small-attempt0.in', 'r') as f:
    t = int(f.readline())
    with open('test.out', 'w') as g:
        for i in xrange(t):
            stack = f.readline().strip()
            g.write('Case #%d: %d\n' % (i+1, happy(stack)))
|
[
"[email protected]"
] | |
9c53ab1aae60600743db9747d0c63fc33815b1d3
|
32809f6f425bf5665fc19de2bc929bacc3eeb469
|
/src/0435-Non-overlapping-Intervals/0435.py
|
7ead2f6d87451d06255ac7a691c7621aab8a49fb
|
[] |
no_license
|
luliyucoordinate/Leetcode
|
9f6bf01f79aa680e2dff11e73e4d10993467f113
|
bcc04d49969654cb44f79218a7ef2fd5c1e5449a
|
refs/heads/master
| 2023-05-25T04:58:45.046772 | 2023-05-24T11:57:20 | 2023-05-24T11:57:20 | 132,753,892 | 1,575 | 569 | null | 2023-05-24T11:57:22 | 2018-05-09T12:30:59 |
C++
|
UTF-8
|
Python
| false | false | 533 |
py
|
class Solution:
    def eraseOverlapIntervals(self, intervals: List[List[int]]) -> int:
        """Return the minimum number of intervals to remove so the rest are
        pairwise non-overlapping (LeetCode 435).

        Greedy: sort by start (in place, as before); whenever two kept
        intervals overlap, drop the one that ends later — keeping the
        earlier-ending interval can never be worse.

        Bug fix: the original bound the variable ``pre`` but then read
        ``prev``, raising NameError on any non-trivial input.
        """
        intervals.sort(key=lambda interval: interval[0])
        removed = 0
        prev = None  # last interval we decided to keep
        for cur in intervals:
            if prev and prev[1] > cur[0]:
                # Overlap: remove one interval; keep whichever ends first.
                removed += 1
                if cur[1] < prev[1]:
                    prev = cur
            else:
                prev = cur
        return removed
if __name__ == "__main__":
    # Smoke test: only [1,3] overlaps the chain [1,2],[2,3],[3,4];
    # the expected answer is 1 removal.
    intervals = [[1,2], [2,3], [3,4], [1,3]]
    print(Solution().eraseOverlapIntervals(intervals))
|
[
"[email protected]"
] | |
4a1a2aa093ea9f5082e68fb215190fec44f67e96
|
b88ddf1bbc0e203b403584428078d73593715454
|
/builder/igloo_director.py
|
ea3e87cb018003a80ebf063c557b4af64e4ccf29
|
[] |
no_license
|
keys4words/designPatterns
|
ef28289a92d68652f3d34a9a609aebe986e785bb
|
0d6ebd5dc1f8c3be725f7405bb785436eec37a89
|
refs/heads/main
| 2023-04-15T01:15:27.231107 | 2021-04-20T13:31:25 | 2021-04-20T13:31:25 | 348,728,039 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 285 |
py
|
from house_builder import HouseBuilder
class IglooDirector:
    """Director that knows the recipe for one complex product: an igloo."""

    @staticmethod
    def construct():
        """Assemble a one-door igloo with ice walls and return the result."""
        return (
            HouseBuilder()
            .set_building_type("Igloo")
            .set_wall_material("Ice")
            .set_number_doors(1)
            .get_result()
        )
|
[
"[email protected]"
] | |
924e843bf762ca9d9e88f6d17f4e35920d84013f
|
4bb6a8cbd7ac887ec4abc6abc97f0cb17415e82d
|
/Chapter 5 Strings/numbers2text.py
|
7d27d3705f77296072ca8408e9a44d5c200e6e9c
|
[] |
no_license
|
jbhennes/CSCI-220-Programming-1
|
cdc9cab47b4a79dccabf014224a175674e9a7155
|
ac9e85582eeb51a205981674ffdebe8a5b93a205
|
refs/heads/master
| 2021-01-01T03:54:50.723923 | 2016-05-02T16:06:55 | 2016-05-02T16:06:55 | 57,902,553 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 733 |
py
|
# numbers2text.py
# A program to convert a sequence of ASCII numbers into
# a string of text.
import string # include string library for the split function.
def main():
    """Prompt for space-separated ASCII codes and print the decoded text.

    Python 2 code: print statements, raw_input, string.split.
    """
    print "This program converts a sequence of ASCII numbers into"
    print "the string of text that it represents."
    print
    # Get the message to encode
    inString = raw_input("Please enter the ASCII-encoded message: ")
    # Loop through each substring and build ASCII message
    message = ""
    for numStr in string.split(inString):
        # NOTE(review): eval() on user input is unsafe and unnecessary —
        # int(numStr) would convert a digit string just as well.
        asciiNum = eval(numStr) # convert digits to a number
        message = message + chr(asciiNum) # append character to message
    print "The decoded message is:", message
main()
|
[
"[email protected]"
] | |
832e473b8c911f7063df943d58fecbe31724ce10
|
2868a3f3bca36328b4fcff5cce92f8adeb25b033
|
/+200ns/Ni_default/step1_dc/set.py
|
23f82640e2dddefec72eb6201e99726773cd9099
|
[] |
no_license
|
linfranksong/TM-enzyme_input
|
1c2a5e12e69c48febd5b5900aa00fe2339d42298
|
6e46a5b2c451efb93761707b77917a98ca0bfedc
|
refs/heads/master
| 2022-03-19T19:49:09.373397 | 2019-12-04T00:11:59 | 2019-12-04T00:11:59 | 205,220,795 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,114 |
py
|
import os
# Batch setup script: for each parameter value `a`, clone the template
# directory, substitute placeholders (MMM = a, XXX = window value, NNN/PPP =
# neighbouring window values) into input/PBS files, and submit the first
# equilibration job. Everything is done by shelling out via os.system.
dir = os.path.dirname(os.path.realpath(__file__)) + '/'
for a in [150,200,250,300,350,400,450,500,550,600]:
#for a in [150]:
#for a in [200,250,300,350,400,450,500,550,600]:
    # Fresh working copy "<a>_dc" built from the template directory.
    os.system("rm -r %s_dc"%(a))
    os.system("cp -r temp/ %s_dc"%(a))
    adir=dir+ "%s_dc/"%(a)
    os.chdir(adir)
    os.system("sed -i 's/MMM/%s/g' */*pbs"%(a))
    # Umbrella-sampling window coordinates; first/last handled separately below.
    array= [0,0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078,1.0]
    for n in range(1,len(array)-1):
        i=array[n]
        os.system("rm -r %s"%(i))
        os.system("cp -r files %s"%(i))
        wdir=adir+"%s/"%(i)
        os.chdir(wdir)
        os.system("mv eq.in %s_eq.in"%(i))
        os.system("mv us.in %s_us.in"%(i))
        os.system("sed -i 's/XXX/%s/g' %s_eq.in"%(i,i))
        os.system("sed -i 's/XXX/%s/g' %s_us.in"%(i,i))
        os.system("sed -i 's/MMM/%s/g' dis.RST"%(a))
        os.system("mv eq.pbs %s_eq.pbs"%(i))
        os.system("mv us.pbs %s_us.pbs"%(i))
        os.system("sed -i 's/XXX/%s/g' *.pbs"%(i))
        # NNN / PPP point at the next / previous window (job chaining).
        os.system("sed -i 's/NNN/%s/g' *.pbs"%(array[n+1]))
        os.system("sed -i 's/PPP/%s/g' *.pbs"%(array[n-1]))
    os.chdir(adir)
    # First window (0): also copies the starting structure and submits the job.
    sdir=adir+"0/"
    os.chdir(sdir)
    i=0
    os.system("cp /mnt/gs18/scratch/users/songlin3/run/glx-0904/+200ns/Ni_default/step0_fep/%s_fep/1.0/%s_1.0_eq_center.rst ."%(a,a))
    os.system("mv eq.in %s_eq.in"%(i))
    os.system("mv us.in %s_us.in"%(i))
    os.system("sed -i 's/XXX/%s/g' %s_eq.in"%(i,i))
    os.system("sed -i 's/XXX/%s/g' %s_us.in"%(i,i))
    os.system("mv eq.pbs %s_eq.pbs"%(i))
    os.system("mv us.pbs %s_us.pbs"%(i))
    os.system("sed -i 's/XXX/%s/g' *.pbs"%(i))
    os.system("sed -i 's/MMM/%s/g' dis.RST"%(a))
    os.system("sbatch 0_eq.pbs")
    # Last window (1.0): same substitutions, no job submission here.
    sdir=adir+"1.0/"
    os.chdir(sdir)
    i=1.0
    os.system("mv eq.in %s_eq.in"%(i))
    os.system("mv us.in %s_us.in"%(i))
    os.system("sed -i 's/XXX/%s/g' %s_eq.in"%(i,i))
    os.system("sed -i 's/XXX/%s/g' %s_us.in"%(i,i))
    os.system("mv eq.pbs %s_eq.pbs"%(i))
    os.system("mv us.pbs %s_us.pbs"%(i))
    os.system("sed -i 's/XXX/%s/g' *.pbs"%(i))
    os.system("sed -i 's/MMM/%s/g' dis.RST"%(a))
    os.system("sed -i 's/MMM/%s/g' center.in"%(a))
    os.chdir(dir)
|
[
"[email protected]"
] | |
807bd4413ac24f6180b70539c7ef6c6621b7e9db
|
372185cd159c37d436a2f2518d47b641c5ea6fa4
|
/142. 环形链表 II.py
|
83c889995bd0403eefdbd90103e6fe0fd1b3b7b1
|
[] |
no_license
|
lidongze6/leetcode-
|
12022d1a5ecdb669d57274f1db152882f3053839
|
6135067193dbafc89e46c8588702d367489733bf
|
refs/heads/master
| 2021-07-16T09:07:14.256430 | 2021-04-09T11:54:52 | 2021-04-09T11:54:52 | 245,404,304 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 758 |
py
|
class ListNode:
    """Singly linked list node: a payload value plus a next pointer."""

    def __init__(self, x):
        # New nodes start detached; callers link them via .next.
        self.val, self.next = x, None
class Solution:
    def detectCycle(self, head: ListNode) -> ListNode:
        """Return the node where the list's cycle begins, or None if acyclic.

        Floyd's tortoise-and-hare: once the fast and slow pointers meet
        inside the cycle, restarting one pointer from the head and stepping
        both one node at a time makes them meet exactly at the cycle entry.
        """
        if head is None or head.next is None:
            return None
        slow = fast = head
        while fast.next and fast.next.next:
            slow, fast = slow.next, fast.next.next
            if slow is fast:
                # Meeting point found — walk to the cycle entry.
                fast = head
                while slow is not fast:
                    slow, fast = slow.next, fast.next
                return slow
        # Fast pointer ran off the end: no cycle.
        return None
|
[
"[email protected]"
] | |
32fe88969c29acd42125c481d6f2bd756033c283
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02685/s529348276.py
|
65a166ca77210af61f1ce19dd156965d2e9ccf58
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 542 |
py
|
# Counting problem mod 998244353: reads n, m, k and sums, for each number of
# colour blocks `group` = n-i (i = k..0), the count
#   C(n-1, group-1) * m * (m-1)^(group-1)
# i.e. ways to split n cells into `group` runs times colourings where
# adjacent runs differ.
mod=998244353
fact=[1]
for i in range(1,2*10**5+1): # precompute factorials modulo `mod`
    fact.append((fact[-1]*i)%mod)
revfact=[1]
for i in range(1,2*10**5+1): # inverse factorials via Fermat's little theorem
    revfact.append(pow(fact[i],mod-2,mod))
n,m,k=map(int,input().split())
ans=0
for i in range(k,-1,-1): # count the arrangements for each m (N-K <= m <= N)
    group=n-i
    # C(n-1, group-1) from the precomputed factorial tables
    tmp=fact[n-1]*revfact[group-1]*revfact[n-1-(group-1)]
    tmp%=mod
    tmp*=m
    tmp%=mod
    tmp*=pow(m-1,group-1,mod)
    ans+=tmp
    ans%=mod
print(ans)
|
[
"[email protected]"
] | |
156869ad9e5674ab69c9b1b1d37b2d2d4946460c
|
7ae374f11cc2e9673fb2c39d00e942253418b41a
|
/connect_H351/telnet_H351.py
|
199d5322ff7cbbf4f64dcf01e540e2edbee3d854
|
[] |
no_license
|
zeewii/H351
|
44c05acf0f814558f1fa8d8e2a9c942fee707985
|
80231ff0434a15835d6b484cbc498b2f963d048c
|
refs/heads/master
| 2021-01-10T00:57:18.501664 | 2015-09-28T10:39:24 | 2015-09-28T10:39:24 | 43,277,121 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,360 |
py
|
#coding=utf-8
#描述:本模块为通过使用pexpect模块登录telnet输入命令,并取出输入结果
#作者:曾祥卫
import datetime
import pexpect
# Input: user - login name; ip - host to telnet to; password1 - first-stage
#        password; password2 - second-stage password; command - command to run
# Output: the text returned by the executed command
def telnet_command(user,ip,password1,password2,command):
    """Log in to *ip* over telnet (two-stage password) and run *command*.

    Prints and returns the captured output, and appends a timestamped copy
    to telnet_log.txt. Python 2 / pexpect code.
    """
    try:
        # prompt string that appears once the remote shell is ready
        finish = ":/#"
        # spawn a child process running the telnet command
        child = pexpect.spawn('telnet %s'%ip)
        # expected patterns: "Username", "Unknown host", EOF, timeout
        i = child.expect(["(?i)Username", "(?i)Unknown host", pexpect.EOF, pexpect.TIMEOUT])
        # anything but "Username" (unknown host / EOF / timeout): report and abort
        if i !=0:
            print u"telnet登录失败,由于登录时超时或EOF或主机名无效"
            child.close(force=True)
        # "Username" matched: send the login name
        else:
            child.sendline(user)
            # expected patterns: "Password", EOF, timeout
            i = child.expect(["(?i)Password", pexpect.EOF, pexpect.TIMEOUT])
            # on EOF/timeout, report and close
            if i != 0:
                print u"telnet登录失败,由于输入密码时超时或EOF"
                # force-close the child process
                child.close(force=True)
            # password prompt matched: send the first password
            child.sendline(password1)
            # wait for the 'router>' prompt, then enter 'sh'
            child.expect('router>')
            child.sendline('sh')
            # expected patterns: "Password", EOF, timeout
            i = child.expect(["(?i)Password", pexpect.EOF, pexpect.TIMEOUT])
            # on EOF/timeout, report and close
            if i != 0:
                print u"telnet登录失败,由于输入密码时超时或EOF"
                # force-close the child process
                child.close(force=True)
            # password prompt matched: send the second password
            child.sendline(password2)
            # wait for the remote shell prompt
            child.expect(finish)
            # prompt matched: send the command to execute
            child.sendline(command)
            # wait for the prompt again (command finished)
            child.expect(finish)
            # capture everything printed before the prompt as the result
            result = child.before
            print result
            # append timestamp + output to telnet_log.txt as a backup log
            f = open('telnet_log.txt','a')
            str1 = str(datetime.datetime.now())+' '
            f.writelines(str1+result)
            f.close()
            # (disabled) hand interactive control of the telnet child to the user
            #child.interact()
            # close the telnet child process
            child.close(force=True)
            # return the command output
            return result
    # on pexpect errors, print the reason (Python 2 except syntax)
    except pexpect.ExceptionPexpect, e:
        print 'telnet连接失败',str(e)
if __name__ == '__main__':
    # Demo invocation.
    # NOTE(review): credentials are hard-coded in source — move them to a
    # config file or environment variables before sharing this script.
    user = '100msh'
    ip = '192.168.11.1'
    password1 = '100msh'
    password2 = '@w$r^y*i90'
    command = 'ifconfig br-lan'
    result = telnet_command(user,ip,password1,password2,command)
    print result
|
[
"[email protected]"
] | |
5131a9c888902430b3d4a3d54233d26783ca9679
|
bf99b1b14e9ca1ad40645a7423f23ef32f4a62e6
|
/AtCoder/abc/065d.py
|
9fc11d2c49550f58cf2f400ec67dd7e78aefe5b5
|
[] |
no_license
|
y-oksaku/Competitive-Programming
|
3f9c1953956d1d1dfbf46d5a87b56550ff3ab3db
|
a3ff52f538329bed034d3008e051f30442aaadae
|
refs/heads/master
| 2021-06-11T16:14:12.635947 | 2021-05-04T08:18:35 | 2021-05-04T08:18:35 | 188,639,647 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,003 |
py
|
import heapq
N = int(input())
cities = [0 for _ in range(N)]
for i in range(N):
x, y = map(int, input().split())
cities[i] = (i, x, y)
edges = [[] for _ in range(N)]
cities.sort(key=lambda A : A[1])
for i in range(N - 1):
a, xFrom, yFrom = cities[i]
b, xTo, yTo = cities[i + 1]
cost = min(abs(xFrom - xTo), abs(yFrom - yTo))
edges[a].append((cost, b))
edges[b].append((cost, a))
cities.sort(key=lambda A : A[2])
for i in range(N - 1):
a, xFrom, yFrom = cities[i]
b, xTo, yTo = cities[i + 1]
cost = min(abs(xFrom - xTo), abs(yFrom - yTo))
edges[a].append((cost, b))
edges[b].append((cost, a))
vertex = set([0])
newEdge = []
que = []
for cost, to in edges[0]:
heapq.heappush(que, (cost, to))
ans = 0
while len(vertex) < N:
cost, now = heapq.heappop(que)
if now in vertex:
continue
ans += cost
vertex.add(now)
for c, to in edges[now]:
if not to in vertex:
heapq.heappush(que, (c, to))
print(ans)
|
[
"[email protected]"
] | |
1e1e14496a4ff4181136795e1206bfc147a0a3b7
|
75dcb56e318688499bdab789262839e7f58bd4f6
|
/_algorithms_challenges/leetcode/LeetcodePythonProject/leetcode_0751_0800/LeetCode759_EmployeeFreeTime.py
|
ff1eb9feb5d31dc20ea349c46e94b43ceaedee09
|
[] |
no_license
|
syurskyi/Algorithms_and_Data_Structure
|
9a1f358577e51e89c862d0f93f373b7f20ddd261
|
929dde1723fb2f54870c8a9badc80fc23e8400d3
|
refs/heads/master
| 2023-02-22T17:55:55.453535 | 2022-12-23T03:15:00 | 2022-12-23T03:15:00 | 226,243,987 | 4 | 1 | null | 2023-02-07T21:01:45 | 2019-12-06T04:14:10 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,595 |
py
|
'''
Created on Mar 30, 2018
@author: tongq
'''
# Definition for an interval.
class Interval(object):
    """Simple time span with ``start`` and ``end`` attributes."""

    def __init__(self, s=0, e=0):
        self.start, self.end = s, e
class Solution(object):
    """LeetCode 759 — common free time across all employees' schedules."""
    def employeeFreeTime(self, schedule):
        """
        :type schedule: List[List[Interval]]
        :rtype: List[Interval]

        Pushes every busy [start, end] onto a min-heap, then sweeps them in
        start order: ``temp`` tracks the current merged busy span, and a gap
        between temp's end and the next span's start is emitted as free time.
        Note: raises IndexError on an empty/all-empty schedule (heappop on
        an empty heap).
        """
        import heapq
        heap = []
        for arr in schedule:
            for inter in arr:
                heapq.heappush(heap, [inter.start, inter.end])
        temp = heapq.heappop(heap)
        res = []
        while heap:
            if temp[1] < heap[0][0]:
                # Gap between the current busy span and the next one: free time.
                res.append(Interval(temp[1], heap[0][0]))
                temp = heapq.heappop(heap)
            else:
                # Overlap/adjacency: extend the current span if needed,
                # then discard the consumed heap entry.
                if temp[1] < heap[0][1]:
                    temp = heap[0]
                heapq.heappop(heap)
        return res
    def test(self):
        # Ad-hoc demo runner: prints results instead of asserting them.
        testCases = [
            [
                [[1,2],[5,6]],
                [[1,3]],[[4,10]],
            ],
            [
                [[1,3],[6,7]],[[2,4]],
                [[2,5],[9,12]],
            ],
        ]
        for schedule in testCases:
            print('schedule: %s' % schedule)
            arr = []
            for arr0 in schedule:
                arr.append([Interval(inter[0], inter[1]) for inter in arr0])
            schedule = arr
            result = self.employeeFreeTime(schedule)
            res = [[inter.start, inter.end] for inter in result]
            print('result: %s' % res)
            print('-='*30+'-')
|
[
"[email protected]"
] | |
4e96486fda291297b6b7b7b5830180b891f7de07
|
54df8336b50e8f2d7dbe353f0bc51a2b3489095f
|
/Django/Django_Old/disa-py/member/admin.py
|
f571a992fa1e04afacdcd4fa42521d6044b42e0d
|
[] |
no_license
|
SurendraKumarAratikatla/MyLenovolapCodes1
|
42d5bb7a14bfdf8d773ee60719380ee28ff4947a
|
12c56200fcfd3e5229bfeec209fd03b5fc35b823
|
refs/heads/master
| 2023-06-17T15:44:18.312398 | 2021-07-19T10:28:11 | 2021-07-19T10:28:11 | 387,358,266 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,415 |
py
|
from django.contrib import admin
# Register your models here.
from models import *
from django.contrib.admin import ModelAdmin
from django.db.models.fields import Field
from django.contrib.admin import SimpleListFilter
'''#from assign.disa-py.disa.admin_site import custom_admin_site
class country(SimpleListFilter):
title = 'name' # or use _('country') for translated title
parameter_name = 'name'
def lookups(self, request, model_admin):
list_of_countries = []
queryset = Organisation.objects.all()
for countries in queryset:
list_of_countries.append(self.id)
return sorted(list_of_countries, key=lambda tp: tp[1])
def queryset(self, request, queryset):
if self.value():
return queryset.filter(organisations_id=self.value())
return str(queryset)
class CityAdmin(ModelAdmin):
list_filter = (country, )
@admin.register(Author, Reader, Editor, site=custom_admin_site)
class PersonAdmin(admin.ModelAdmin):
pass
'''
'''class AddressAdmin(admin.ModelAdmin):
list_display = ('mid','address','city','district','state','country','pin','phone')
#list_display = ('full_address', 'pin')
ordering = ['country']
actions = [ 'mark_seen']
def mark_seen(self, request, queryset):
queryset.update(status='p')
mark_seen.short_description = "Mark seen"
def my_view(request, *args, **kwargs):
user1 = Seva.objects.values_list('sevaday', flat=True)[0];
return u'%s' % (user1)
admin.site.register_view('somepath', view=my_view)'''
# Register every domain model with the Django admin. The model classes and
# their *Admin classes all come from `from models import *` above; models
# without a dedicated admin class fall back to commonAdmin or the default.
admin.site.register(Address, AddressAdmin)
admin.site.register(Awardee, AwardeeAdmin)
admin.site.register(LunarDate, LunarAdmin)
admin.site.register(Member, MembersAdmin)
admin.site.register(NakshatramRasiPadamData, NakshatramRasiPadamDataAdmin)
admin.site.register(Seva, SevasAdmin)
admin.site.register(DonationKind, DonationKindAdmin)
admin.site.register(DonationCash, DonationCashAdmin)
admin.site.register(DonationAsset, DonationAssetAdmin)
admin.site.register(DonationService, DonationServiceAdmin)
admin.site.register(MaasamType, commonAdmin)
admin.site.register(NakshatramType, commonAdmin)
# OAuth models intentionally left unregistered:
# admin.site.register(OauthAccesstoken, commonAdmin)
# admin.site.register(OauthAuthCode, commonAdmin)
# admin.site.register(OauthRefreshToken, commonAdmin)
admin.site.register(Organisation, commonAdmin)
admin.site.register(Profile, commonAdmin)
admin.site.register(SVExtra, commonAdmin)
admin.site.register(PadamType, commonAdmin)
admin.site.register(PakshamType, commonAdmin)
admin.site.register(RasiType, commonAdmin)
admin.site.register(SequenceNumber, commonAdmin)
admin.site.register(SevaAddress, commonAdmin)
admin.site.register(SevaCategory, commonAdmin)
admin.site.register(Tag, commonAdmin)
admin.site.register(TithiType, commonAdmin)
admin.site.register(MedicalProfile, MedicalProfileAdmin)
admin.site.register(StaffProfile, StaffProfileAdmin)
admin.site.register(User, commonAdmin)
admin.site.register(Transaction)
admin.site.register(SevasAddress, SevasAddressAdmin)
admin.site.register(AssetLand, AssetLandAdmin)
admin.site.register(AssetBuilding, AssetBuildingAdmin)
admin.site.register(AssetEquipment, AssetEquipmentAdmin)
admin.site.register(Trustee, TrusteeAdmin)
admin.site.register(Honorary, commonAdmin)
admin.site.register(Complimentary, commonAdmin)
admin.site.register(Relatives, RelativesAdmin)
admin.site.register(Duration)
|
[
"[email protected]"
] | |
1d2db470220c93818fef669f95833c53bfc67818
|
0dc9968c34f74f6ff15435104806956169d6c82a
|
/algorithm/compare_tripet.py
|
519515ce23471f85179b0b6a1b0551eacbbf7458
|
[] |
no_license
|
tasnuvaleeya/hackerRank
|
5ac0e5b089e8da83980b37b1dea45def20fe02e0
|
7259324ea0692ce36c494d9b8913eef8e2211aa9
|
refs/heads/master
| 2021-04-09T17:37:41.769210 | 2018-04-12T13:55:56 | 2018-04-12T13:55:56 | 125,638,757 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 488 |
py
|
import sys
def solve(a0, a1, a2, b0, b1, b2):
# Complete this function
a = (1 if a0 > b0 else 0) + (1 if a1 > b1 else 0) + (1 if a2 > b2 else 0)
b = (1 if b0 > a0 else 0) + (1 if b1 > a1 else 0) + (1 if b2 > a2 else 0)
return (a, b)
a0, a1, a2 = input().strip().split(' ')
a0, a1, a2 = [int(a0), int(a1), int(a2)]
b0, b1, b2 = input().strip().split(' ')
b0, b1, b2 = [int(b0), int(b1), int(b2)]
result = solve(a0, a1, a2, b0, b1, b2)
print(" ".join(map(str, result)))
|
[
"[email protected]"
] | |
b2b27bb05f6ee9fa684ab184aab98b2328e8fb80
|
16dcbf88ae9514109151fe5ff447b2b653ddf48b
|
/2016/012-polynom/polynom 2.py
|
2847370b97cc567bdecda9cfbb8aa6c5054e1f08
|
[] |
no_license
|
ChristerNilsson/Lab
|
efa55ef5e79dff84b232dfcf94473eacdb263175
|
b1f730f45ec6e901bd14c1e4196aa5e0f591ecd2
|
refs/heads/master
| 2023-07-06T04:35:09.458936 | 2023-06-24T21:40:54 | 2023-06-24T21:40:54 | 48,474,249 | 8 | 8 | null | 2022-12-10T07:03:31 | 2015-12-23T06:51:11 |
JavaScript
|
UTF-8
|
Python
| false | false | 4,381 |
py
|
# -*- coding: utf-8 -*-
from sympy import S
# Polynom 2: Lista 0,1,2,3,... Value, Add, Mul, Diff, Integrate, Prettyprint
# Objektorienterat
class Polynom(object):
def __init__(self, polynom):
self.polynom = polynom
def __call__(self, x):
return sum([factor * x ** exponent for exponent,factor in enumerate(self.polynom)])
def __eq__(self,other):
return self.polynom == other.polynom
def __str__(self):
res = []
for degree,factor in enumerate(self.polynom):
a,b,c,d,e = '','','','',''
if factor == 0:
continue
if factor > 0:
a = '+'
if factor == 1:
if degree == 0:
b = str(factor)
elif factor == -1:
b = '-'
else:
b = str(factor)
if degree != 0:
c = '*'
if degree == 0:
pass
elif degree == 1:
d = 'x'
else:
d = 'x**'
if '/' in str(degree):
e = '(' + str(degree) + ')'
else:
e = str(degree)
res.append(a+b+c+d+e)
if not res:
res.append('0')
res = ''.join(res)
if res[0] == '+':
res = res[1:]
return res
def __add__(self, other):
return Polynom([(0 if f1 is None else f1) + (0 if f2 is None else f2) for f1,f2 in map(None, self.polynom, other.polynom)])
def __sub__(self, other):
return self + Polynom([-factor for factor in other.polynom])
def __mul__(self,other):
p1 = self.polynom
p2 = other.polynom
res = [0] * (len(p1) + len(p2))
for exp1,f1 in enumerate(p1):
for exp2,f2 in enumerate(p2):
res[exp1 + exp2] += f1 * f2
if not res:
return Polynom(res)
while res[-1] == 0:
res.pop()
if not res:
break
return Polynom(res)
def diff(self):
res = []
for degree,factor in enumerate(self.polynom):
if degree != 0:
res.append(factor * degree)
return Polynom(res)
def integrate(self):
res = [0]
for degree,factor in enumerate(self.polynom):
res.append(1.0 * factor / (degree + 1))
return Polynom(res)
a = Polynom([5,-7,3]) # f(x) = 5 -7*x + 3*x**2
assert a(0) == 5
assert a(1) == 1
assert a(2) == 3
assert Polynom([]) + Polynom([]) == Polynom([])
assert Polynom([1]) + Polynom([]) == Polynom([1])
assert Polynom([]) + Polynom([1]) == Polynom([1])
assert Polynom([1]) + Polynom([1]) == Polynom([2])
assert Polynom([1]) + Polynom([2]) == Polynom([3])
assert Polynom([1,0,1]) + Polynom([2,3]) == Polynom([3,3,1])
assert Polynom([]) * Polynom([]) == Polynom([])
assert Polynom([1]) * Polynom([]) == Polynom([])
assert Polynom([]) * Polynom([1]) == Polynom([])
assert Polynom([1]) * Polynom([1]) == Polynom([1])
assert Polynom([1]) * Polynom([2]) == Polynom([2])
assert Polynom([1,0,1]) * Polynom([2,3]) == Polynom([2,3,2,3])
assert Polynom([]).diff() == Polynom([])
assert Polynom([1]).diff() == Polynom([])
assert Polynom([1,2]).diff() == Polynom([2])
assert Polynom([1,2,3]).diff() == Polynom([2,6])
assert Polynom([5,-7,3]).diff() == Polynom([-7,6])
assert Polynom([]).integrate() == Polynom([0])
assert Polynom([1]).integrate() == Polynom([0,1])
assert Polynom([1,2]).integrate() == Polynom([0,1,1])
assert Polynom([1,2,3]).integrate() == Polynom([0,1,1,1])
assert Polynom([5,-7,3]).integrate() == Polynom([0,5,-3.5,1])
# Beräkna ytan mellan polynomen y=x och y=x*x, för x mellan 0 och 1
a = Polynom([0,1])
b = Polynom([0,0,1])
c = a - b
f = c.integrate()
assert str(f(1) - f(0)) == '0.166666666667'
assert str(Polynom([])) == '0'
assert str(Polynom([0])) == '0'
assert str(Polynom([1])) == '1'
assert str(Polynom([0,0])) == '0'
assert str(Polynom([0,1])) == 'x'
assert str(Polynom([0,-1])) == '-x'
assert str(Polynom([0,2])) == '2*x'
assert str(Polynom([0,-2])) == '-2*x'
a = [5, -7, 3]
assert str(Polynom(a)) == '5-7*x+3*x**2'
assert str(Polynom(a).diff()) == '-7+6*x'
assert str(Polynom(a).diff().diff()) == '6'
assert str(Polynom(a).diff().diff().diff()) == '0'
assert str(Polynom([0,-7,-3])) == '-7*x-3*x**2'
|
[
"[email protected]"
] | |
456d1b7dcc9770fbbd73c74764f549079b035733
|
4fd56b22ba00072817904c45f6b18844034f58f0
|
/projectapi/urls.py
|
4bc4445e2366ca58c269085b94fa45c39e920dd6
|
[
"MIT"
] |
permissive
|
kelvinrono/projectApi
|
0bf7a2766a5279ca4b27e8b3d55e352f7661f083
|
873ea90bff9ec1004d1f936d4fdcec47f95759c3
|
refs/heads/master
| 2023-06-19T16:04:26.886938 | 2021-07-20T20:47:40 | 2021-07-20T20:47:40 | 386,591,760 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 599 |
py
|
from django.contrib import admin
from django.urls import path,include
from django.contrib.auth import views
from django.conf import settings
from django.conf.urls.static import static
from django_registration.backends.one_step.views import RegistrationView
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('api.urls')),
path('accounts/register/', RegistrationView.as_view(success_url='/'),name='django_registration_register'),
path('accounts/', include('django.contrib.auth.urls')),
path('accounts/', include('django_registration.backends.one_step.urls')),
]
|
[
"[email protected]"
] | |
7d24cd93a7fba526abe473e1a5d4a570cd1114e6
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2741/60760/298464.py
|
353a99580a2dc99629300bfe0b7b2f83c5ddb862
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 335 |
py
|
def func(arr: list):
length=len(arr)
l=length
while l>0:
for j in range(length+1-l):
temp=arr[j:j+l]
temp2=sorted(set(temp))
if temp==temp2:
return l
l=l-1
return 0
b = input()
arr = list(map(int, b[1:len(b)-1].split(',')))
print(func(arr))
|
[
"[email protected]"
] | |
2308b0c04994fcc9e120f82195135253102e7f8a
|
eaa284e89ce848e7500d08cc16b40b6c465e6b5c
|
/cthaeh/app.py
|
7efe62c7464f73d3379b74d3b80e4182bd7ca3a7
|
[
"MIT"
] |
permissive
|
pipermerriam/cthaeh
|
bfac951546977eeb078df9bffb5a07536f6772ee
|
a3f63b0522d940af37f485ccbeed07666adb465b
|
refs/heads/master
| 2023-08-28T08:49:23.966610 | 2020-04-28T18:17:02 | 2020-04-28T18:17:02 | 259,418,354 | 0 | 0 |
MIT
| 2020-04-27T18:30:54 | 2020-04-27T18:30:53 | null |
UTF-8
|
Python
| false | false | 2,077 |
py
|
import logging
import pathlib
from typing import Optional
from async_service import Service
from eth_typing import BlockNumber
from sqlalchemy import orm
import trio
from web3 import Web3
from cthaeh.exfiltration import Exfiltrator
from cthaeh.ir import Block as BlockIR
from cthaeh.loader import BlockLoader
from cthaeh.models import Header
from cthaeh.rpc import RPCServer
def determine_start_block(session: orm.Session) -> BlockNumber:
head = (
session.query(Header) # type: ignore
.order_by(Header.block_number.desc())
.filter(Header.is_canonical == True) # noqa: E712
.first()
)
if head is None:
return BlockNumber(0)
else:
return BlockNumber(head.block_number + 1)
class Application(Service):
logger = logging.getLogger("cthaeh.Cthaeh")
rpc_server: Optional[RPCServer] = None
def __init__(
self,
w3: Web3,
session: orm.Session,
start_block: Optional[BlockNumber],
end_block: Optional[BlockNumber],
concurrency: int,
ipc_path: Optional[pathlib.Path],
) -> None:
block_send_channel, block_receive_channel = trio.open_memory_channel[BlockIR](
128
)
if start_block is None:
start_block = determine_start_block(session)
self.exfiltrator = Exfiltrator(
w3=w3,
block_send_channel=block_send_channel,
start_at=start_block,
end_at=end_block,
concurrency_factor=concurrency,
)
self.loader = BlockLoader(
session=session, block_receive_channel=block_receive_channel
)
if ipc_path is not None:
self.rpc_server = RPCServer(ipc_path=ipc_path, session=session)
async def run(self) -> None:
self.manager.run_daemon_child_service(self.exfiltrator)
self.manager.run_daemon_child_service(self.loader)
if self.rpc_server is not None:
self.manager.run_daemon_child_service(self.rpc_server)
await self.manager.wait_finished()
|
[
"[email protected]"
] | |
e511daa839d5f5ec938a1828c6f4e1d08361e541
|
3f7c27ccd0ab1fcbd2583cf4b764b81bd27dd718
|
/apps/members/migrations/0003_auto__add_field_member_address__add_field_member_city__add_field_membe.py
|
bfad4ac11b208f53dc018a2f15b4d2636362d119
|
[] |
no_license
|
adamtlord/foreverland
|
001ca1a91a3cc468405efb80fe7981e75b82021c
|
8206ddeeb8cfbd2752ef6fa9839424718cb96e07
|
refs/heads/master
| 2020-04-16T00:50:51.582008 | 2016-09-21T03:27:39 | 2016-09-21T03:27:39 | 11,668,672 | 0 | 0 | null | 2016-09-04T03:46:51 | 2013-07-25T19:05:55 |
Python
|
UTF-8
|
Python
| false | false | 7,747 |
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Member.address'
db.add_column(u'members_member', 'address',
self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True),
keep_default=False)
# Adding field 'Member.city'
db.add_column(u'members_member', 'city',
self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True),
keep_default=False)
# Adding field 'Member.state'
db.add_column(u'members_member', 'state',
self.gf('django.contrib.localflavor.us.models.USStateField')(max_length=2, null=True, blank=True),
keep_default=False)
# Adding field 'Member.zip_code'
db.add_column(u'members_member', 'zip_code',
self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True),
keep_default=False)
# Adding field 'Member.phone'
db.add_column(u'members_member', 'phone',
self.gf('django.contrib.localflavor.us.models.PhoneNumberField')(max_length=20, null=True, blank=True),
keep_default=False)
# Adding field 'Member.ssn'
db.add_column(u'members_member', 'ssn',
self.gf('django.db.models.fields.CharField')(max_length=16, null=True, blank=True),
keep_default=False)
# Adding field 'Sub.address'
db.add_column(u'members_sub', 'address',
self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True),
keep_default=False)
# Adding field 'Sub.city'
db.add_column(u'members_sub', 'city',
self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True),
keep_default=False)
# Adding field 'Sub.state'
db.add_column(u'members_sub', 'state',
self.gf('django.contrib.localflavor.us.models.USStateField')(max_length=2, null=True, blank=True),
keep_default=False)
# Adding field 'Sub.zip_code'
db.add_column(u'members_sub', 'zip_code',
self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True),
keep_default=False)
# Adding field 'Sub.phone'
db.add_column(u'members_sub', 'phone',
self.gf('django.contrib.localflavor.us.models.PhoneNumberField')(max_length=20, null=True, blank=True),
keep_default=False)
# Adding field 'Sub.ssn'
db.add_column(u'members_sub', 'ssn',
self.gf('django.db.models.fields.CharField')(max_length=16, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Member.address'
db.delete_column(u'members_member', 'address')
# Deleting field 'Member.city'
db.delete_column(u'members_member', 'city')
# Deleting field 'Member.state'
db.delete_column(u'members_member', 'state')
# Deleting field 'Member.zip_code'
db.delete_column(u'members_member', 'zip_code')
# Deleting field 'Member.phone'
db.delete_column(u'members_member', 'phone')
# Deleting field 'Member.ssn'
db.delete_column(u'members_member', 'ssn')
# Deleting field 'Sub.address'
db.delete_column(u'members_sub', 'address')
# Deleting field 'Sub.city'
db.delete_column(u'members_sub', 'city')
# Deleting field 'Sub.state'
db.delete_column(u'members_sub', 'state')
# Deleting field 'Sub.zip_code'
db.delete_column(u'members_sub', 'zip_code')
# Deleting field 'Sub.phone'
db.delete_column(u'members_sub', 'phone')
# Deleting field 'Sub.ssn'
db.delete_column(u'members_sub', 'ssn')
models = {
u'members.member': {
'Meta': {'object_name': 'Member'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'address': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'bio': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'display_first': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'display_last': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'dob': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instrument': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'join_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'middle_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'phone': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'section': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'ssn': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'state': ('django.contrib.localflavor.us.models.USStateField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
},
u'members.sub': {
'Meta': {'object_name': 'Sub'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instrument': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'phone': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'ssn': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'state': ('django.contrib.localflavor.us.models.USStateField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['members']
|
[
"[email protected]"
] | |
8a69b3abdbe989e9632031a056e21efcc892c649
|
c15a28ae62eb94dbf3ed13e2065195e572a9988e
|
/Cook book/src/9/preserving_function_metadata_when_writing_decorators/example.py
|
e5e1850554e8f722b7368d301f04da5a8473d8a1
|
[] |
no_license
|
xuyuchends1/python
|
10798c92840a1a59d50f5dc5738b2881e65f7865
|
545d950a3d2fee799902658e8133e3692939496b
|
refs/heads/master
| 2021-01-25T07:07:04.812140 | 2020-02-28T09:25:15 | 2020-02-28T09:25:15 | 93,647,064 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 673 |
py
|
import time
from functools import wraps
def timethis(func):
'''
Decorator that reports the execution time.
'''
@wraps(func)
def wrapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
end = time.time()
print(func.__name__, end - start)
return result
return wrapper
if __name__ == '__main__':
@timethis
def countdown(n: int):
'''
Counts down
'''
while n > 0:
n -= 1
countdown(100000)
print('Name:', countdown.__name__)
print('Docstring:', repr(countdown.__doc__))
print('Annotations:', countdown.__annotations__)
|
[
"[email protected]"
] | |
b1b37aea147f4eae935359ca21d61807d97cf417
|
bbb8d941d0aa439ca435e0f00ddbd7330ad2db79
|
/cpp/cc1.py
|
ee3d587fa2a2f8a3948cee50ae517e7285bcf118
|
[] |
no_license
|
dimritium/Code
|
7ca940124074d7f7bca28559e0fe2f3cba24f846
|
e6678b3dabe21fcd05e362bb8115f7812ad9abb8
|
refs/heads/master
| 2021-07-25T06:35:22.755474 | 2017-11-04T15:07:50 | 2017-11-04T15:07:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 491 |
py
|
t = int(input())
for i in range(t):
s = str(input())
dic = {}
for i in range(len(s)):
try:
dic[s[i]].append(i)
except:
dic[s[i]] = [i]
for k,v in dic.items():
flag = 0
if len(dic[k])>1:
if dic[k][-1]!=len(s)-1:
dic[k].append(len(s)-1)
for j in range(len(v)-2):
new_s = re.compile(r"["+s[dic[k][j]:dic[k][j+1]]+"]")
for l in range(j+1,len(v))
|
[
"[email protected]"
] | |
19449a8c3d7391986351f441cf5c2b743a3dbcb2
|
2c143ba64032f65c7f7bf1cbd567a1dcf13d5bb1
|
/腾讯/回溯算法/022括号生成.py
|
c5dd0dd441e7531fdd68cfbbe845ec6452796fcd
|
[] |
no_license
|
tx991020/MyLeetcode
|
5b6121d32260fb30b12cc8146e44e6c6da03ad89
|
cfe4f087dfeb258caebbc29fc366570ac170a68c
|
refs/heads/master
| 2020-04-09T21:43:41.403553 | 2019-03-27T18:54:35 | 2019-03-27T18:54:35 | 160,611,089 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,960 |
py
|
'''
给出 n 代表生成括号的对数,请你写出一个函数,使其能够生成所有可能的并且有效的括号组合。
例如,给出 n = 3,生成结果为:
[
"((()))",
"(()())",
"(())()",
"()(())",
"()()()"
]
'''
'''
class Solution:
def generateParenthesis(self, n):
"""
:type n: int
:rtype: List[str]
"""
self.res = []
self.singleStr('', 0, 0, n)
return self.res
def singleStr(self, s, left, right, n):
if left == n and right == n:
self.res.append(s)
if left < n:
self.singleStr(s + '(',left + 1, right, n)
if right < left:
self.singleStr(s + ')',left, right + 1, n)
非常牛逼的讲解,需要这样的人来给我们讲算法
####以Generate Parentheses为例,backtrack的题到底该怎么去思考?
所谓Backtracking都是这样的思路:在当前局面下,你有若干种选择。那么尝试每一种选择。如果已经发现某种选择肯定不行(因为违反了某些限定条件),就返回;如果某种选择试到最后发现是正确解,就将其加入解集
所以你思考递归题时,只要明确三点就行:选择 (Options),限制 (Restraints),结束条件 (Termination)。即“ORT原则”(这个是我自己编的)
对于这道题,在任何时刻,你都有两种选择:
加左括号。
加右括号。
同时有以下限制:
如果左括号已经用完了,则不能再加左括号了。
如果已经出现的右括号和左括号一样多,则不能再加右括号了。因为那样的话新加入的右括号一定无法匹配。
结束条件是: 左右括号都已经用完。
结束后的正确性: 左右括号用完以后,一定是正确解。因为1. 左右括号一样多,2. 每个右括号都一定有与之配对的左括号。因此一旦结束就可以加入解集(有时也可能出现结束以后不一定是正确解的情况,这时要多一步判断)。
递归函数传入参数: 限制和结束条件中有“用完”和“一样多”字样,因此你需要知道左右括号的数目。 当然你还需要知道当前局面sublist和解集res。
因此,把上面的思路拼起来就是代码:
if (左右括号都已用完) {
加入解集,返回
}
//否则开始试各种选择
if (还有左括号可以用) {
加一个左括号,继续递归
}
if (右括号小于左括号) {
加一个右括号,继续递归
}
你帖的那段代码逻辑中加了一条限制:“3. 是否还有右括号剩余。如有才加右括号”。这是合理的。不过对于这道题,如果满足限制1、2时,3一定自动满足,所以可以不判断3。
这题其实是最好的backtracking初学练习之一,因为ORT三者都非常简单明显。你不妨按上述思路再梳理一遍,还有问题的话再说。
以上文字来自 1point3arces的牛人解答
'''
|
[
"[email protected]"
] | |
bf5cc25038b36bbd8db9b85a2521712b8946591a
|
2775947a01c2b10671737eae47725435957890a5
|
/to-be-implemented/vstruct/win32.py
|
908eb8ab7618a154c12ecf06dbb1a4dddd3235df
|
[] |
no_license
|
albertz/pydbattach
|
7b4bd4b7b22ec3c0aa82b45ba29674d3c852a9a4
|
bbcc187627fc80ae4bd6fc98eefe41316f722a91
|
refs/heads/master
| 2022-10-25T12:34:05.045449 | 2022-10-11T14:53:18 | 2022-10-11T14:53:18 | 1,798,590 | 79 | 10 | null | 2022-02-09T12:43:01 | 2011-05-25T11:52:27 |
Python
|
UTF-8
|
Python
| false | false | 7,453 |
py
|
from vstruct.primitives import *
from vstruct import VStruct,VArray
DWORD = v_uint32
class NT_TIB(VStruct):
_fields_ = [
("ExceptionList", v_ptr), # ExceptionRegistration structures.
("StackBase", v_ptr),
("StackLimit", v_ptr),
("SubSystemTib", v_ptr),
("FiberData", v_ptr),
("Version", v_ptr),
("ArbitraryUserPtr", v_ptr),
("Self", v_ptr)
]
class SEH3_SCOPETABLE(VStruct):
_fields_ = [
("EnclosingLevel", v_int32),
("FilterFunction", v_ptr),
("HandlerFunction", v_ptr),
]
class SEH4_SCOPETABLE(VStruct):
"""
Much like the SEH3 scopetable with the stack cookie additions
"""
_fields_ = [
("GSCookieOffset", v_int32),
("GSCookieXOROffset", v_int32),
("EHCookieOffset", v_int32),
("EHCookieXOROffset", v_int32),
("EnclosingLevel", v_int32),
("FilterFunction", v_ptr),
("HandlerFunction", v_ptr),
]
class CLIENT_ID(VStruct):
_fields_ = [
("UniqueProcess", v_ptr),
("UniqueThread", v_ptr)
]
class TebReserved32Array(VArray):
_field_type_ = v_uint32
_field_count_ = 26
class TebReservedArray(VArray):
_field_type_ = v_uint32
_field_count_ = 5
class TEB(VStruct):
_fields_ = [
("TIB", NT_TIB),
("EnvironmentPointer", v_ptr),
("ClientId", CLIENT_ID),
("ActiveRpcHandle", v_ptr),
("ThreadLocalStorage", v_ptr),
("ProcessEnvironmentBlock", v_ptr),
("LastErrorValue", v_uint32),
("CountOfOwnedCriticalSections", v_uint32),
("CsrClientThread", v_ptr),
("Win32ThreadInfo", v_ptr),
("User32Reserved", TebReserved32Array),
("UserReserved", TebReservedArray),
("WOW32Reserved", v_ptr),
("CurrentLocale", v_uint32),
("FpSoftwareStatusRegister", v_uint32)
#FIXME not done!
]
# Some necissary arrays for the PEB
class TlsExpansionBitsArray(VArray):
_field_type_ = v_uint32
_field_count_ = 32
class GdiHandleBufferArray(VArray):
_field_type_ = v_ptr
_field_count_ = 34
class TlsBitMapArray(VArray):
_field_type_ = v_uint32
_field_count_ = 2
class PEB(VStruct):
_fields_ = [
("InheritedAddressSpace", v_uint8),
("ReadImageFileExecOptions", v_uint8),
("BeingDebugged", v_uint8),
("SpareBool", v_uint8),
("Mutant", v_ptr),
("ImageBaseAddress", v_ptr),
("Ldr", v_ptr),
("ProcessParameters", v_ptr),
("SubSystemData", v_ptr),
("ProcessHeap", v_ptr),
("FastPebLock", v_ptr),
("FastPebLockRoutine", v_ptr),
("FastPebUnlockRoutine", v_ptr),
("EnvironmentUpdateCount", v_uint32),
("KernelCallbackTable", v_ptr),
("SystemReserved", v_uint32),
("AtlThunkSListPtr32", v_ptr),
("FreeList", v_ptr),
("TlsExpansionCounter", v_uint32),
("TlsBitmap", v_ptr),
("TlsBitmapBits", TlsBitMapArray),
("ReadOnlySharedMemoryBase", v_ptr),
("ReadOnlySharedMemoryHeap", v_ptr),
("ReadOnlyStaticServerData", v_ptr),
("AnsiCodePageData", v_ptr),
("OemCodePageData", v_ptr),
("UnicodeCaseTableData", v_ptr),
("NumberOfProcessors", v_uint32),
("NtGlobalFlag", v_uint64),
("CriticalSectionTimeout",v_uint64),
("HeapSegmentReserve", v_uint32),
("HeapSegmentCommit", v_uint32),
("HeapDeCommitTotalFreeThreshold", v_uint32),
("HeapDeCommitFreeBlockThreshold", v_uint32),
("NumberOfHeaps", v_uint32),
("MaximumNumberOfHeaps", v_uint32),
("ProcessHeaps", v_ptr),
("GdiSharedHandleTable", v_ptr),
("ProcessStarterHelper", v_ptr),
("GdiDCAttributeList", v_uint32),
("LoaderLock", v_ptr),
("OSMajorVersion", v_uint32),
("OSMinorVersion", v_uint32),
("OSBuildNumber", v_uint16),
("OSCSDVersion", v_uint16),
("OSPlatformId", v_uint32),
("ImageSubsystem", v_uint32),
("ImageSubsystemMajorVersion", v_uint32),
("ImageSubsystemMinorVersion", v_uint32),
("ImageProcessAffinityMask", v_uint32),
("GdiHandleBuffer", GdiHandleBufferArray),
("PostProcessInitRoutine", v_ptr),
("TlsExpansionBitmap", v_ptr),
("TlsExpansionBitmapBits", TlsExpansionBitsArray),
("SessionId", v_uint32),
("AppCompatFlags", v_uint64),
("AppCompatFlagsUser", v_uint64),
("pShimData", v_ptr),
("AppCompatInfo", v_ptr),
("CSDVersion", v_ptr), # FIXME make wide char reader?
("UNKNOWN", v_uint32),
("ActivationContextData", v_ptr),
("ProcessAssemblyStorageMap", v_ptr),
("SystemDefaultActivationContextData", v_ptr),
("SystemAssemblyStorageMap", v_ptr),
("MinimumStackCommit", v_uint32),
]
class HEAP_ENTRY(VStruct):
_fields_ = [
("Size", v_uint16),
("PrevSize", v_uint16),
("SegmentIndex", v_uint8),
("Flags", v_uint8),
("Unused", v_uint8),
("TagIndex", v_uint8)
]
class ListEntry(VStruct):
_fields_ = [
("Flink", v_ptr),
("Blink", v_ptr)
]
class HeapSegmentArray(VArray):
_field_type_ = v_uint32
_field_count_ = 64
class HeapUnArray(VArray):
_field_type_ = v_uint8
_field_count_ = 16
class HeapUn2Array(VArray):
_field_type_ = v_uint8
_field_count_ = 2
class HeapFreeListArray(VArray):
_field_type_ = ListEntry
_field_count_ = 128
class HEAP(VStruct):
_fields_ = [
("Entry", HEAP_ENTRY),
("Signature", v_uint32),
("Flags", v_uint32),
("ForceFlags", v_uint32),
("VirtualMemoryThreshold", v_uint32),
("SegmentReserve", v_uint32),
("SegmentCommit", v_uint32),
("DeCommitFreeBlockThreshold", v_uint32),
("DeCommitTotalFreeThreshold", v_uint32),
("TotalFreeSize", v_uint32),
("MaximumAllocationSize", v_uint32),
("ProcessHeapsListIndex", v_uint16),
("HeaderValidateLength", v_uint16),
("HeaderValidateCopy", v_ptr),
("NextAvailableTagIndex", v_uint16),
("MaximumTagIndex", v_uint16),
("TagEntries", v_ptr),
("UCRSegments", v_ptr),
("UnusedUnCommittedRanges", v_ptr),
("AlignRound", v_uint32),
("AlignMask", v_uint32),
("VirtualAllocBlocks", ListEntry),
("Segments", HeapSegmentArray),
("u", HeapUnArray),
("u2", HeapUn2Array),
("AllocatorBackTraceIndex",v_uint16),
("NonDedicatedListLength", v_uint32),
("LargeBlocksIndex", v_ptr),
("PseudoTagEntries", v_ptr),
("FreeLists", HeapFreeListArray),
("LockVariable", v_uint32),
("CommitRoutine", v_ptr),
("FrontEndHeap", v_ptr),
("FrontEndHeapLockCount", v_uint16),
("FrontEndHeapType", v_uint8),
("LastSegmentIndex", v_uint8)
]
class EXCEPTION_RECORD(VStruct):
_fields_ = [
("ExceptionCode", DWORD),
("ExceptionFlags", DWORD),
("ExceptionRecord", v_ptr), # Pointer to the next
("ExceptionAddress", v_ptr),
("NumberParameters", DWORD),
#("ExceptionInformation", DWORD[NumberParameters])
]
class EXCEPTION_REGISTRATION(VStruct):
_fields_ = [
("prev", v_ptr),
("handler", v_ptr),
]
|
[
"[email protected]"
] | |
4d4d51aa814dfd29d50290261d6d9ce681a302e8
|
d2c4151eff768af64946ababc2e41c13d8973cd3
|
/ARC105/a.py
|
f9f21d8dd457a05c96bd9eb45c5f8bcd344e63e9
|
[] |
no_license
|
Intel-out-side/AtCoder
|
2de19b71981247135432aed2d6d9c2a16c3ab7f0
|
0c419d2df15fff02032432cb1b1323612484e16e
|
refs/heads/master
| 2022-06-23T04:21:12.886072 | 2022-06-13T14:39:07 | 2022-06-13T14:39:07 | 235,240,853 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 229 |
py
|
import math
N = int(input())
for a in range(1, 100):
tmp = N - 3**a
if tmp < 5:
print(-1)
exit()
for b in range(1, 100):
if 5**b == tmp:
print(a, b)
exit()
print(-1)
|
[
"[email protected]"
] | |
bb5260b71015d345a88ae42ed0488418d428fac1
|
1f3bed0bb480a7d163dab73f1d315741ecbc1072
|
/vtkplotter_examples/other/trimesh/section.py
|
5dbcbdb40fe1f58a43833bc7fec62a29fa4cd2b8
|
[
"MIT"
] |
permissive
|
ismarou/vtkplotter-examples
|
1ce78197182da7496b016b27f1d5eb524c49cac6
|
1eefcc026be169ab7a77a5bce6dec8044c33b554
|
refs/heads/master
| 2021-03-11T18:43:22.313457 | 2020-03-03T22:11:25 | 2020-03-03T22:11:25 | 246,551,341 | 4 | 0 | null | 2020-03-11T11:18:48 | 2020-03-11T11:18:47 | null |
UTF-8
|
Python
| false | false | 1,721 |
py
|
import trimesh
import numpy as np
from vtkplotter import show, Plane, Text2D, printc, download
# load the mesh from filename, file objects are also supported
f = download('https://github.com/mikedh/trimesh/raw/master/models/featuretype.STL')
mesh = trimesh.load_mesh(f)
# get a single cross section of the mesh
txt = Text2D('cross section of the mesh', c='k')
mslice = mesh.section(plane_origin=mesh.centroid, plane_normal=[0,0,1])
pl = Plane(mesh.centroid, normal=[0,0,1], sx=6, sy=4, alpha=0.3)
slice_2D, to_3D = mslice.to_planar()
# show objects on N=2 non-synced renderers:
show([(mesh, pl), (slice_2D, txt)], N=2, sharecam=False, axes=True)
# if we wanted to take a bunch of parallel slices, like for a 3D printer
# we can do that easily with the section_multiplane method
# we're going to slice the mesh into evenly spaced chunks along z
# this takes the (2,3) bounding box and slices it into [minz, maxz]
z_extents = mesh.bounds[:,2]
# slice every .125 model units (eg, inches)
z_levels = np.arange(*z_extents, step=0.125)
# find a bunch of parallel cross sections
sections = mesh.section_multiplane(plane_origin=mesh.bounds[0],
plane_normal=[0,0,1],
heights=z_levels)
N = len(sections)
printc("nr. of sections:", N, c='green')
# summing the array of Path2D objects will put all of the curves
# into one Path2D object, which we can plot easily
combined = np.sum(sections)
sections.append([combined, Text2D('combined')])
# show objects in N synced renderers:
show(sections, N=N, axes=True, newPlotter=True)
# the medial axis is available for closed Path2D objects
show(slice_2D + slice_2D.medial_axis(), axes=True, newPlotter=True)
|
[
"[email protected]"
] | |
3dc9519fbdd363764163d3eefaebd7907a2214a1
|
ac0957824d2730170603b6af26e38177965208a6
|
/build/beginner_tutorials/catkin_generated/pkg.develspace.context.pc.py
|
fc1d02a638249646cd9aab97cb2ef6c8c924b854
|
[] |
no_license
|
JelenaKiblik/jekibl-rtech
|
6c9c0ee78e4a2bf539ecac9f050110e96551171f
|
a3b4ef8bdfaba64a1209d695db78b6b7d7074c19
|
refs/heads/master
| 2020-08-01T06:29:20.727647 | 2019-11-17T10:38:14 | 2019-11-17T10:38:14 | 210,897,762 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 461 |
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/ubuntu/jekibl-rtech/devel/include".split(';') if "/home/ubuntu/jekibl-rtech/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "beginner_tutorials"
PROJECT_SPACE_DIR = "/home/ubuntu/jekibl-rtech/devel"
PROJECT_VERSION = "0.0.0"
|
[
"[email protected]"
] | |
9b4cee01a7a4aad6bd4aa41ff11599feddafe8b0
|
14b5679d88afa782dc5d6b35878ab043089a060a
|
/students/LvTao/20200515/测试鼠标是否移动.py
|
317ea2ada7e5968f4d2f40aad82bbbc2832c59da
|
[] |
no_license
|
mutiangua/EIS2020
|
c541ef32623f67f9277945cd39cff3c02f06e4dd
|
92aa2711b763a2c93be238825c445bf2db8da391
|
refs/heads/master
| 2022-11-18T05:21:47.567342 | 2020-07-11T10:11:21 | 2020-07-11T10:11:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 289 |
py
|
import time
import pyautogui
def pos():
pos_mouse=pyautogui.position()
time.sleep(1)
return pos_mouse
while True:
if pos()==pyautogui.position():
continue
else:
x,y=pyautogui.position()
print('当前位置X{},Y{}'.format(x,y))
|
[
"[email protected]"
] | |
74c590dee70d866754a3bfddb67a69646b5796c8
|
7837961d07a64aa1f73d88ed1012ec5e322ab370
|
/src/generative_playground/molecules/lean_settings.py
|
5af24245d63c36b4de49a57e16e9310343c74414
|
[
"MIT"
] |
permissive
|
markharley/generative_playground
|
1281f13cc28c43ede9695e3ffa98713e613023d4
|
56e826e5ca453ee19b0d4298ed27b4db5efd6fd9
|
refs/heads/master
| 2020-05-18T09:50:27.820273 | 2019-05-05T12:03:26 | 2019-05-05T12:03:26 | 184,337,386 | 0 | 0 | null | 2019-04-30T22:01:43 | 2019-04-30T22:01:42 | null |
UTF-8
|
Python
| false | false | 386 |
py
|
import inspect
import os
molecules_root_location = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) + '/'
def get_data_location(molecules=True):
if molecules:
return {'source_data': molecules_root_location + 'data/250k_rndm_zinc_drugs_clean.smi'}
else:
return {'source_data': molecules_root_location + 'data/equation2_15_dataset.txt'}
|
[
"[email protected]"
] | |
98cc0764581e92078db33632b9a8330ad97806de
|
51d7e8c09793b50d45731bd5ab9b531b525cf6db
|
/tests/garage/torch/algos/test_maml_ppo.py
|
ea4ac63fd8c01d020ad7379470f45d65de0217bd
|
[
"MIT"
] |
permissive
|
fangqyi/garage
|
454247849a6a3f547557b3fac3787ba9eeb0391f
|
ddafba385ef005f46f913ab352f9638760e5b412
|
refs/heads/master
| 2023-02-25T00:43:18.903328 | 2021-01-26T01:52:15 | 2021-01-26T01:52:15 | 267,667,220 | 0 | 0 |
MIT
| 2020-05-28T18:35:08 | 2020-05-28T18:35:07 | null |
UTF-8
|
Python
| false | false | 2,582 |
py
|
"""This script is a test that fails when MAML-TRPO performance is too low."""
import pytest
try:
# pylint: disable=unused-import
import mujoco_py # noqa: F401
except ImportError:
pytest.skip('To use mujoco-based features, please install garage[mujoco].',
allow_module_level=True)
except Exception: # pylint: disable=broad-except
pytest.skip(
'Skipping tests, failed to import mujoco. Do you have a '
'valid mujoco key installed?',
allow_module_level=True)
import torch
from garage.envs import GarageEnv
from garage.envs import normalize
from garage.envs.mujoco import HalfCheetahDirEnv
from garage.experiment import deterministic, LocalRunner
from garage.torch.algos import MAMLPPO
from garage.torch.policies import GaussianMLPPolicy
from garage.torch.value_functions import GaussianMLPValueFunction
from tests.fixtures import snapshot_config
@pytest.mark.mujoco
class TestMAMLPPO:
"""Test class for MAML-PPO."""
def setup_method(self):
"""Setup method which is called before every test."""
self.env = GarageEnv(
normalize(HalfCheetahDirEnv(), expected_action_scale=10.))
self.policy = GaussianMLPPolicy(
env_spec=self.env.spec,
hidden_sizes=(64, 64),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None,
)
self.value_function = GaussianMLPValueFunction(env_spec=self.env.spec,
hidden_sizes=(32, 32))
def teardown_method(self):
"""Teardown method which is called after every test."""
self.env.close()
def test_ppo_pendulum(self):
"""Test PPO with Pendulum environment."""
deterministic.set_seed(0)
rollouts_per_task = 5
max_path_length = 100
runner = LocalRunner(snapshot_config)
algo = MAMLPPO(env=self.env,
policy=self.policy,
value_function=self.value_function,
max_path_length=max_path_length,
meta_batch_size=5,
discount=0.99,
gae_lambda=1.,
inner_lr=0.1,
num_grad_updates=1)
runner.setup(algo, self.env)
last_avg_ret = runner.train(n_epochs=10,
batch_size=rollouts_per_task *
max_path_length)
assert last_avg_ret > -5
|
[
"[email protected]"
] | |
fac3f04df019414ae685c3823333bcb2f171d65d
|
52381a4fc02e90ce1fcfffd8d9876d9e8f44c248
|
/core/jobs/batch_jobs/email_deletion_jobs.py
|
895c4067d3191f9ccbef1490e639ea0c12d09bab
|
[
"Apache-2.0"
] |
permissive
|
ankita240796/oppia
|
18aa1609a0f237ce76142b2a0d3169e830e5bcdd
|
ba4f072e494fd59df53fecc37e67cea7f9727234
|
refs/heads/develop
| 2022-07-11T01:11:53.136252 | 2022-06-30T08:55:49 | 2022-06-30T08:55:49 | 160,626,761 | 0 | 0 |
Apache-2.0
| 2020-04-28T16:12:26 | 2018-12-06T06:02:18 |
Python
|
UTF-8
|
Python
| false | false | 5,406 |
py
|
# coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Validation Jobs for blog models"""
from __future__ import annotations
from core.jobs import base_jobs
from core.jobs.io import ndb_io
from core.jobs.transforms import job_result_transforms
from core.jobs.types import job_run_result
from core.platform import models
import apache_beam as beam
MYPY = False
if MYPY: # pragma: no cover
from mypy_imports import email_models
from mypy_imports import feedback_models
from mypy_imports import user_models
(email_models, feedback_models, user_models) = models.Registry.import_models([
models.NAMES.email, models.NAMES.feedback, models.NAMES.user
])
class DeleteUnneededEmailRelatedModelsJob(base_jobs.JobBase):
"""Job that deletes emails models that belonged to users that were deleted
as part of the wipeout process.
"""
def run(self) -> beam.PCollection[job_run_result.JobRunResult]:
deleted_user_ids_collection = (
self.pipeline
| 'Get all deleted user models' >> ndb_io.GetModels(
user_models.DeletedUserModel.get_all())
| 'Extract user IDs' >> beam.Map(
lambda deleted_user_model: deleted_user_model.id)
)
deleted_user_ids = beam.pvalue.AsIter(deleted_user_ids_collection)
sent_email_models_to_delete = (
self.pipeline
| 'Get all sent email models' >> ndb_io.GetModels(
email_models.SentEmailModel.get_all())
| 'Filter sent email models that belong to deleted users' >> (
beam.Filter(
lambda model, ids: (
model.sender_id in ids or model.recipient_id in ids),
ids=deleted_user_ids
))
)
sent_email_models_to_delete_result = (
sent_email_models_to_delete
| 'Count sent email models to be deleted' >> (
job_result_transforms.CountObjectsToJobRunResult('SENT EMAILS'))
)
bulk_email_models_to_delete = (
self.pipeline
| 'Get all bulk email models' >> ndb_io.GetModels(
email_models.BulkEmailModel.get_all())
| 'Filter bulk email models that belong to deleted users' >> (
beam.Filter(
lambda model, ids: model.sender_id in ids,
ids=deleted_user_ids
))
)
bulk_email_models_to_delete_result = (
bulk_email_models_to_delete
| 'Count bulk email models to be deleted' >> (
job_result_transforms.CountObjectsToJobRunResult('BULK EMAILS'))
)
unsent_feedback_email_models_to_delete = (
self.pipeline
| 'Get all unsent feedback models' >> ndb_io.GetModels(
feedback_models.UnsentFeedbackEmailModel.get_all())
| 'Filter unsent feedback models that belong to deleted users' >> (
beam.Filter(
lambda model, ids: model.id in ids, ids=deleted_user_ids))
)
unsent_feedback_email_models_to_delete_result = (
unsent_feedback_email_models_to_delete
| 'Count unsent feedback email models to be deleted' >> (
job_result_transforms.CountObjectsToJobRunResult(
'FEEDBACK EMAILS'))
)
user_bulk_emails_models_to_delete = (
self.pipeline
| 'Get all user bulk email models' >> ndb_io.GetModels(
user_models.UserBulkEmailsModel.get_all())
| 'Filter user bulk email models that belong to deleted users' >> (
beam.Filter(
lambda model, ids: model.id in ids, ids=deleted_user_ids))
)
user_bulk_emails_models_to_delete_result = (
user_bulk_emails_models_to_delete
| 'Count user bulk email models to be deleted' >> (
job_result_transforms.CountObjectsToJobRunResult(
'USER BULK EMAILS'))
)
unused_models_deletion = (
(
sent_email_models_to_delete,
bulk_email_models_to_delete,
unsent_feedback_email_models_to_delete,
user_bulk_emails_models_to_delete
)
| 'Merge models' >> beam.Flatten()
| 'Extract keys' >> beam.Map(lambda model: model.key)
| 'Delete models' >> ndb_io.DeleteModels()
)
return (
(
sent_email_models_to_delete_result,
bulk_email_models_to_delete_result,
unsent_feedback_email_models_to_delete_result,
user_bulk_emails_models_to_delete_result,
)
| 'Merge results' >> beam.Flatten()
)
|
[
"[email protected]"
] | |
c5382963180478fd862fc67e67e37e67fa689e13
|
f829d2c4347ce85ae6dd769f0aab2491d8ee4751
|
/old/.history/a_20201125194051.py
|
f79c3e45da0a2e2a0076008652c3fd3694e249f9
|
[
"LicenseRef-scancode-mulanpsl-2.0-en",
"MulanPSL-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
pscly/bisai1
|
0ef18d4aa12541322947a5be250ef7f260c93276
|
257c02c8f23e373834a6275683470f3f081b7373
|
refs/heads/18ti
| 2023-01-21T22:02:15.582281 | 2020-11-29T09:30:33 | 2020-11-29T09:30:33 | 316,918,262 | 0 | 0 |
NOASSERTION
| 2020-11-29T12:33:33 | 2020-11-29T09:39:21 |
Python
|
UTF-8
|
Python
| false | false | 969 |
py
|
# for n in range(400,500):
# i = n // 100
# j = n // 10 % 10
# k = n % 10
# if n == i ** 3 + j ** 3 + k ** 3:
# print(n)
# 第一道题(16)
# input("请输入(第一次):")
# s1 = input("请输入(第二次):")
# l1 = s1.split(' ')
# l2 = []
# for i in l1:
# if i.isdigit():
# l2.append(int(i))
# for i in l2:
# if not (i % 6):
# print(i, end=" ")
# 第二道题(17)
out_l1 = []
def bian_int_list(l1):
re_l1 = [] # 返回出去的列表
for i in l1:
re_l1.append(int(i))
return re_l1
def jisuan(int_num):
he1 = 0
global out_l1
for i in str(int_num):
he1 += int(i)**2
if he1 > int(str_num):
out_l1.append(str_num)
return True
return None
while 1:
in_1 = input("请输入数值:")
nums_l1 = in_1.split(' ')
for i in range(nums_l1[0, nums_l1[1]+1]):
if jisuan(i):
out_l1.append(i)
print(i)
|
[
"[email protected]"
] | |
0a64700b1408521e4cb652493afa4c3773da70d3
|
fde90006ac56f38863ebbff75fe0da7296d8d4b6
|
/src/cfehome/old_settings.py
|
c58c730fb22a082126c92754a918c204c7235049
|
[] |
no_license
|
carter3689/django-intro
|
c1c32d742548e27732580d32321648f054d1338d
|
155042398d9f2505e44dfa9cfe0a2f7ad3f8131d
|
refs/heads/master
| 2021-01-01T04:32:49.347962 | 2017-07-14T04:57:02 | 2017-07-14T04:57:02 | 97,194,405 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,099 |
py
|
"""
Django settings for cfehome project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'p!@78+nocob7yj%nean8wwes$s_vmp2$!sahv8#gopd0mi20zn'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cfehome.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'cfehome.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
8f779ae7bd790997e2a3fce3a42a64b70bbd7709
|
3047f66549c5928cf07bc14bd3ff276ce8458f22
|
/config.py
|
bf1021d3b9f955d335b7c9d6608e18fcdcae53d8
|
[] |
no_license
|
2429581027/spe2018
|
b47faf01b5954552cbfe4caed32923663c716396
|
3649104935fc8b519450d6d12c78110a40f5aaec
|
refs/heads/master
| 2022-12-06T17:12:08.324913 | 2020-08-09T16:34:07 | 2020-08-09T16:34:07 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,514 |
py
|
'''
file: config.py
date: 2018_09_19
author: Junjie Cao
'''
import argparse
###################################
## shared parameters
parser = argparse.ArgumentParser(description = 'spe 2019, reconstruction from incompleted points')
#parser.add_argument('--data_root', type = str, default = '/data/spe_database_old', help = 'it is a shared parameter')
parser.add_argument('--data_root', type = str,default = '../../data', help = 'it is a shared parameter') # for my macbook
parser.add_argument('--outf', type=str, default='../../data/spe_out', help='output folder')# /Users/jjcao/data/spe_data_train_11348
parser.add_argument('--model', type=str, default = './model/0.pkl', help='saved/pre_trained model')
# parser.add_argument('--modelBeta', type=str, default = './model/SPENetSiam.pkl', help='saved/pre_trained model')
# parser.add_argument('--modelPose', type=str, default = './model/SPENetSiam.pkl', help='saved/pre_trained model')
# parser.add_argument('--modelGen', type=str, default = './model/SPENetSiam_pointnetmini_PointGenCon_84_0.109_s0.106_p0.001_3d0.0004_decoded0.0002_j0.0001-centerBinput-stnOutput.pkl', help='saved/pre_trained model')
parser.add_argument('--center_input', default = True, type = bool, help = 'center input in dataset')
parser.add_argument('--trans_smpl_generated', default = 'stn', type = str, help = 'None, stn, center')
# should >= number of GPU*2. e.g. 72 batch in 3 GPU leads to 24 batch in each GPU. # If the batches number on each GPU == 1, nn.BatchNorm1d fails.
# large batch size => better convergence. # 16 for 6-9G gpu with decoder, 24 for ? without decoder
#parser.add_argument('--batch_size', type=int, default=128, help='input batch size') #72=24*3=18*4, 96=24*4
parser.add_argument('--batch_size', type=int, default=2, help='input batch size') # for debug on mac
parser.add_argument('--start_epoch', type=int, default = 0, help='')
parser.add_argument('--no_epoch', type=int, default = 121, help='number of epochs to train for')#121
parser.add_argument('--lr',type = float,default = 0.001,help = 'learning rate')#0.001
parser.add_argument('--step_lr', type = float, default = 10, help = 'encoder learning rate.')
parser.add_argument('--step_save', type = float, default = 2, help = 'step for saving model.')
parser.add_argument('--shape_ratio',type = float, default = 40.0 ,help = 'weight of shape loss') #40 for GMOF loss function
parser.add_argument('--pose_ratio',type = float, default = 400.0, help = 'weight of pose')# 400 for GMOF loss function
#default: 400. 20 is enough for making sure that predicated pose parameter does not contain global rotation
parser.add_argument('--threeD_ratio',type = float, default = 400.0, help = 'weight of vertices decoded by smpl')
#default: 200. 20 is enough for making sure that predicated pose parameter does not contain global rotation
parser.add_argument('--j3d_ratio',type = float, default = 0.0, help = 'weight of 3d key points decoded by smpl') #200
parser.add_argument('--decoded_ratio',type = float, default = 400.0, help = 'weight of vertices decoded by decoder')#400,
#parser.add_argument('--with_chamfer',default = False, type = bool,help = 'use chamfer loss')
#parser.add_argument('--chamfer_ratio',type = float, default = 0.0, help = 'weight of 3d chamfer distance')#50
###################################
## parameters for training
parser.add_argument('--network', type = str,default = 'SPENet',help = 'SPENet, SPENetSiam, SPENetBeta, SPENetPose')
parser.add_argument('--encoder', type = str,default = 'pointnetmini',help = 'pointnetmini, pointnet or pointnet2')
parser.add_argument('--decoder', type = str,default = 'None',help = 'None, PointGenCon or pointnet2 or dispNet?')
parser.add_argument('--with_stn', default = 'STN3dTR', type = str, help = 'use STN3dR, STN3dRQuad, STN3dTR, or None in encoder')
parser.add_argument('--with_stn_feat', default = False, type = bool, help = 'use stn feature transform in encoder or not')
parser.add_argument('--pervertex_weight', type = str, default = 'None', help = 'None or ')#./data/pervertex_weight_sdf.npz
parser.add_argument('--point_count', type=int, default=2500, help='the count of vertices in the input pointcloud for training')
parser.add_argument('--workers', type=int, default=0, help='number of data loading workers - 0 means same thread as main execution')
parser.add_argument('--momentum',type = float,default = 0.9,help = 'momentum')
# weight decay = 0.0001, it is very important for training the network using adam
parser.add_argument('--wd', type = float, default = 0.0001, help = 'encoder weight decay rate.')
parser.add_argument('--ls', type = str, default = 'L2', help = 'loss function: L2, L1, or GMOF (from less robust to more robust).')
parser.add_argument('--vis', type=str, default= 'spe', help='visdom environment, use visualization in training')
parser.add_argument('--smpl_mean_theta_path', type = str, default = './data/neutral_smpl_mean_params.h5', help = 'the path for mean smpl theta value')
parser.add_argument('--smpl_model',type = str,
default = './data/neutral_smpl_with_cocoplus_reg.txt',
help = 'smpl model path')
########
# for reconstruction, correspondence
parser.add_argument('--HR', type=int, default=0, help='Use high Resolution template for better precision in the nearest neighbor step ?')
parser.add_argument('--nepoch', type=int, default=3000, help='number of epochs to train for during the regression step')
# parser.add_argument('--inputA', type=str, default = "/data/MPI-FAUST/test/scans/test_scan_021.ply", help='your path to mesh 0')
# parser.add_argument('--inputB', type=str, default = "/data/MPI-FAUST/test/scans/test_scan_011.ply", help='your path to mesh 1')
parser.add_argument('--inputA', type=str, default = "data/example_0.ply", help='your path to mesh 0')
parser.add_argument('--inputB', type=str, default = "data/example_1.ply", help='your path to mesh 1')
#parser.add_argument('--num_points', type=int, default = 6890, help='number of points fed to poitnet') # point_count
#parser.add_argument('--num_angles', type=int, default = 300, help='number of angle in the search of optimal reconstruction. Set to 1, if you mesh are already facing the cannonical direction as in data/example_1.ply')
parser.add_argument('--clean', type=int, default=1, help='if 1, remove points that dont belong to any edges')
parser.add_argument('--scale', type=int, default=1, help='if 1, scale input mesh to have same volume as the template')
parser.add_argument('--project_on_target', type=int, default=0, help='if 1, projects predicted correspondences point on target mesh')
########
# for data generation
parser.add_argument('--human_count', type = int, default = 30000, help = 'the count of male/femal in generated database')
parser.add_argument('--sample_count', type = int, default = 0, help = 'the count of samples of a SMPL template mesh') # 2500
parser.add_argument('--op', type = str, default = 'generate', help = 'generate, distill, unify')
parser.add_argument('--gender', type = str, default = 'm', help = 'm for male, f for female, b for both')
parser.add_argument('--data_type', type = str, default = 'w', help = 'w for whole, f for front view, fb for front & back view')
# spe_dataset_train_specifiedPose
parser.add_argument('--database_train', type = str, default = 'spe_dataset_train', help = 'name')
parser.add_argument('--database_val', type = str, default = 'spe_dataset_val', help = 'name')
args = parser.parse_args()
|
[
"[email protected]"
] | |
5836ad6384982599fa5386c942f276b1fcbd7022
|
05fc3134da52ab0f1d95d9c4304bde68fc2a56cc
|
/tasks.py
|
a5661e372b313f07d146231967b867407d64dc2f
|
[
"AGPL-3.0-only"
] |
permissive
|
lino-framework/extjs6
|
b046d43bac3676afd2bbad825a8c478c2007471f
|
6c8cf927e265bf23ad15d07da0b01c087c7bff07
|
refs/heads/master
| 2023-07-21T15:39:04.616082 | 2023-07-10T20:35:39 | 2023-07-10T20:35:39 | 46,885,420 | 6 | 1 |
BSD-2-Clause
| 2018-02-13T05:52:43 | 2015-11-25T20:40:26 |
CSS
|
UTF-8
|
Python
| false | false | 448 |
py
|
from atelier.invlib import setup_from_tasks
ns = setup_from_tasks(
globals(), "lino_extjs6",
languages="en de fr et".split(),
# tolerate_sphinx_warnings=True,
blogref_url = 'https://luc.lino-framework.org',
revision_control_system='git',
# locale_dir='lino_extjs/extjs/locale',
cleanable_files=['docs/api/lino_extjs6.*'],
demo_projects=[
'lino_extjs6.projects.team6',
'lino_extjs6.projects.lydia6'])
|
[
"[email protected]"
] | |
677352f08e920cb21713ec2f072334eb23f02ebb
|
a56e5570ab57e4d3c44c9c6ba44bdacac9fa1ad8
|
/insertion_sort.py
|
027c54743f008f5ce2dac82c48a2eeee27837080
|
[] |
no_license
|
teknofage/CS-2.1-Sorting_Algorithms
|
a7db54c29af5c939022d4dd6453a0529256a3bc1
|
e42b64c4d606d76102b5930ae8e74822a75999ae
|
refs/heads/main
| 2023-01-20T16:52:00.816333 | 2020-12-05T07:50:55 | 2020-12-05T07:50:55 | 308,201,025 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 427 |
py
|
def insertionSort(alist):
for i in range(1,len(alist)):
#element to be compared
current = alist[i]
#comparing the current element with the sorted portion and swapping
while i>0 and alist[i-1]>current:
alist[i] = alist[i-1]
i = i-1
alist[i] = current
#print(alist)
return alist
print([5,2,1,9,0,4,6])
print(insertionSort([5,2,1,9,0,4,6]))
|
[
"[email protected]"
] | |
61d67338da326c0b82ae9ef359f504ccba54da59
|
ed298f7b16e0a1fcc4d5ddc9da324247d200bc8a
|
/cleanup.py
|
03ca72d1bca9728c96256d120fb9e0c22c7a7d14
|
[] |
no_license
|
stella-gao/deepfunc
|
ed1a67f0a0e682a2e0d1fde05a13fe190ec6f07e
|
a587512519c234c7ab70eb3fd504a98cd935b4ab
|
refs/heads/master
| 2021-01-21T00:11:48.502524 | 2016-04-28T17:18:44 | 2016-04-28T17:18:44 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,207 |
py
|
#!/usr/bin/env python
'''
THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python gen_next_level_data.py
'''
import numpy
from keras.models import Sequential
from keras.layers.core import (
Dense, Dropout, Activation, Flatten)
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.embeddings import Embedding
from keras.optimizers import SGD
from sklearn.metrics import classification_report
from keras.utils import np_utils
from utils import (
shuffle, train_val_test_split,
get_gene_ontology,
get_model_max_features,
encode_seq_one_hot)
import sys
import os
from collections import deque
LAMBDA = 24
DATA_ROOT = 'data/cnn/'
CUR_LEVEL = 'level_2/'
NEXT_LEVEL = 'level_3/'
MAXLEN = 1000
def get_model(
go_id,
parent_id,
nb_filter=64,
nb_row=3,
nb_col=3,
pool_length=2):
filepath = DATA_ROOT + CUR_LEVEL + parent_id + '/' + go_id + '.hdf5'
model = Sequential()
model.add(Convolution2D(nb_filter, nb_row, nb_col,
border_mode='valid',
input_shape=(1, MAXLEN, 20)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(pool_length, pool_length)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(
loss='binary_crossentropy', optimizer='adam', class_mode='binary')
# Loading saved weights
print 'Loading weights for ' + go_id
model.load_weights(filepath)
return model
def main(*args, **kwargs):
if len(args) < 3:
raise Exception('Please provide function id')
parent_id = args[1]
go_id = args[2]
if len(args) == 4:
level = int(args[3])
global CUR_LEVEL
global NEXT_LEVEL
CUR_LEVEL = 'level_' + str(level) + '/'
NEXT_LEVEL = 'level_' + str(level + 1) + '/'
try:
model = get_model(go_id, parent_id)
except Exception, e:
print e
filepath = DATA_ROOT + CUR_LEVEL + parent_id + '/' + go_id + '.hdf5'
print "Removing " + filepath
os.remove(filepath)
if __name__ == '__main__':
main(*sys.argv)
|
[
"[email protected]"
] | |
f4277637101ca2452185a124b44a2047eef1c208
|
b1931901a2599e170f4c0dbbecc1678ecd976904
|
/Tools/Scripts/webkitpy/port/simulator_process.py
|
c1147b2bbf734a964fff63af7b4931702f8a1399
|
[] |
no_license
|
walmis/WPEWebKit-upstream
|
b75872f73073a2d58da0a9a51fc9aab891fb897d
|
4b3a7b8cdd8afc12162fc2e0dcf474685e3fcf58
|
refs/heads/master
| 2023-03-10T11:19:26.173072 | 2017-03-22T09:28:59 | 2017-03-22T09:28:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,495 |
py
|
# Copyright (C) 2017 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import errno
import os
import signal
import time
from webkitpy.port.server_process import ServerProcess
from webkitpy.xcode.simulator import Simulator
class SimulatorProcess(ServerProcess):
class Popen(object):
def __init__(self, pid, stdin, stdout, stderr):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pid = pid
self.returncode = None
def poll(self):
if self.returncode:
return self.returncode
try:
os.kill(self.pid, 0)
except OSError, err:
assert err.errno == errno.ESRCH
self.returncode = 1
return self.returncode
def wait(self):
while not self.poll():
time.sleep(0.01) # In seconds
return self.returncode
def __init__(self, port_obj, name, cmd, env=None, universal_newlines=False, treat_no_data_as_crash=False, worker_number=None):
self._bundle_id = port_obj.app_identifier_from_bundle(cmd[0])
self._device = port_obj.device_for_worker_number(worker_number)
env['IPC_IDENTIFIER'] = self._bundle_id + '-' + self._device.udid
# This location matches the location used by WebKitTestRunner and DumpRenderTree
# for the other side of these fifos.
file_location = '/tmp/' + env['IPC_IDENTIFIER']
self._in_path = file_location + '_IN'
self._out_path = file_location + '_OUT'
self._error_path = file_location + '_ERROR'
super(SimulatorProcess, self).__init__(port_obj, name, cmd, env, universal_newlines, treat_no_data_as_crash)
def _reset(self):
super(SimulatorProcess, self)._reset()
# Unlinks are needed on reset in the event that the Python code unexpectedly
# fails between _start() and kill(). This can be caused by a SIGKILL or a crash.
# This ensures that os.mkfifo() will not be obstructed by previous fifos.
# Other files will still cause os.mkfifo() to fail.
try:
os.unlink(self._in_path)
except:
pass
try:
os.unlink(self._out_path)
except:
pass
try:
os.unlink(self._error_path)
except:
pass
def _start(self):
if self._proc:
raise ValueError('{} already running'.format(self._name))
self._reset()
FIFO_PERMISSION_FLAGS = 0600 # Only owner can read and write
os.mkfifo(self._in_path, FIFO_PERMISSION_FLAGS)
os.mkfifo(self._out_path, FIFO_PERMISSION_FLAGS)
os.mkfifo(self._error_path, FIFO_PERMISSION_FLAGS)
stdout = os.fdopen(os.open(self._out_path, os.O_RDONLY | os.O_NONBLOCK), 'rb')
stderr = os.fdopen(os.open(self._error_path, os.O_RDONLY | os.O_NONBLOCK), 'rb')
self._pid = self._device.launch_app(self._bundle_id, self._cmd[1:], env=self._env)
def handler(signum, frame):
assert signum == signal.SIGALRM
raise Exception('Timed out waiting for process to open {}'.format(self._in_path))
signal.signal(signal.SIGALRM, handler)
signal.alarm(3) # In seconds
stdin = None
try:
stdin = open(self._in_path, 'w', 0) # Opening with no buffering, like popen
except:
# We set self._proc as _reset() and _kill() depend on it.
self._proc = SimulatorProcess.Popen(self._pid, stdin, stdout, stderr)
if self._proc.poll() is not None:
self._reset()
raise Exception('App {} crashed before stdin could be attached'.format(os.path.basename(self._cmd[0])))
self._kill()
self._reset()
raise
signal.alarm(0) # Cancel alarm
self._proc = SimulatorProcess.Popen(self._pid, stdin, stdout, stderr)
def stop(self, timeout_secs=3.0):
try:
os.kill(self._pid, signal.SIGTERM)
except OSError as err:
assert err.errno == errno.ESRCH
pass
return super(SimulatorProcess, self).stop(timeout_secs)
|
[
"[email protected]@268f45cc-cd09-0410-ab3c-d52691b4dbfc"
] |
[email protected]@268f45cc-cd09-0410-ab3c-d52691b4dbfc
|
ab3168a7ed6a211db35ec3e6069861560ba39898
|
1986f044d6476fab476a9b5eb9a95cc30d6a8eac
|
/Chapter07/pygal_1.py
|
c30537be5d2fb85031674c73d8f2dbb96a6b3e07
|
[
"MIT"
] |
permissive
|
PacktPublishing/Mastering-Python-Networking
|
711f47ecff9ca2fec51f948badff22cd8c73ada4
|
52a2827919db1773f66700f3946390f200bd6dab
|
refs/heads/master
| 2023-02-08T01:39:44.670413 | 2023-01-30T09:03:30 | 2023-01-30T09:03:30 | 82,666,812 | 138 | 127 |
MIT
| 2020-11-05T11:34:15 | 2017-02-21T10:25:34 |
Python
|
UTF-8
|
Python
| false | false | 865 |
py
|
#!/usr/bin/env python3
import pygal
x_time = []
out_octets = []
out_packets = []
in_octets = []
in_packets = []
with open('results.txt', 'r') as f:
for line in f.readlines():
# eval(line) reads in each line as dictionary instead of string
line = eval(line)
x_time.append(line['Time'])
out_packets.append(float(line['Gig0-0_Out_uPackets']))
out_octets.append(float(line['Gig0-0_Out_Octet']))
in_packets.append(float(line['Gig0-0_In_uPackets']))
in_octets.append(float(line['Gig0-0_In_Octet']))
line_chart = pygal.Line()
line_chart.title = "Router 1 Gig0/0"
line_chart.x_labels = x_time
line_chart.add('out_octets', out_octets)
line_chart.add('out_packets', out_packets)
line_chart.add('in_octets', in_octets)
line_chart.add('in_packets', in_packets)
line_chart.render_to_file('pygal_example_2.svg')
|
[
"[email protected]"
] | |
d616c9ac31f6b34ba0c1d64c0a527e44a5450332
|
d41d18d3ea6edd2ec478b500386375a8693f1392
|
/plotly/validators/layout/scene/zaxis/tickfont/_color.py
|
c9c85093d3ba20c31f8f2d30cc4ebd575af30377
|
[
"MIT"
] |
permissive
|
miladrux/plotly.py
|
38921dd6618650d03be9891d6078e771ffccc99a
|
dbb79e43e2cc6c5762251537d24bad1dab930fff
|
refs/heads/master
| 2020-03-27T01:46:57.497871 | 2018-08-20T22:37:38 | 2018-08-20T22:37:38 | 145,742,203 | 1 | 0 |
MIT
| 2018-08-22T17:37:07 | 2018-08-22T17:37:07 | null |
UTF-8
|
Python
| false | false | 449 |
py
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name='color',
parent_name='layout.scene.zaxis.tickfont',
**kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='plot',
role='style',
**kwargs
)
|
[
"[email protected]"
] | |
287a191c2572037ada7b9ea37bd0ecbd1f5e4bc0
|
856e9a8afcb81ae66dd998b0d2cc3556c9f315ea
|
/tests/plugins/test_git_filters.py
|
2a4efe5e20573ce2e090ef58b7c78ce98a865449
|
[
"MIT"
] |
permissive
|
dexy/dexy
|
1d5c999830de4663c05a09f4cd00b1628dfc8d46
|
323c1806e51f75435e11d2265703e68f46c8aef3
|
refs/heads/develop
| 2023-06-10T08:02:45.076551 | 2021-02-28T22:40:41 | 2021-02-28T22:40:41 | 1,506,989 | 141 | 34 |
MIT
| 2020-06-15T17:44:50 | 2011-03-21T14:48:28 |
Python
|
UTF-8
|
Python
| false | false | 3,046 |
py
|
from dexy.exceptions import UserFeedback
from dexy.filters.git import repo_from_path
from dexy.filters.git import repo_from_url
from dexy.filters.git import generate_commit_info
from tests.utils import assert_in_output
from tests.utils import runfilter
from tests.utils import tempdir
from nose.exc import SkipTest
import os
import json
REMOTE_REPO_HTTPS = "https://github.com/ananelson/dexy-templates"
PATH_TO_LOCAL_REPO = os.path.expanduser("~/dev/testrepo")
# TODO use subprocess to check out a repo to a temp dir, or have a repo in data
# dir, or use [gasp] submodules.
# Skip-detection performed at import time: these tests need pygit2, a local
# checkout at PATH_TO_LOCAL_REPO and internet access.  SKIP records
# (should_skip, reason).
try:
    import pygit2
    import urllib
    no_local_repo = not os.path.exists(PATH_TO_LOCAL_REPO)
    try:
        # NOTE(review): urllib.urlopen is Python 2 only; on Python 3 this
        # raises AttributeError, which is NOT caught here -- confirm the
        # supported interpreter version.
        urllib.urlopen("http://google.com")
        no_internet = False
    except IOError:
        no_internet = True
    if no_local_repo:
        SKIP = (True, "No local repo at %s." % PATH_TO_LOCAL_REPO)
    elif no_internet:
        SKIP = (True, "Internet not available.")
    else:
        SKIP = (False, None)
except ImportError:
    SKIP = (True, "pygit2 not installed")
# Raise nose's SkipTest for the whole module when any prerequisite is missing.
def skip():
    if SKIP[0]:
        raise SkipTest(SKIP[1])
skip()
def test_run_gitrepo():
    """Running the `repo` filter on a remote repo yields many doc nodes."""
    with runfilter("repo", REMOTE_REPO_HTTPS) as doc:
        node_count = len(doc.wrapper.nodes)
        assert node_count > 20
def test_generate_commit_info():
    """generate_commit_info() extracts author metadata from a commit."""
    repo, _remote = repo_from_url(REMOTE_REPO_HTTPS)
    first_ref = repo.lookup_reference(repo.listall_references()[0])
    info = generate_commit_info(repo[first_ref.target])
    assert info['author-name'] == "Ana Nelson"
    assert info['author-email'] == "[email protected]"
def test_git_commit():
    """The gitcommit filter exposes author info and a JSON patch list."""
    with runfilter("gitcommit", REMOTE_REPO_HTTPS) as doc:
        output = doc.output_data()
        json.loads(output['patches'])  # must at least be valid JSON
        assert output['author-name'] == "Ana Nelson"
        assert output['author-email'] == "[email protected]"
def test_git_log():
    """gitlog on the local repo should include its README commit message."""
    expected = "Add README file."
    assert_in_output("gitlog", PATH_TO_LOCAL_REPO, expected)
def test_git_log_remote():
    """gitlog should also work against a remote HTTPS repo URL."""
    assert_in_output("gitlog", REMOTE_REPO_HTTPS, "Rename")
def test_repo_from_url():
    """repo_from_url() returns the repo plus its 'origin' remote."""
    _repo, origin = repo_from_url(REMOTE_REPO_HTTPS)
    assert origin.name == 'origin'
    assert origin.url == REMOTE_REPO_HTTPS
def test_repo_from_path():
    """repo_from_path() locates the .git directory of a local checkout."""
    repo, _remote = repo_from_path(PATH_TO_LOCAL_REPO)
    assert ".git" in repo.path
def test_repo_from_invalid_path():
    """A directory without a git repository should raise UserFeedback."""
    with tempdir():
        try:
            repo_from_path(".")
        except UserFeedback as err:
            assert "no git repository was found at '.'" in str(err)
        else:
            raise AssertionError("expected UserFeedback")
def test_run_git():
    """Smoke test: the git filter runs against a local repository."""
    with runfilter("git", PATH_TO_LOCAL_REPO) as doc:
        doc.output_data()
def test_run_git_remote():
    """Smoke test: the git filter runs against a remote repository URL."""
    with runfilter("git", REMOTE_REPO_HTTPS) as doc:
        doc.output_data()
|
[
"[email protected]"
] | |
6cc1dc4c8e6b81d2106b35562acc5a9448a76b64
|
fd7a9faee9e2a6dbf89e54e1a7f228fcaf6911e1
|
/tests/test_cnocr.py
|
68b2776100394422842303886c7a0172e6ee7cb5
|
[
"NCSA",
"Zlib",
"Intel",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"BSD-2-Clause-Views",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
wting861006/cnocr
|
f685c607e7ba84a8ead5a6a72301768c832a6320
|
9cb1cd57c2795007850bd25616880b15e4a3029d
|
refs/heads/master
| 2023-09-04T18:36:30.822721 | 2021-11-05T12:03:23 | 2021-11-05T12:03:23 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,410 |
py
|
# coding: utf-8
# Copyright (C) 2021, [Breezedeus](https://github.com/breezedeus).
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sys
import pytest
import numpy as np
from PIL import Image
import Levenshtein
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(1, os.path.dirname(os.path.abspath(__file__)))
from cnocr import CnOcr
from cnocr.utils import read_img
from cnocr.consts import NUMBERS, AVAILABLE_MODELS
from cnocr.line_split import line_split
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
example_dir = os.path.join(root_dir, 'docs/examples')
CNOCR = CnOcr(model_name='densenet-s-fc', model_epoch=None)
SINGLE_LINE_CASES = [
('20457890_2399557098.jpg', ['就会哈哈大笑。3.0']),
('rand_cn1.png', ['笠淡嘿骅谧鼎皋姚歼蠢驼耳胬挝涯狗蒽孓犷']),
('rand_cn2.png', ['凉芦']),
('helloworld.jpg', ['Hello world!你好世界']),
]
MULTIPLE_LINE_CASES = [
('hybrid.png', ['o12345678']),
(
'multi-line_en_black.png',
[
'transforms the image many times. First, the image goes through many convolutional layers. In those',
'convolutional layers, the network learns new and increasingly complex features in its layers. Then the ',
'transformed image information goes through the fully connected layers and turns into a classification ',
'or prediction.',
],
),
(
'multi-line_en_white.png',
[
'This chapter is currently only available in this web version. ebook and print will follow.',
'Convolutional neural networks learn abstract features and concepts from raw image pixels. Feature',
'Visualization visualizes the learned features by activation maximization. Network Dissection labels',
'neural network units (e.g. channels) with human concepts.',
],
),
(
'multi-line_cn1.png',
[
'网络支付并无本质的区别,因为',
'每一个手机号码和邮件地址背后',
'都会对应着一个账户--这个账',
'户可以是信用卡账户、借记卡账',
'户,也包括邮局汇款、手机代',
'收、电话代收、预付费卡和点卡',
'等多种形式。',
],
),
(
'multi-line_cn2.png',
[
'当然,在媒介越来越多的情形下,',
'意味着传播方式的变化。过去主流',
'的是大众传播,现在互动性和定制',
'性带来了新的挑战——如何让品牌',
'与消费者更加互动。',
],
),
]
CASES = SINGLE_LINE_CASES + MULTIPLE_LINE_CASES
def print_preds(pred):
    """Print the recognized characters of every line in *pred*.

    *pred* is a sequence of ``(chars, score)`` pairs; scores are ignored.
    """
    lines = []
    for chars, _score in pred:
        lines.append(''.join(chars))
    print("Predicted Chars:", lines)
def cal_score(preds, expected):
    """Return a character-level accuracy score in ``[0, 1]``.

    preds: sequence of ``(pred_chars, score)`` pairs from the OCR model.
    expected: sequence of ground-truth strings, one per predicted line.
    Returns 0 when the line counts differ; otherwise
    ``1 - total_edit_distance / total_expected_chars``.

    Fix: when both sides are empty (total_cnt == 0) the original divided
    by zero; that case now counts as a perfect match.
    """
    if len(preds) != len(expected):
        return 0
    total_cnt = 0
    total_dist = 0
    for real, (pred, _) in zip(expected, preds):
        pred = ''.join(pred)
        total_dist += Levenshtein.distance(real, pred)
        total_cnt += len(real)
    if total_cnt == 0:
        # Nothing to compare: treat as a perfect match instead of crashing.
        return 1.0
    return 1.0 - float(total_dist) / total_cnt
@pytest.mark.parametrize('img_fp, expected', CASES)
def test_ocr(img_fp, expected):
    """ocr() accepts a file path, a gray array and a color array alike."""
    ocr = CNOCR
    root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    img_fp = os.path.join(root_dir, 'examples', img_fp)
    pred = ocr.ocr(img_fp)
    print('\n')
    print_preds(pred)
    assert cal_score(pred, expected) >= 0.8
    # The same image fed as an array (gray, then color) must also work.
    for as_array in (read_img(img_fp), read_img(img_fp, gray=False)):
        pred = ocr.ocr(as_array)
        print_preds(pred)
        assert cal_score(pred, expected) >= 0.8
@pytest.mark.parametrize('img_fp, expected', SINGLE_LINE_CASES)
def test_ocr_for_single_line(img_fp, expected):
    """ocr_for_single_line() accepts a path and several array layouts."""
    ocr = CNOCR
    root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    img_fp = os.path.join(root_dir, 'examples', img_fp)

    def check(image):
        # Recognize one line and require >= 80% character accuracy.
        line_pred = ocr.ocr_for_single_line(image)
        print_preds([line_pred])
        assert cal_score([line_pred], expected) >= 0.8

    pred = ocr.ocr_for_single_line(img_fp)
    print('\n')
    print_preds([pred])
    assert cal_score([pred], expected) >= 0.8
    img = read_img(img_fp)
    check(img)
    img = read_img(img_fp, gray=False)
    check(img)
    # 2-D grayscale array.
    img = np.array(Image.fromarray(img).convert('L'))
    assert len(img.shape) == 2
    check(img)
    # Same image with an explicit single-channel axis.
    img = np.expand_dims(img, axis=2)
    assert len(img.shape) == 3 and img.shape[2] == 1
    check(img)
@pytest.mark.parametrize('img_fp, expected', MULTIPLE_LINE_CASES)
def test_ocr_for_single_lines(img_fp, expected):
    """Lines split out of a multi-line image are recognized in batch."""
    ocr = CNOCR
    root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    img_fp = os.path.join(root_dir, 'examples', img_fp)
    img = read_img(img_fp)
    if img.mean() < 145:  # invert white-on-black images to black-on-white
        img = 255 - img
    pieces = line_split(np.squeeze(img, -1), blank=True)
    line_imgs = [piece for piece, _ in pieces]
    pred = ocr.ocr_for_single_lines(line_imgs)
    print('\n')
    print_preds(pred)
    assert cal_score(pred, expected) >= 0.8
    # Plain numpy arrays instead of the splitter's own image type must
    # produce the same quality of result.
    pred = ocr.ocr_for_single_lines([np.array(piece) for piece in line_imgs])
    print_preds(pred)
    assert cal_score(pred, expected) >= 0.8
def test_cand_alphabet():
    """Restricting cand_alphabet to digits should yield digit-only output."""
    image_path = os.path.join(example_dir, 'hybrid.png')
    ocr = CnOcr(cand_alphabet=NUMBERS)
    texts = []
    for chars, _score in ocr.ocr(image_path):
        texts.append(''.join(chars))
    print("Predicted Chars:", texts)
    assert len(texts) == 1
    assert texts[0] == '012345678'
# Monotonically increasing counter so every CnOcr instance gets a unique name.
INSTANCE_ID = 0

@pytest.mark.parametrize('model_name', AVAILABLE_MODELS.keys())
def test_multiple_instances(model_name):
    """Two differently-named CnOcr instances of one model can coexist."""
    global INSTANCE_ID
    print('test multiple instances for model_name: %s' % model_name)
    img_fp = os.path.join(example_dir, 'hybrid.png')
    INSTANCE_ID += 1
    print('instance id: %d' % INSTANCE_ID)
    first = CnOcr(model_name, name='instance-%d' % INSTANCE_ID)
    print_preds(first.ocr(img_fp))
    INSTANCE_ID += 1
    print('instance id: %d' % INSTANCE_ID)
    second = CnOcr(model_name, name='instance-%d' % INSTANCE_ID, cand_alphabet=NUMBERS)
    print_preds(second.ocr(img_fp))
|
[
"[email protected]"
] | |
24caadb1da40e28f0a1b19027c888aef7f29a004
|
8983b23a25fcc3739fc977850d242ebcc64434ce
|
/jqurity/urls.py
|
a1b034bb4034894993d2bac31814d1ce65d4a60f
|
[] |
no_license
|
jakiiii/django-blog
|
595d834c44c4b45817091da812b90b6fa7a34aab
|
260aa75b89cd9875a2e0ab1e0f9588dffd8f5281
|
refs/heads/master
| 2020-03-29T19:53:57.752279 | 2018-09-25T15:39:21 | 2018-09-25T15:42:39 | 150,286,125 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,130 |
py
|
"""jqurity URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('blog.urls')),
    path('accounts/', include('accounts.urls')),
]

# Serve static and media files from Django itself during development only.
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"[email protected]"
] | |
384339a14d72cafb57e028d6b4112d06e5c27362
|
5774101105b47d78adb7a57eefdfa21502bbd70c
|
/project-follow/MadKing-master/assets/serializers.py
|
d39be9254ac56c2e85c54ce840290990ba81359f
|
[] |
no_license
|
zhlthunder/python-study
|
34d928f0ebbdcd5543ae0f41baaea955c92f5c56
|
0f25dd5105ba46791842d66babbe4c3a64819ee5
|
refs/heads/master
| 2023-01-12T18:39:47.184978 | 2018-10-07T23:48:04 | 2018-10-07T23:48:04 | 90,516,611 | 0 | 1 | null | 2022-12-26T19:46:22 | 2017-05-07T07:39:48 |
HTML
|
UTF-8
|
Python
| false | false | 736 |
py
|
#_*_coding:utf-8_*_
__author__ = 'jieli'
from assets.myauth import UserProfile
from assets import models
from rest_framework import serializers
class UserSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer exposing basic UserProfile fields."""
    class Meta:
        model = UserProfile
        # fields = ('url', 'name', 'email')
        fields = ('url', 'name', 'email','is_admin')
class AssetSerializer(serializers.ModelSerializer):
    """Serializer for Asset records, expanding related objects inline."""
    class Meta:
        model = models.Asset
        # With depth=2 the related server/networkdevice objects are rendered
        # inline (two levels deep) instead of as primary keys.
        depth=2
        fields = ('name', 'sn','server','networkdevice')
class ServerSerializer(serializers.ModelSerializer):
    """Serializer for the Server model."""
    class Meta:
        model = models.Server
        # NOTE(review): no `fields`/`exclude` declared -- recent DRF versions
        # require one of them; confirm the DRF version in use.
        #fields = ('name', 'sn','server')
|
[
"[email protected]"
] | |
9b2cdeb86d06087f1f5fa0e0cfb88b8fab1f3579
|
11cd362cdd78c2fc48042ed203614b201ac94aa6
|
/desktop/core/ext-py3/boto-2.49.0/bin/cwutil
|
280d53f33edf02cafec34709b3684b22dfcc950c
|
[
"CC-BY-3.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0",
"Unlicense",
"LGPL-3.0-only",
"CC0-1.0",
"LicenseRef-scancode-other-permissive",
"CNRI-Python",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-or-later",
"Python-2.0",
"GPL-3.0-only",
"CC-BY-4.0",
"LicenseRef-scancode-jpython-1.1",
"AFL-2.1",
"JSON",
"WTFPL",
"MIT",
"LicenseRef-scancode-generic-exception",
"LicenseRef-scancode-jython",
"GPL-3.0-or-later",
"LicenseRef-scancode-python-cwi",
"BSD-3-Clause",
"LGPL-3.0-or-later",
"Zlib",
"LicenseRef-scancode-free-unknown",
"Classpath-exception-2.0",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"MPL-2.0",
"ISC",
"GPL-2.0-only",
"ZPL-2.1",
"BSL-1.0",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-public-domain",
"Xnet",
"BSD-2-Clause"
] |
permissive
|
cloudera/hue
|
b42343d0e03d2936b5a9a32f8ddb3e9c5c80c908
|
dccb9467675c67b9c3399fc76c5de6d31bfb8255
|
refs/heads/master
| 2023-08-31T06:49:25.724501 | 2023-08-28T20:45:00 | 2023-08-28T20:45:00 | 732,593 | 5,655 | 2,244 |
Apache-2.0
| 2023-09-14T03:05:41 | 2010-06-21T19:46:51 |
JavaScript
|
UTF-8
|
Python
| false | false | 5,046 |
#!/usr/bin/env python
# Author: Chris Moyer <[email protected]>
# Description: CloudWatch Utility
# For listing stats, creating alarms, and managing
# other CloudWatch aspects
import boto
cw = boto.connect_cloudwatch()
from datetime import datetime, timedelta
def _parse_time(time_string):
"""Internal function to parse a time string"""
def _parse_dict(d_string):
result = {}
if d_string:
for d in d_string.split(","):
d = d.split(":")
result[d[0]] = d[1]
return result
def ls(namespace=None):
    """
    List metrics, optionally filtering by a specific namespace

    namespace: Optional Namespace to filter on
    """
    # NOTE(review): Python 2 print statements -- this script predates py3.
    print "%-10s %-50s %s" % ("Namespace", "Metric Name", "Dimensions")
    print "-"*80
    for m in cw.list_metrics():
        # Substring match of the upper-cased filter against the namespace
        # (CloudWatch namespaces like "AWS/EC2" are upper-case).
        if namespace is None or namespace.upper() in m.namespace:
            print "%-10s %-50s %s" % (m.namespace, m.name, m.dimensions)
def stats(namespace, metric_name, dimensions=None, statistics="Average", start_time=None, end_time=None, period=60, unit=None):
    """
    Lists the statistics for a specific metric

    namespace: The namespace to use, usually "AWS/EC2", "AWS/SQS", etc.
    metric_name: The name of the metric to track, pulled from `ls`
    dimensions: The dimensions to use, formatted as Name:Value (such as QueueName:myQueue)
    statistics: The statistics to measure, defaults to "Average"
        'Minimum', 'Maximum', 'Sum', 'Average', 'SampleCount'
    start_time: Start time, default to now - 1 day
    end_time: End time, default to now
    period: Period/interval for counts, default to 60 minutes
    unit: Unit to track, default depends on what metric is being tracked
    """
    # Parse the dimensions
    dimensions = _parse_dict(dimensions)
    # Parse the times
    if end_time:
        end_time = _parse_time(end_time)
    else:
        end_time = datetime.utcnow()
    if start_time:
        start_time = _parse_time(start_time)
    else:
        # Default window: the trailing 24 hours.
        start_time = datetime.utcnow() - timedelta(days=1)
    print "%-30s %s" % ('Timestamp', statistics)
    print "-"*50
    data = {}
    for m in cw.get_metric_statistics(int(period), start_time, end_time, metric_name, namespace, statistics, dimensions, unit):
        data[m['Timestamp']] = m[statistics]
    # NOTE(review): dict.keys() returning a sortable list is Python 2
    # behavior (consistent with the print statements above).
    keys = data.keys()
    keys.sort()
    for k in keys:
        print "%-30s %s" % (k, data[k])
def put(namespace, metric_name, dimensions=None, value=None, unit=None, statistics=None, timestamp=None):
    """
    Publish custom metrics

    namespace: The namespace to use; values starting with "AWS/" are reserved
    metric_name: The name of the metric to update
    dimensions: The dimensions to use, formatted as Name:Value (such as QueueName:myQueue)
    value: The value to store, mutually exclusive with `statistics`
    statistics: The statistics to store, mutually exclusive with `value`
        (must specify all of "Minimum", "Maximum", "Sum", "SampleCount")
    timestamp: The timestamp of this measurement, default is current server time
    unit: Unit to track, default depends on what metric is being tracked
    """
    def simplify(lst):
        # Unwrap single-element lists so scalar arguments stay scalars.
        return lst[0] if len(lst) == 1 else lst
    # Each argument may hold several ";"-separated entries for batch puts.
    print cw.put_metric_data(namespace, simplify(metric_name.split(';')),
        dimensions = simplify(map(_parse_dict, dimensions.split(';'))) if dimensions else None,
        value = simplify(value.split(';')) if value else None,
        statistics = simplify(map(_parse_dict, statistics.split(';'))) if statistics else None,
        timestamp = simplify(timestamp.split(';')) if timestamp else None,
        unit = simplify(unit.split(';')) if unit else None)
def help(fnc=None):
    """
    Print help message, optionally about a specific function
    """
    import inspect
    self = sys.modules['__main__']
    if fnc:
        try:
            cmd = getattr(self, fnc)
        except:
            # NOTE(review): bare except silently maps unknown names to
            # None -- confirm this breadth is intended.
            cmd = None
        if not inspect.isfunction(cmd):
            print "No function named: %s found" % fnc
            sys.exit(2)
        (args, varargs, varkw, defaults) = inspect.getargspec(cmd)
        print cmd.__doc__
        print "Usage: %s %s" % (fnc, " ".join([ "[%s]" % a for a in args]))
    else:
        # No function given: list every public function in this module.
        print "Usage: cwutil [command]"
        for cname in dir(self):
            if not cname.startswith("_") and not cname == "cmd":
                cmd = getattr(self, cname)
                if inspect.isfunction(cmd):
                    doc = cmd.__doc__
                    print "\t%s - %s" % (cname, doc)
    # Help always exits non-zero so scripted callers see it as a failure.
    sys.exit(1)
if __name__ == "__main__":
    import sys
    # Dispatch: the first CLI argument names a function in this module;
    # remaining arguments are passed through positionally.
    self = sys.modules['__main__']
    if len(sys.argv) >= 2:
        try:
            cmd = getattr(self, sys.argv[1])
        except:
            cmd = None
        args = sys.argv[2:]
    else:
        cmd = help
        args = []
    if not cmd:
        cmd = help
    try:
        cmd(*args)
    except TypeError as e:
        # Wrong argument count/shape: show the error plus usage for the cmd.
        print e
        help(cmd.__name__)
|
[
"[email protected]"
] | ||
82cdd53d1dcf9e33c62000824cbb3912abc74ad3
|
5f22ddbd3eeb99709e43e7b9a7958c9987c7efa4
|
/interview_bits/level_2/02_binary_search/02_search_step_simulation/01_implement-power-function.py
|
7f76f3870d716a1ce3475e367399e4163af05c04
|
[] |
no_license
|
salvador-dali/algorithms_general
|
04950bd823fc354adc58a4f23b7d2f3d39664798
|
aeee3356e2488c6fab08741b1ac26e8bd5e4ac0d
|
refs/heads/master
| 2020-12-14T06:24:10.466601 | 2016-07-17T06:00:17 | 2016-07-17T06:00:17 | 47,397,457 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 268 |
py
|
# https://www.interviewbit.com/problems/implement-power-function/
def power(a, b, m):
    """Return (a ** b) % m via binary exponentiation in O(log b) time.

    a, b, m: non-negative integers, m >= 1.

    Fixes over the original:
    * ``b /= 2`` was Python 2 integer division; under Python 3 it yields
      floats and corrupts the loop.  Now uses ``b //= 2``.
    * The final result is reduced modulo m, so ``b == 0`` and ``m == 1``
      also return a value in ``[0, m)``.
    """
    if a == 0:
        # Preserve the original convention: 0 ** b -> 0 (including b == 0).
        return 0
    result, base = 1, a % m
    while b:
        if b % 2:
            result = (result * base) % m
        base = (base * base) % m
        b //= 2
    return result % m
|
[
"[email protected]"
] | |
a48345655e91b63f5ae905da3ad7b8a15ef14273
|
edcc0afdff7a7d01fa05664006d495627e9568e0
|
/tests/snapshot/test_destroy.py
|
420d09cd7da71f55fe79d6edcc08b8eaaf999984
|
[] |
no_license
|
b-a-t/zettarepl
|
871538cc83e9e0ec3cf0c7f4a66bba21559127e4
|
6596fb85f31919edf8eadeee47552d14f3d62db3
|
refs/heads/master
| 2020-04-01T23:22:27.097027 | 2018-10-16T18:45:10 | 2018-10-16T18:45:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 542 |
py
|
# -*- coding=utf-8 -*-
from unittest.mock import call, Mock
from zettarepl.snapshot.destroy import destroy_snapshots
from zettarepl.snapshot.snapshot import Snapshot
def test__destroy_snapshots__works():
    """Snapshots of one dataset are destroyed with a single ranged call."""
    shell = Mock()
    destroy_snapshots(shell, [Snapshot("data", "snap-1"), Snapshot("data/work", "snap-1"), Snapshot("data", "snap-2")])
    # Two datasets involved -> exactly two `zfs destroy` invocations.
    assert shell.exec.call_count == 2
    # Final positional True is any_order=True: call order is not guaranteed.
    shell.exec.assert_has_calls([
        call(["zfs", "destroy", "data@snap-1%snap-2"]),
        call(["zfs", "destroy", "data/work@snap-1"])
    ], True)
|
[
"[email protected]"
] | |
c922049e1d08e7a7dd1929f419415ed617b2dccc
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/59/usersdata/171/41957/submittedfiles/testes.py
|
16dd14c2d0278fb3b37bde0222232be8c114fd08
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 198 |
py
|
# -*- coding: utf-8 -*-
import math
#COMECE AQUI ABAIXO
# Reads a base and an exponent, computes the power and prints it.
a=float(input('digite a base:'))
b=float(input('digite o expoente:'))
cont=0
i=0
c=a**b
# NOTE(review): with i == cont == 0 this loop body never executes; the
# recomputation below is dead code -- confirm the intended loop condition.
while i<cont:
    c=a**b
    cont=cont+1
# NOTE(review): '%d' truncates the float result to an integer -- confirm.
print('%d'%c)
|
[
"[email protected]"
] | |
8203f8ceb30d5186a154e4b31d9a972deba8201b
|
8b4d37632e0435fe5f78bf1631dd74766e8db411
|
/xrandroll/xrandr.py
|
96ceed2ae8f3e366d30c4851a91de8b1c339fe25
|
[
"MIT"
] |
permissive
|
RakhithJK/xrandroll
|
ca876c35fda3235b81362bce9ff6779759d810a5
|
7d294ea15a639d9b15a55c0bfc13161307425554
|
refs/heads/master
| 2022-04-07T03:13:53.816999 | 2020-02-07T12:55:02 | 2020-02-07T12:55:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,071 |
py
|
"""Read/Write system display state using xrandr."""
import subprocess
from .monitor import Monitor, _split_by_lines_matching
def is_replica_of(a, b):
    """Return True if monitor *a* is a replica of *b*.

    Two monitors are replicas when they share position and resolution
    and *b* is enabled.
    """
    same_pos = a.pos_x == b.pos_x and a.pos_y == b.pos_y
    same_res = a.res_x == b.res_x and a.res_y == b.res_y
    return same_pos and same_res and b.enabled
class Screen:
    """A Screen is a collection of monitors."""
    def __init__(self, data):
        # data: `xrandr --verbose` output lines; data[0] is the
        # "Screen ..." header, the rest describe individual outputs.
        self.monitors = {}
        for monitor_data in _split_by_lines_matching(r"^[^ \t].*", data[1:]):
            m = Monitor(monitor_data)
            self.monitors[m.output] = m
        self.update_replica_of()
    def generate(self):
        """Create a list of xrandr invocations to match this state."""
        results = []
        for output, mon in self.monitors.items():
            cli = ["xrandr"]
            cli.append(f"--output {output}")
            if not mon.enabled:
                cli.append("--off")
            else:
                mode = mon.get_current_mode()
                cli.append(f"--pos {int(mon.pos_x)}x{int(mon.pos_y)}")
                cli.append(f"--mode {mode.res_x}x{mode.res_y}")
                mod_x, mod_y = mode.res_x, mode.res_y
                if mon.orientation in ("left", "right"):
                    # Rotated 90/270 degrees: mode axes are swapped relative
                    # to the monitor's effective resolution.
                    mod_x, mod_y = mod_y, mod_x
                cli.append(f"--scale {mon.res_x/mod_x}x{mon.res_y/mod_y}")
                cli.append(f"--rotate {mon.orientation}")
                if mon.primary:
                    cli.append("--primary")
            results.append(" ".join(cli))
        return results
    def update_replica_of(self):
        """Decide which monitors are replicas of each other and
        mark them as such."""
        # O(n^2) pairwise scan; fine for the handful of outputs xrandr lists.
        for a in self.monitors:
            self.monitors[a].replica_of = []
            for b in self.monitors:
                if a != b and is_replica_of(self.monitors[a], self.monitors[b]):
                    self.monitors[a].replica_of.append(b)
    def choose_a_monitor(self):
        """Choose what monitor to select by default.

        * Not disabled
        * Primary, if possible
        """
        # Falls back to the last enabled, non-primary monitor seen;
        # returns None when every monitor is disabled.
        candidate = None
        for name, mon in self.monitors.items():
            if not mon.enabled:
                continue
            if mon.primary:
                return name
            candidate = name
        return candidate
    def get_primary(self):
        """Return the primary monitor, if any."""
        for mon in self.monitors.values():
            if mon.primary:
                return mon
        return None
    def set_primary(self, name):
        # Mark exactly one output as primary; all others are cleared.
        for mon in self.monitors.values():
            mon.primary = name == mon.output
def read_data():
    """Run ``xrandr --verbose`` and return its output as a list of lines."""
    output = subprocess.check_output(["xrandr", "--verbose"], encoding="utf-8")
    return output.splitlines()
def parse_data(data):
    """Build a Screen from raw ``xrandr --verbose`` lines.

    Only the first "Screen" section is parsed; additional screens are
    deliberately ignored.
    """
    screens = _split_by_lines_matching("^Screen ", data)
    return Screen(screens[0])
|
[
"[email protected]"
] | |
3d1cde7505953c42c17da27c37c33aaa338acc32
|
8441f156e53afcc6c2b5190de2439c68eb40f218
|
/python/nistoar/testing/__init__.py
|
d92c9b8c26da11199ab8542e66d9baff95a31408
|
[] |
no_license
|
usnistgov/oar-metadata
|
99436a84d32d623d77310e75eee834c683ea1d5b
|
2190bfc79d97f81d52dd24df0d4e9dc844065b67
|
refs/heads/integration
| 2023-07-08T16:06:23.258608 | 2023-04-22T21:00:09 | 2023-04-22T21:00:09 | 82,972,531 | 4 | 7 | null | 2023-06-30T18:27:38 | 2017-02-23T21:20:34 |
Python
|
UTF-8
|
Python
| false | false | 5,200 |
py
|
"""
test infrastructure and utilities usable throughout the nistoar library
"""
# this code was copied from the testing infrastructure for ejsonschema
import os, shutil
__all__ = [
'ensure_tmpdir', 'tmpdir', 'rmtmpdir', 'Tempfiles', 'artifactdir'
]
tmpname = "_test"
def ensure_tmpdir(basedir=None, dirname=None):
    """
    ensure the existence of a directory where temporary inputs and outputs
    can be placed.  This directory is not cleaned up after use.

    :argument str basedir: the desired path to the tmp directory's parent
                           directory; defaults to the current working dir.
    :argument str dirname: the desired name for the directory
    :return str:  the path to the temporary directory
    """
    path = tmpdir(basedir, dirname)
    if not os.path.isdir(path):
        os.mkdir(path)
    return path
def tmpdir(basedir=None, dirname=None):
    """
    return the path of a temporary directory where temporary inputs and
    outputs can be placed (the directory itself is not created).

    :argument str basedir: parent directory for the tmp directory; defaults
                           to the current working directory.
    :argument str dirname: desired directory name; defaults to the module
                           test prefix plus the current process id.
    :return str: the path to the temporary directory
    """
    name = dirname or (tmpname + str(os.getpid()))
    parent = basedir or os.getcwd()
    return os.path.join(parent, name)
def rmdir(dirpath):
    """
    remove the given path and all its contents

    :argument str dirpath: the directory to remove recursively; raises
                           OSError if it does not exist
    """
    shutil.rmtree(dirpath)
def rmtmpdir(basedir=None, dirname=None):
    """
    remove the default temporary directory if it exists.

    :argument str basedir: the path to tmp directory's parent directory.
                           if not provided, the current working directory will
                           be assumed.
    :argument str dirname: the name for the directory
    :return str: the path to the removed temporary directory

    Fix: the docstring always promised the path but the function returned
    None; the path is now returned (whether or not removal was needed).
    """
    tdir = tmpdir(basedir, dirname)
    if os.path.exists(tdir):
        rmdir(tdir)
    return tdir
class Tempfiles(object):
    """
    A factory for paths under a single temporary base directory, with
    optional cleanup of everything it handed out.

    Calling the instance returns a full path for a name under the base:

    .. code-block:: python

       ts = Tempfiles(basedir)
       tmpfile = ts("testoutput.txt")

    Use track() to get the same path while registering it for removal by
    clean(), and mkdir() to create-and-register a subdirectory.  With
    autoclean=True, clean() also runs when the instance is garbage
    collected.
    """

    def __init__(self, tempdir=None, autoclean=False):
        if not tempdir:
            tempdir = ensure_tmpdir()
        assert os.path.exists(tempdir)
        self._root = tempdir          # base directory for all paths
        self._files = set()           # relative paths registered for cleanup
        self._autoclean = autoclean   # run clean() on __del__ when True

    @property
    def root(self):
        """
        the base directory below which tracked files and directories live
        """
        return self._root

    def __call__(self, child):
        return os.path.join(self._root, child)

    def mkdir(self, dirname):
        """
        create a subdirectory (if needed) under the base and register it
        for cleanup; returns its full path.
        """
        path = os.path.join(self._root, dirname)
        if not os.path.isdir(path):
            os.mkdir(path)
        self.track(dirname)
        return path

    def track(self, filename):
        """
        register a file or directory (given relative to the base) for
        removal by clean(); returns its full path.
        """
        self._files.add(filename)
        return self(filename)

    def clean(self):
        """
        remove every registered file and directory; entries that fail to
        disappear remain registered for a later attempt.
        """
        for _ in range(len(self._files)):
            relpath = self._files.pop()
            target = os.path.join(self._root, relpath)
            if not os.path.exists(target):
                continue
            try:
                if os.path.isdir(target):
                    shutil.rmtree(target)
                else:
                    os.remove(target)
            finally:
                if os.path.exists(target):
                    # removal failed; keep it registered
                    self._files.add(relpath)

    def __del__(self):
        if self._autoclean:
            self.clean()
def artifactdir(mod=None):
    """
    return a directory where test artifacts may be written for the given
    module.  The base comes from the OAR_TEST_ARTIFACT_DIR environment
    variable; when it is unset or not a directory, the default tmp dir is
    used instead.  mod may be a module object or a dotted-name string; a
    per-module subdirectory is created on demand.
    """
    base = os.environ.get('OAR_TEST_ARTIFACT_DIR')
    if not base or not os.path.isdir(base):
        return tmpdir()
    if not isinstance(mod, str) and hasattr(mod, '__name__'):
        mod = mod.__name__
    if not isinstance(mod, str):
        return base
    subdir = os.path.join(base, mod)
    if not os.path.exists(subdir):
        os.mkdir(subdir)
    return subdir
|
[
"[email protected]"
] | |
00588c59ef606ca06a81ac2cc3da8e2270175594
|
52e8dce655b89a260d049d34e74bc0cd3caf6f07
|
/torchreid/__init__.py
|
3403b86662515fb3072ca4ac7f8f659b96c4a42f
|
[
"MIT"
] |
permissive
|
digitalbrain79/deep-person-reid
|
b527d0e8bd9a4a72209728c105fe5cd1773041dc
|
0e7026be11dab7cb6991c43ea0b36765445507f9
|
refs/heads/master
| 2020-05-20T02:50:24.406708 | 2019-05-06T21:28:34 | 2019-05-06T21:28:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 305 |
py
|
from __future__ import absolute_import
from __future__ import print_function
__version__ = '0.7.4'
__author__ = 'Kaiyang Zhou'
__description__ = 'Deep learning person re-identification in PyTorch'
from torchreid import (
engine,
models,
losses,
metrics,
data,
optim,
utils
)
|
[
"[email protected]"
] | |
dd72fcfd037b92916bb36a734e3754cf57ff6822
|
dfaa71f8064d3d0773941cf14ab86ff57ff67284
|
/part45/blog/models.py
|
d5edd654805cf32352512470306c70d8c055de71
|
[
"Apache-2.0"
] |
permissive
|
yllew36/WellyGI
|
e94c5000ff3a7f2fd7316d22ad166fbf7916ea23
|
7d53fac4c81bb994f61b22761e5ac7e48994ade4
|
refs/heads/master
| 2020-09-05T15:49:37.386078 | 2019-11-15T08:16:59 | 2019-11-15T08:16:59 | 220,148,061 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 547 |
py
|
from django.db import models
from django.utils.text import slugify
# Create your models here.
class ArtikelModel(models.Model):
judul = models.CharField(max_length=255)
isi = models.TextField()
penulis = models.CharField(max_length=255)
publish = models.DateTimeField(auto_now_add = True)
update = models.DateTimeField(auto_now=True)
slug = models.SlugField(blank=True,editable=False)
def save(self):
self.slug = slugify(self.judul)
super(Artikel, self).save()
def __str__(self):
return "{}. {}".format(self.id,self.judul)
|
[
"[email protected]"
] | |
d5710045f064d84d667dfa28c760ba605ec4e832
|
f1ee4b96f37419504576dc8b0d5b708bd5b9ba29
|
/builder/main.py
|
7a06353e59a01b076b8af1324a542b80ce572c60
|
[] |
no_license
|
OS-Q/P254
|
6d850efdd9da8a76d3cc2a4340c62cd8039dacdc
|
e3b542ec8020d280ab41ea5f2496b260e710f6d1
|
refs/heads/master
| 2023-04-19T11:03:23.733720 | 2021-05-04T03:48:12 | 2021-05-04T03:48:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,291 |
py
|
# Copyright 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from platform import system
from os import makedirs
from os.path import isdir, join
from SCons.Script import (ARGUMENTS, COMMAND_LINE_TARGETS, AlwaysBuild,
Builder, Default, DefaultEnvironment)
env = DefaultEnvironment()
env.SConscript("compat.py", exports="env")
platform = env.PioPlatform()
board = env.BoardConfig()
env.Replace(
AR="arm-none-eabi-ar",
AS="arm-none-eabi-as",
CC="arm-none-eabi-gcc",
CXX="arm-none-eabi-g++",
GDB="arm-none-eabi-gdb",
OBJCOPY="arm-none-eabi-objcopy",
RANLIB="arm-none-eabi-ranlib",
SIZETOOL="arm-none-eabi-size",
ARFLAGS=["rc"],
SIZEPROGREGEXP=r"^(?:\.text|\.data|\.rodata|\.text.align|\.ARM.exidx)\s+(\d+).*",
SIZEDATAREGEXP=r"^(?:\.data|\.bss|\.noinit)\s+(\d+).*",
SIZECHECKCMD="$SIZETOOL -A -d $SOURCES",
SIZEPRINTCMD='$SIZETOOL -B -d $SOURCES',
PROGSUFFIX=".elf"
)
# Allow user to override via pre:script
if env.get("PROGNAME", "program") == "program":
env.Replace(PROGNAME="firmware")
env.Append(
BUILDERS=dict(
ElfToBin=Builder(
action=env.VerboseAction(" ".join([
"$OBJCOPY",
"-O",
"binary",
"$SOURCES",
"$TARGET"
]), "Building $TARGET"),
suffix=".bin"
),
ElfToHex=Builder(
action=env.VerboseAction(" ".join([
"$OBJCOPY",
"-O",
"ihex",
"-R",
".eeprom",
"$SOURCES",
"$TARGET"
]), "Building $TARGET"),
suffix=".hex"
)
)
)
if not env.get("PIOFRAMEWORK"):
env.SConscript("frameworks/_bare.py")
#
# Target: Build executable and linkable firmware
#
if "zephyr" in env.get("PIOFRAMEWORK", []):
env.SConscript(
join(platform.get_package_dir(
"framework-zephyr"), "scripts", "platformio", "platformio-build-pre.py"),
exports={"env": env}
)
target_elf = None
if "nobuild" in COMMAND_LINE_TARGETS:
target_elf = join("$BUILD_DIR", "${PROGNAME}.elf")
target_firm = join("$BUILD_DIR", "${PROGNAME}.bin")
else:
target_elf = env.BuildProgram()
target_firm = env.ElfToBin(join("$BUILD_DIR", "${PROGNAME}"), target_elf)
env.Depends(target_firm, "checkprogsize")
AlwaysBuild(env.Alias("nobuild", target_firm))
target_buildprog = env.Alias("buildprog", target_firm, target_firm)
#
# Target: Print binary size
#
target_size = env.Alias(
"size", target_elf,
env.VerboseAction("$SIZEPRINTCMD", "Calculating size $SOURCE"))
AlwaysBuild(target_size)
#
# Target: Upload by default .bin file
#
upload_protocol = env.subst("$UPLOAD_PROTOCOL")
upload_actions = []

# mbed-style upload: the board enumerates as a mass-storage disk and the
# firmware is simply copied onto it.
if upload_protocol == "mbed":
    upload_actions = [
        env.VerboseAction(env.AutodetectUploadPort, "Looking for upload disk..."),
        env.VerboseAction(env.UploadToDisk, "Uploading $SOURCE")
    ]

elif upload_protocol.startswith("jlink"):

    def _jlink_cmd_script(env, source):
        # Generate a J-Link Commander script that halts, flashes the image
        # at the configured offset, resets and quits.
        build_dir = env.subst("$BUILD_DIR")
        if not isdir(build_dir):
            makedirs(build_dir)
        script_path = join(build_dir, "upload.jlink")
        commands = [
            "h",
            "loadbin %s, %s" % (source, board.get(
                "upload.offset_address", "0x0")),
            "r",
            "q"
        ]
        with open(script_path, "w") as fp:
            fp.write("\n".join(commands))
        return script_path

    env.Replace(
        __jlink_cmd_script=_jlink_cmd_script,
        UPLOADER="JLink.exe" if system() == "Windows" else "JLinkExe",
        UPLOADERFLAGS=[
            "-device", board.get("debug", {}).get("jlink_device"),
            "-speed", env.GetProjectOption("debug_speed", "4000"),
            "-if", ("jtag" if upload_protocol == "jlink-jtag" else "swd"),
            "-autoconnect", "1",
            "-NoGui", "1"
        ],
        UPLOADCMD='$UPLOADER $UPLOADERFLAGS -CommanderScript "${__jlink_cmd_script(__env__, SOURCE)}"'
    )
    upload_actions = [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]

# Black Magic Probe: drive GDB in batch mode against the probe's
# extended-remote port; note this flashes the ELF, not the BIN.
elif upload_protocol.startswith("blackmagic"):
    env.Replace(
        UPLOADER="$GDB",
        UPLOADERFLAGS=[
            "-nx",
            "--batch",
            "-ex", "target extended-remote $UPLOAD_PORT",
            "-ex", "monitor %s_scan" %
            ("jtag" if upload_protocol == "blackmagic-jtag" else "swdp"),
            "-ex", "attach 1",
            "-ex", "load",
            "-ex", "compare-sections",
            "-ex", "kill"
        ],
        UPLOADCMD="$UPLOADER $UPLOADERFLAGS $BUILD_DIR/${PROGNAME}.elf"
    )
    upload_actions = [
        env.VerboseAction(env.AutodetectUploadPort, "Looking for BlackMagic port..."),
        env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")
    ]

# CMSIS-DAP: reuse the board's debug-server definition (pyOCD or OpenOCD)
# to flash over the same probe.
elif upload_protocol == "cmsis-dap":
    debug_server = board.get("debug.tools", {}).get(
        upload_protocol, {}).get("server")
    assert debug_server
    if debug_server.get("package") == "tool-pyocd":
        env.Replace(
            UPLOADER=join(platform.get_package_dir("tool-pyocd") or "",
                          "pyocd-flashtool.py"),
            UPLOADERFLAGS=debug_server.get("arguments", [])[1:],
            UPLOADCMD='"$PYTHONEXE" "$UPLOADER" $UPLOADERFLAGS $SOURCE'
        )
    elif debug_server.get("package") == "tool-openocd":
        # Debug level follows PIOVERBOSE; then the board's own server
        # arguments, optional adapter speed, and the program/verify/reset
        # command with the configured flash offset.
        openocd_args = [
            "-d%d" % (2 if int(ARGUMENTS.get("PIOVERBOSE", 0)) else 1)
        ]
        openocd_args.extend(debug_server.get("arguments", []))
        if env.GetProjectOption("debug_speed"):
            openocd_args.extend(
                ["-c", "adapter speed %s" % env.GetProjectOption("debug_speed")]
            )
        openocd_args.extend([
            "-c", "program {$SOURCE} %s verify reset; shutdown;" %
            board.get("upload.offset_address", "")
        ])
        # Expand $PACKAGE_DIR placeholders to the installed OpenOCD path.
        openocd_args = [
            f.replace("$PACKAGE_DIR",
                      platform.get_package_dir("tool-openocd") or "")
            for f in openocd_args
        ]
        env.Replace(
            UPLOADER="openocd",
            UPLOADERFLAGS=openocd_args,
            UPLOADCMD="$UPLOADER $UPLOADERFLAGS")
    upload_actions = [
        env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")
    ]

# custom upload tool
elif upload_protocol == "custom":
    upload_actions = [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]

if not upload_actions:
    sys.stderr.write("Warning! Unknown upload protocol %s\n" % upload_protocol)

AlwaysBuild(env.Alias("upload", target_firm, upload_actions))
|
[
"[email protected]"
] | |
b1877b2bf819138238459ec197dd6bdf01e9b712
|
3d2a74a859b0ea2a2f12315fd781154eae8449c5
|
/LeetCode/min_size_suba_sum.py
|
0b8ec9e1f641060914e8bb23000cbca0b64a88c5
|
[] |
no_license
|
jacobfelknor/practice_interview_questions
|
1e929b0fdb4f816202f000de96b9f66fb119802b
|
942f0ec730d7f0af650ddcee1abc5d17827c953c
|
refs/heads/master
| 2021-11-22T07:27:25.986891 | 2021-11-09T02:12:13 | 2021-11-09T02:12:13 | 227,508,728 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 716 |
py
|
"""
Given an array of n positive integers and a positive integer s,
find the minimal length of a contiguous subarray of which the sum ≥ s.
If there isn't one, return 0 instead.
>>> min_sub_array_length([2,3,1,2,4,3], 7)
2
"""
from typing import List
def min_sub_array_length(nums: List[int], nsum: int) -> int:
    """Return the length of the shortest contiguous subarray of *nums*
    whose sum is >= *nsum*, or 0 when no such subarray exists.

    Sliding window: extend the window to the right, then shrink it from
    the left for as long as the window sum still meets the target.
    O(n) time, O(1) extra space.
    """
    best = float("inf")
    window_sum = 0
    left = 0
    for right, value in enumerate(nums):
        window_sum += value
        # Shrink from the left while the constraint still holds.
        while window_sum >= nsum:
            best = min(best, right - left + 1)
            window_sum -= nums[left]
            left += 1
    return 0 if best == float("inf") else best
print(min_sub_array_length([2, 3, 1, 2, 4, 2], 7))
|
[
"[email protected]"
] | |
7a5222fd8eda27337c2d12c3e550a83aa9fa6281
|
231f8a898b20e475a5cabff439600de211d825c0
|
/deploy_tools/fabfile.py
|
33f3f66d5a1f450f1ea86a8eed1c19c182d68253
|
[
"MIT"
] |
permissive
|
thewchan/superlists
|
f7370b341ce7c37b8cae506eb5bafdd2fb31b07a
|
af41636b2cdafb45c638e36076b9cdefc5586aad
|
refs/heads/master
| 2023-05-26T11:01:24.310480 | 2021-06-11T21:12:20 | 2021-06-11T21:12:20 | 361,209,827 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| true | false | 1,841 |
py
|
"""Fabric deployment configuration and script."""
import random
from fabric.contrib.files import append, exists
from fabric.api import cd, env, local, run
REPO_URL = "https://github.com/thewchan/superlists.git"
def deploy() -> None:
    """Deploy site to server.

    Entry point for ``fab deploy``: creates the per-host site folder and
    runs every deployment step inside it, in order (source first, then
    virtualenv, env file, static files, and finally DB migrations).
    """
    site_folder = f"/home/{env.user}/sites/{env.host}"
    run(f"mkdir -p {site_folder}")
    with cd(site_folder):
        _get_latest_source()
        _update_virtualenv()
        _create_or_update_dotenv()
        _update_static_files()
        _update_database()
def _get_latest_source() -> None:
    """Fetch the latest source code.

    Clones on first deploy, fetches afterwards, then hard-resets the
    server checkout to the commit currently checked out locally — so the
    deploy matches the local working tree's HEAD, not the remote branch tip.
    """
    if exists(".git"):
        run("git fetch")
    else:
        run(f"git clone {REPO_URL} .")
    current_commit = local("git log -n 1 --format=%H", capture=True)
    run(f"git reset --hard {current_commit}")
def _update_virtualenv() -> None:
    """Updates the virtual environment at the server.

    Creates the venv on first deploy (presence of virtualenv/bin/pip is
    the marker), then installs/updates requirements on every deploy.
    """
    if not exists("virtualenv/bin/pip"):
        run("python3.7 -m venv virtualenv")
    run("./virtualenv/bin/pip install -r requirements.txt")
def _create_or_update_dotenv() -> None:
    """Create or update environment file as needed.

    Fabric's ``append`` only adds a line when it is not already present,
    so repeated deploys are idempotent. A SECRET_KEY is generated once,
    with the CSPRNG-backed SystemRandom, and then kept forever.
    """
    append(".env", "DJANGO_DEBUG_FALSE=y")
    append(".env", f"SITENAME={env.host}")
    current_contents = run("cat .env")
    if "DJANGO_SECRET_KEY" not in current_contents:
        new_secret = "".join(
            random.SystemRandom().choices(
                "abcdefghijklmnopqrstuvwxyz0123456789", k=50
            )
        )
        append(".env", f"DJANGO_SECRET_KEY={new_secret}")
def _update_static_files() -> None:
    """Update static files as needed (collectstatic, non-interactive)."""
    run("./virtualenv/bin/python manage.py collectstatic --noinput")
def _update_database() -> None:
    """Migrate database as necessary (non-interactive)."""
    run("./virtualenv/bin/python manage.py migrate --noinput")
|
[
"[email protected]"
] | |
20437c1a84eb98ac587f50388c9768487f5ca702
|
b26448cd43ac991c6277b588a1dcb6da53afe10a
|
/users/forms.py
|
54880d817fdc01332a72a06f7e769d744f2d5c8f
|
[] |
no_license
|
Xednom/e-wallet
|
76da2658c34391c5d38e9d73ebce8f4ea80be87e
|
97e83849296fa9678b6fdcb0737dfe09ee268a3f
|
refs/heads/master
| 2023-01-29T04:27:51.833449 | 2019-10-16T07:34:25 | 2019-10-16T07:34:25 | 239,905,317 | 1 | 0 | null | 2023-01-04T14:20:08 | 2020-02-12T01:55:27 |
Python
|
UTF-8
|
Python
| false | false | 763 |
py
|
from django import forms
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django_registration.forms import RegistrationForm
from .models import CustomUser
class CustomUserCreationForm(UserCreationForm):
    """Sign-up form for the CustomUser model (adds address/location fields)."""

    class Meta(UserCreationForm.Meta):
        model = CustomUser
        # Password fields come from UserCreationForm itself.
        fields = (
            'first_name', 'last_name', 'email', 'address',
            'country', 'state', 'zip_code'
        )
class CustomUserChangeForm(UserChangeForm):
    """Admin/profile edit form exposing the same field set as creation."""

    class Meta:
        model = CustomUser
        fields = (
            'first_name', 'last_name', 'email', 'address',
            'country', 'state', 'zip_code'
        )
class CustomUserForm(RegistrationForm):
    """django-registration form bound to CustomUser (inherits Meta fields)."""

    class Meta(RegistrationForm.Meta):
        model = CustomUser
|
[
"[email protected]"
] | |
485a56d5d4b5bbc4ce35a0d79cf74af9937dee85
|
64f365bf14a3c700ac3dab4a43a2bccd7ad0f222
|
/setup.py
|
ea34a7eb8bee6edea5c9c57b41d1aaf016932e65
|
[
"MIT"
] |
permissive
|
russmain/leafmap
|
a4e8d081a5a3c973d2eb87616340dc44fd277fbd
|
277edabfba56bfe133f507173e6005b5a7504234
|
refs/heads/master
| 2023-07-15T23:11:16.445456 | 2021-09-02T03:04:59 | 2021-09-02T03:04:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,729 |
py
|
#!/usr/bin/env python
"""The setup script."""
import io
from os import path as op
from setuptools import setup, find_packages
with open("README.md") as readme_file:
    readme = readme_file.read()

here = op.abspath(op.dirname(__file__))

# Get the dependencies and installs from requirements.txt.
with io.open(op.join(here, "requirements.txt"), encoding="utf-8") as f:
    all_reqs = f.read().split("\n")

# Regular (PyPI) requirements: every line that is not a VCS URL.
install_requires = [x.strip() for x in all_reqs if "git+" not in x]
# VCS requirements: only the "git+..." lines, scheme prefix stripped, as
# expected by setuptools' dependency_links.
# BUG FIX: the original filtered with `"git+" not in x`, which made
# dependency_links a duplicate of install_requires and dropped the very
# git+ URLs it was meant to collect.
dependency_links = [x.strip().replace("git+", "") for x in all_reqs if "git+" in x]

requirements = []
setup_requirements = []
test_requirements = []
# Package metadata and build configuration for leafmap.
setup(
    author="Qiusheng Wu",
    author_email="[email protected]",
    python_requires=">=3.7",
    classifiers=[
        "Development Status :: 2 - Pre-Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ],
    description="A Python package for geospatial analysis and interactive mapping in a Jupyter environment.",
    # Dependencies parsed from requirements.txt above.
    install_requires=install_requires,
    dependency_links=dependency_links,
    license="MIT license",
    # README.md rendered as the PyPI long description.
    long_description=readme,
    long_description_content_type="text/markdown",
    include_package_data=True,
    keywords="leafmap",
    name="leafmap",
    packages=find_packages(include=["leafmap", "leafmap.*"]),
    setup_requires=setup_requirements,
    test_suite="tests",
    tests_require=test_requirements,
    url="https://github.com/giswqs/leafmap",
    version="0.4.1",
    zip_safe=False,
)
|
[
"[email protected]"
] | |
1a480f0e4af30873cf5daa67189f7085fb570119
|
ee561aa019a80f621007f82bdb21fe6ed8b6278f
|
/devel/ros_control-melodic-devel/hardware_interface/catkin_generated/pkg.develspace.context.pc.py
|
0b881c3ecc6378010075a3d5b58fcdccc75ddd34
|
[] |
no_license
|
allanwhledu/agv_edu_prj
|
4fb5fbf14cf0a14edd57ee9bd87903dc25d4d4f2
|
643a8a96ca7027529332f25208350de78c07e33d
|
refs/heads/master
| 2020-09-23T23:32:54.430035 | 2019-12-04T07:47:55 | 2019-12-04T07:47:55 | 225,613,426 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 554 |
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# NOTE: machine-generated catkin develspace context for pkg-config;
# regenerated by the build — do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/sjtuwhl/ROBOTLAB_WS/src/ros_control-melodic-devel/hardware_interface/include".split(';') if "/home/sjtuwhl/ROBOTLAB_WS/src/ros_control-melodic-devel/hardware_interface/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "hardware_interface"
PROJECT_SPACE_DIR = "/home/sjtuwhl/ROBOTLAB_WS/devel"
PROJECT_VERSION = "0.15.1"
|
[
"[email protected]"
] | |
2843225ad98b83b0dfefd872c82ee2088e5571c4
|
0b16b44e4fc8c98c9ea3f9d4b8b470f4f62f918d
|
/Core/migrations/0005_auto_20201105_0936.py
|
bedc07c9a234fd96f3fc7bd257cbcec57776181d
|
[] |
no_license
|
AthifSaheer/DipakNiroula-Django-Ecom
|
342eece90211fe80c41ba72bf69a50e63c5ea901
|
94ead608919c5bb076387e26f396e6c38319433e
|
refs/heads/main
| 2023-02-05T06:52:24.204206 | 2020-12-24T13:19:13 | 2020-12-24T13:19:13 | 324,160,212 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 509 |
py
|
# Generated by Django 2.2.14 on 2020-11-05 04:06
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: widen Order.payment_method choices.

    Migrations are frozen history — do not edit applied migrations by hand.
    """

    dependencies = [
        ('Core', '0004_auto_20201104_1147'),
    ]

    operations = [
        migrations.AlterField(
            model_name='order',
            name='payment_method',
            # NOTE(review): the choice values carry trailing spaces
            # ('Cash On Delivery ', 'Khalti ', 'Esewa ') while the default
            # is 'Khalti' without one — confirm against the model definition.
            field=models.CharField(choices=[('Cash On Delivery ', 'Cash On Delivery '), ('Khalti ', 'Khalti '), ('Esewa ', 'Esewa ')], default='Khalti', max_length=20),
        ),
    ]
|
[
"[email protected]"
] | |
0cb6d6ce63e06611b90c62a58cf84c65f89759e2
|
3c2b5fd20c7372fccb97fa76deb0980a173b5991
|
/PythonFullStack/000Basic/day06/02-文件的读写.py
|
0c155ffb3e9c6c9d76f6d864f0c9700496908cc1
|
[] |
no_license
|
softwarefaith/PythonFullStack
|
560cdc2c0c38831e8304751b8b2bf680cb2f23e5
|
292cc0a5eee3ed8eb8a8d5e14673226533d2651e
|
refs/heads/master
| 2021-05-15T09:57:37.812869 | 2019-02-21T10:37:41 | 2019-02-21T10:37:41 | 108,229,662 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,715 |
py
|
# File access modes:
# 1. r : read-only; opening a missing file raises an error
# 2. w : write-only
# 3. a : append (write at the end)
# 4. rb: read file data in binary mode  -- commonly used
# 5. wb: write file data in binary mode -- commonly used
# 6. ab: append file data in binary mode -- commonly used
# Binary modes are what you use when crawling video, image, text or audio data.
# r+  w+  a+  support both reading and writing.
# rb+ wb+ ab+ support reading and writing in binary mode.

# Files are opened with the open() function.

# ------------ r mode (read-only) -----------
# Crashes if the file does not exist.
# file = open("1.txt","r",encoding="utf-8")
# # Read all of the data in the file.
# content = file.read()
# print(content)
# # The file must be closed.
# file.close()

# -------- w mode ----------------------
# Note: if the file does not exist, it is created and then opened.
# encoding="utf-8" sets the text encoding (mac/linux default).
# On Windows the default is GBK (code page cp936).
# Note: in w mode an existing file has its previous contents cleared
# before the new data is written.
# file = open("1.txt","w",encoding="utf-8")
# Write data into 1.txt.
# Multiple writes after one open() do not overwrite each other.
# file.write("A")
# file.write("哈哈")
# file.write("说大事大所大所多")
# #Check the current encoding (cp936 on Windows).
# result = file.encoding
# print(result)
# # Remember: the last step of every file operation is close().
# file.close()

# a ------------ append data
#
# file = open("1.txt","a",encoding="utf-8")
# file.write("BBB")
# file.close()

# Python 2 does not support Chinese source text by default;
# Python 3 supports it out of the box.
#_*_ coding:utf-8
# print("啊哈哈")

# rb ----------- read data in binary mode
file = open("1.txt","rb")
#binary mode doesn't take an encoding argument
# Binary mode needs no encoding specified.
# Read the data:
# printing Chinese shows escapes like \xe5 — one Chinese char is three bytes.
# file_data = file.read()
# # Decode the bytes.
# content = file_data.decode("utf-8")
# # This prints the decoded text.
# print(content)
# # Writing is not supported in rb mode.
# file.write("aaaa")
#
# file.close()

# wb -------------- write data in binary mode
# A leading w still clears any existing data first.
# file = open("1.txt","wb")
# content = "hello 哈哈"
# # Package content as binary: encode it to bytes.
# file_data =content.encode("utf-8")
# file.write(file_data)
# file.close()

# ab ------- append data in binary mode
# #(When two opens coexist, the code below would not run.)
# file = open("1.txt","ab")
# content = "hello"
# # Appended data must also be bytes.
# file_data =content.encode("utf-8")
# file.write(file_data)
# # Not readable in ab mode.
# file.close()

# r+ ------------------- read and write
# For cross-platform compatibility: whenever there is no "b" in the mode,
# you can (and should) specify the encoding explicitly.
# Basic operations — plenty of pitfalls here.
# (Regular expressions come next.)
file = open("1.txt","r+",encoding="utf-8")
file.write("abc")
result = file.read()
print(result)
file.close()
|
[
"[email protected]"
] | |
123a0cd3e2885c33639ca783c268bbee0e3fa695
|
bc63598033c6ca4ac7f257897aec0b23eaff60d1
|
/test/mitmproxy/test_proxy_config.py
|
e2c39846c7e7b8d19edbed878fb14cf9b84d42ad
|
[
"MIT"
] |
permissive
|
Scalr/mitmproxy
|
4aee723aef2f34fa1209364b5b03cedff7d3f85e
|
a6c608e08595e95279713e51e2a346344bd290c0
|
refs/heads/master
| 2020-06-27T08:52:29.441895 | 2016-11-23T00:27:23 | 2016-11-23T00:27:23 | 74,527,489 | 0 | 2 |
MIT
| 2018-05-03T00:00:18 | 2016-11-23T01:10:39 |
Python
|
UTF-8
|
Python
| false | false | 726 |
py
|
from mitmproxy.test import tutils
from mitmproxy.proxy import config
def test_parse_server_spec():
    """parse_server_spec: accepts scheme://host[:port], rejects bare hosts.

    Default ports are filled in per scheme (http -> 80, https -> 443).
    """
    # Empty spec is invalid.
    tutils.raises(
        "Invalid server specification", config.parse_server_spec, ""
    )
    # Explicit port is preserved.
    assert config.parse_server_spec("http://foo.com:88") == (
        "http", ("foo.com", 88)
    )
    # Scheme default ports.
    assert config.parse_server_spec("http://foo.com") == (
        "http", ("foo.com", 80)
    )
    assert config.parse_server_spec("https://foo.com") == (
        "https", ("foo.com", 443)
    )
    # Missing scheme is invalid.
    tutils.raises(
        "Invalid server specification",
        config.parse_server_spec,
        "foo.com"
    )
    # Scheme with no host is invalid.
    tutils.raises(
        "Invalid server specification",
        config.parse_server_spec,
        "http://"
    )
|
[
"[email protected]"
] | |
18c980d503bf6b4c69c1adfc9b18247782543587
|
ac6e4102dfb49a4e49de0e2766feb6e80ab0b5c2
|
/h1/models/storage_project_disk_update.py
|
db3461e12902a70bd45008c134567f0cb69ccd06
|
[
"MIT"
] |
permissive
|
hyperonecom/h1-client-python
|
df01f05ad295121e3dd391a3274c41e2f5b88e53
|
4ce355852ba3120ec1b8f509ab5894a5c08da730
|
refs/heads/master
| 2023-04-05T01:51:31.637002 | 2021-03-29T00:05:41 | 2021-03-29T00:05:41 | 319,309,525 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,246 |
py
|
# coding: utf-8
"""
HyperOne
HyperOne API # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from h1.configuration import Configuration
class StorageProjectDiskUpdate(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'name': 'str'
    }

    attribute_map = {
        'name': 'name'
    }

    def __init__(self, name=None, local_vars_configuration=None):  # noqa: E501
        """StorageProjectDiskUpdate - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._name = None
        self.discriminator = None

        # Only set through the property so any generated validation runs.
        if name is not None:
            self.name = name

    @property
    def name(self):
        """Gets the name of this StorageProjectDiskUpdate.  # noqa: E501


        :return: The name of this StorageProjectDiskUpdate.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this StorageProjectDiskUpdate.


        :param name: The name of this StorageProjectDiskUpdate.  # noqa: E501
        :type: str
        """
        self._name = name

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists and dicts.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, StorageProjectDiskUpdate):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, StorageProjectDiskUpdate):
            return True

        return self.to_dict() != other.to_dict()
|
[
"[email protected]"
] | |
e3f94648f1b2d25797273b156ae51df153c72c27
|
b90975e4d7acf7c9ad26ef5fc3e6247c95e2c540
|
/installation_test.py
|
73686a13ee12869e973416d273dd0707ec2ee9bb
|
[] |
no_license
|
lfernandez55/tensorflow_pluralsight
|
720de593a010d392d35b9da7263972148ec5076b
|
fc519c2154b90b40900df81fcdfd72f84d4eac22
|
refs/heads/master
| 2020-06-13T00:13:08.906189 | 2019-06-30T04:50:32 | 2019-06-30T04:50:32 | 194,470,020 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 260 |
py
|
# Smoke test for a TensorFlow installation.
# NOTE: uses the TF 1.x session API (tf.Session); under TF 2.x this needs
# tf.compat.v1 plus disabled eager execution.
import tensorflow as tf

sess = tf.Session()

#Verify we can print a string
hello = tf.constant("hello world from tensorflow")
print(sess.run(hello))

#Perform some simple math
a = tf.constant(20)
b = tf.constant(22)
print('a + b = {0}'.format(sess.run(a+b)))
|
[
"[email protected]"
] | |
4be2e8189f05febeb17633e6c20fdd4ab01b805f
|
268a6b7a1138dce434c6b7a54eb36cb4ae799ddd
|
/topo/custom/tests/test_delegate_forward.py
|
e01c1c60f6a5ea1c9407d803a176f66799f06906
|
[
"BSD-2-Clause"
] |
permissive
|
rubiruchi/fdeval
|
2b0592853a684a8c5b87aeb363e4ccff61f47c0c
|
f6463c1c7549b8ac7fc39854e87c88d3cac858a0
|
refs/heads/master
| 2022-11-08T17:56:34.188225 | 2020-06-23T16:46:13 | 2020-06-23T16:46:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,656 |
py
|
from topo.custom.topo import Topo
from . import testutil as testutil
import math
from core.engine import Engine
class TestEngine(Engine):
    """Engine that, at tick 3, delegates every active flow on switch DS
    to the extension switch ES, then defers to the base implementation."""

    def on_EVSwitchStats(self, switch, ev):
        #
        es = self.ctx.topo.get_switch_by_label('ES')
        if switch.label == 'DS':
            # Trigger exactly once, at the stats event whose tick is ~3.
            if math.isclose(ev.tick, 3):
                print("@%.0f add" % ev.tick)
                for id, flow in self.active_flows.items():
                    self.add_delegation(ev.tick, flow, switch, es)
        super().on_EVSwitchStats(switch, ev)

    def on_EVSwitchNewFlow(self, switch, ev):
        # forward flow on next switch in path
        super().on_EVSwitchNewFlow(switch, ev)
class MyTopo( Topo ):
    "delegate to a switch that is used again on the path afterwards, i.e., ..->ds->es->ds->es->s2->... "

    def __init__( self, ctx ):
        # Delays are configurable via the test context, with defaults.
        propagation_delay = float(ctx.config.get("topo.propagation_delay", 0.5))
        processing_delay = float(ctx.config.get("topo.processing_delay", 0))

        # Initialize
        Topo.__init__( self )

        # Two delegating switches (DS, DS2) plus the extension switch (ES);
        # each runs the TestEngine defined above.
        ds = self.addSwitch( 'DS', x=2, y=1, engine=TestEngine(ctx, processing_delay=processing_delay))
        ds2 = self.addSwitch( 'DS2', x=2, y=1, engine=TestEngine(ctx, processing_delay=processing_delay))
        es = self.addSwitch( 'ES', x=1, y=1, engine=TestEngine(ctx, processing_delay=processing_delay))
        h1 = self.addHost( 'h1', x=4, y=1)
        h2 = self.addHost( 'h2',x=4, y=3)
        self.addLink( ds, es, capacity=1000, propagation_delay=propagation_delay )
        self.addLink( ds2, es, capacity=1000, propagation_delay=propagation_delay )
        self.addLink( h1, ds, capacity=1000, propagation_delay=propagation_delay )
        self.addLink( h2, ds2, capacity=1000, propagation_delay=propagation_delay )

        # add traffic: one flow f0 pinned to the path h1->DS->ES->DS2->h2,
        # so ES appears on the path again after the delegation.
        self.addTraffic(
            dict(fg_class='Single', fg_label="f0", fg_start=0, fg_demand=100, fg_duration=10,
                fg_fixed_path=['h1', 'DS', 'ES', 'DS2', 'h2']))

        # call on_done if simulation is finished
        ctx.on_test_finished = self.on_done

    def on_done(self, ctx):
        # Compare observed per-link flow timings with the expected constant.
        testutil.print_summary(ctx)
        print(testutil.get_flow_timings(ctx))
        errors = []
        errors += testutil.verify_flow_timings(ctx, FLOW_TIMINGS)
        return errors
        #return []
def get_topo(ctx):
    """Factory used by the test harness."""
    return MyTopo(ctx)

# NOTE(review): this lambda calls MyTopo() without the required ctx
# argument — it would raise TypeError if ever invoked; presumably kept
# only for Mininet-style registration. Confirm before relying on it.
topos = { 'MyTopo': ( lambda: MyTopo() ) }

# Expected per-link flow timings (JSON), checked in MyTopo.on_done.
FLOW_TIMINGS = """{"DS->ES": {"f0": [0.5, 12.0]}, "DS->h1": {}, "DS2->ES": {},
"DS2->h2": {"f0": [1.5, 13.0]}, "ES->DS": {"f0": [3, 11.5]},
"ES->DS2": {"f0": [1.0, 12.5]}, "h1->DS": {"f0": [0, 10.5]}, "h2->DS2": {}}"""
|
[
"[email protected]"
] | |
593d31b488df95765e3a64530d9157de067998a2
|
c8a38e65e71de888fc5b22fbd027bbaa0f3f6ef1
|
/Python/142.py
|
48db84b49b40e5429e83236336ce49f31599f810
|
[] |
no_license
|
skywhat/leetcode
|
e451a10cdab0026d884b8ed2b03e305b92a3ff0f
|
6aaf58b1e1170a994affd6330d90b89aaaf582d9
|
refs/heads/master
| 2023-03-30T15:54:27.062372 | 2023-03-30T06:51:20 | 2023-03-30T06:51:20 | 90,644,891 | 82 | 27 | null | null | null | null |
UTF-8
|
Python
| false | false | 557 |
py
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def detectCycle(self, head):
        """Return the node where the cycle begins, or None if acyclic.

        Floyd's tortoise-and-hare. Phase 1: advance pointers at speeds
        1 and 2 until they meet (a meeting proves a cycle). Phase 2:
        restart one pointer from the head; stepping both at speed 1,
        they meet exactly at the cycle's entry node.

        :type head: ListNode
        :rtype: ListNode
        """
        tortoise = hare = head
        while hare is not None and hare.next is not None:
            tortoise = tortoise.next
            hare = hare.next.next
            if tortoise is hare:
                # Cycle detected — locate its entry point.
                probe = head
                while probe is not tortoise:
                    probe = probe.next
                    tortoise = tortoise.next
                return probe
        return None
|
[
"[email protected]"
] | |
417a9c86d6cf0e60446d13fbaa43104cd89c1a44
|
b0f4b12ec6b14659b252f19776eb297366c9f330
|
/代码/day3-5/A.FileDemo.py
|
1bfc45d54864ee1dccb3618fe339ea82646998b0
|
[] |
no_license
|
vothin/code
|
a77259db4a3c4630bed293f979a49b676a1bd7c4
|
d2b7819fd3687e0a011988fefab3e6fd70bb014a
|
refs/heads/master
| 2020-08-31T15:48:28.155535 | 2020-01-09T08:21:57 | 2020-01-09T08:21:57 | 218,725,153 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,389 |
py
|
'''
open
r 以只读方式打开文件。文件的指针将会放在文件的开头。这是默认模式。
rb 以二进制格式打开一个文件用于只读。文件指针将会放在文件的开头。这是默认模式。
r+ 打开一个文件用于读写。文件指针将会放在文件的开头。
rb+ 以二进制格式打开一个文件用于读写。文件指针将会放在文件的开头。
w 打开一个文件只用于写入。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。
wb 以二进制格式打开一个文件只用于写入。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。
w+ 打开一个文件用于读写。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。
wb+ 以二进制格式打开一个文件用于读写。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。
a 打开一个文件用于追加。如果该文件已存在,文件指针将会放在文件的结尾。也就是说,新的内容将会被写入到已有内容之后。如果该文件不存在,创建新文件进行写入。
ab 以二进制格式打开一个文件用于追加。如果该文件已存在,文件指针将会放在文件的结尾。也就是说,新的内容将会被写入到已有内容之后。如果该文件不存在,创建新文件进行写入。
a+ 打开一个文件用于读写。如果该文件已存在,文件指针将会放在文件的结尾。文件打开时会是追加模式。如果该文件不存在,创建新文件用于读写。
ab+ 以二进制格式打开一个文件用于追加。如果该文件已存在,文件指针将会放在文件的结尾。如果该文件不存在,创建新文件用于读写。
'''
'''
函数语法
open(name[, mode[, buffering]]) 文件句柄 = open('文件路径', '模式',编码方式)。
name : 一个包含了你要访问的文件名称的字符串值。
mode : mode 决定了打开文件的模式:只读,写入,追加等。所有可取值见如下的完全列表。这个参数是非强制的,默认文件访问模式为只读(r)。
buffering : 如果 buffering 的值被设为 0,就不会有寄存。如果 buffering 的值取 1,访问文件时会寄存行。
如果将 buffering 的值设为大于 1 的整数,表明了这就是的寄存区的缓冲大小。如果取负值,寄存区的缓冲大小则为系统默认。
示例: f = open('test.txt',"r")
file 对象方法
file.read([size]) size未指定则返回整个文件,如果文件大小>2倍内存则有问题.f.read()读到文件尾时返回""(空字串)
file.readline() 返回一行
file.readlines([size]) 返回包含size行的列表,size 未指定则返回全部行
for line in f: print line #通过迭代器访问
f.write("hello\n") #如果要写入字符串以外的数据,先将他转换为字符串.
f.tell() 返回一个整数,表示当前文件指针的位置(就是到文件头的比特数).
f.seek(偏移量,[起始位置]) 用来移动文件指针.
f.close() 打开文件之后一定要关闭,否则文件内容会丢失:
'''
|
[
"[email protected]"
] | |
716cc2c81ec577e777a6a3cfc47ba680a6cadfc7
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_plectrums.py
|
0d3dda010f944bbbef6409f78aeac191753a0607
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 252 |
py
|
from xai.brain.wordbase.nouns._plectrum import _PLECTRUM
# class header
class _PLECTRUMS(_PLECTRUM, ):
    """Plural noun entry "PLECTRUMS"; inherits behavior from _PLECTRUM."""

    def __init__(self,):
        _PLECTRUM.__init__(self)
        self.name = "PLECTRUMS"      # surface form of this entry
        self.specie = 'nouns'        # word class
        self.basic = "plectrum"      # singular/base form
        self.jsondata = {}           # no extra lexical data attached
|
[
"[email protected]"
] | |
7815604a4051af01935361e7b7859ccd85e3e71b
|
ea393959886a5cd13da4539d634f2ca0bbcd06a2
|
/283.py
|
b2b4f2cad4536764cd733094eaf98757b705c7b1
|
[] |
no_license
|
zhangchizju2012/LeetCode
|
f605f35b82f16282559af71e4e61ec2629a90ebc
|
0c4c38849309124121b03cc0b4bf39071b5d1c8c
|
refs/heads/master
| 2020-04-05T12:12:14.810639 | 2018-08-09T10:24:52 | 2018-08-09T10:24:52 | 81,021,830 | 7 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 979 |
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 14 00:24:22 2017
@author: zhangchi
"""
class Solution(object):
    def moveZeroes(self, nums):
        """
        Move every 0 in *nums* to the end, in place, preserving the
        relative order of the non-zero elements.

        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.

        Rewritten with the standard stable two-pointer sweep: the original
        collected zero indexes and shuffled overlapping slices (O(n*k)
        copying) and depended on Python-2-only ``xrange``; this version is
        one O(n) pass that runs under both Python 2 and 3. Handles the
        empty list, all-zero and no-zero cases.
        """
        write = 0  # next slot for a non-zero value
        for read in range(len(nums)):
            if nums[read] != 0:
                nums[write] = nums[read]
                write += 1
        # Back-fill the remaining slots with zeros.
        for i in range(write, len(nums)):
            nums[i] = 0
# Manual check (Python 2 print statement): empty input stays empty,
# method returns None.
s = Solution()
print s.moveZeroes([])
|
[
"[email protected]"
] | |
900bbc907bb10a759b672147517f8448c7ef5e21
|
ef54d37f8a3303013ca7469871a320d303957ed7
|
/robo4.2/fusion/tests/wpst_crm/feature_tests/C7000/Supershaw_TAA_FA_DA/validate.py
|
d1b1f709416576fdb725e7dd9fe4c24c42439338
|
[] |
no_license
|
richa92/Jenkin_Regression_Testing
|
d18badfcf16bda682dfe7bcbbd66f54a9a27a58d
|
24a74926170cbdfafa47e972644e2fe5b627d8ff
|
refs/heads/master
| 2020-07-12T10:01:59.099137 | 2019-08-27T12:14:53 | 2019-08-27T12:14:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,069 |
py
|
'''
This module contains the code to get the IP's of the
ethernet networks. Using the IP's it can login to the
server and execute the diskspd commands to start the traffic.
Diskspd results or ouput will be redirected to the log file
'''
import paramiko
import os
import time
import re
import threading
import Queue
def execute_diskspd(ip, username, passwd, diskspd_cmd):
    '''
    Execute the diskSPD tool Command

    Runs diskspd_cmd on the remote Windows host via psexec and returns
    the os.system() exit status.

    NOTE(review): on failure this returns the exception OBJECT instead of
    raising — callers must distinguish int status from Exception.
    NOTE(review): the command line is built by string concatenation; with
    untrusted ip/username/passwd this is shell-injectable — confirm inputs
    are trusted, or switch to subprocess with an argument list.
    '''
    try:
        single_cmd = "psexec \\\\" + ip + " -u " + username + " -p " + passwd + " " +\
            diskspd_cmd
        output = os.system(single_cmd)
        return (output)
    except Exception as e:
        return (e)
def validate_windows_lun_count(ip, username, passwd, diskspd_cmd):
    # Run the remote command (return value unused beyond triggering it),
    # then count "3PARdata" LUN entries in the locally written report.
    # NOTE(review): Python 2 print statements; the report path
    # C:\WINDOWSLUN.txt is hard-coded — confirm the command writes there.
    output = execute_diskspd(ip,
                             username, passwd, diskspd_cmd)
    with open("C:\\WINDOWSLUN.txt") as f:
        lines = f.readlines()
        print lines
    count = 0
    for i in lines:
        if "3PARdata" in i:
            count = count + 1
    print count
    return count
|
[
"[email protected]"
] | |
7d0fa9b4b4f4b3082220c3ee9b07b146fdbbd204
|
9cbd088a0f7288acee3c1d736ef85e516b86d8fe
|
/twitter_tools.py
|
f3b7643e42816d3d937696a696eca0c0ddfeb875
|
[] |
no_license
|
fjccoin/twitbots
|
91ba75a8123c9c21cf20d3e235075f5e7b0ebd5d
|
513a6df705034aeb61b0d7ea2fccfe6c722160d9
|
refs/heads/master
| 2020-04-08T02:42:31.602419 | 2015-11-12T08:38:01 | 2015-11-12T08:38:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,719 |
py
|
import nltk
from collections import OrderedDict, defaultdict
import re
import requests
from bs4 import BeautifulSoup
from urlparse import urlparse
# Control phrases the bot's owner can embed in a tweet (note the leading
# space — they are matched as substrings of the lowercased text).
SLEEP_COMMAND = ' go to sleep'
WAKE_COMMAND = ' wake up'
QUIET_COMMAND = ' no reply'
LOUD_COMMAND = ' reply on'
# Twitter numeric user id that is always allowed to issue commands.
ADMIN_ID = 21455761
def filter_tweet(tweet, userid, botname, friends=None):
    """Decide how the bot should react to *tweet*.

    Returns (skip, sleep, wake, debug, end_debug) booleans:
    skip — do not reply; the rest toggle bot state when the sender is the
    owner (userid) or the admin.
    """
    skip = False
    sleep = False
    wake = False
    debug = False
    end_debug = False
    # filter RTs
    if tweet.get('retweet_count') > 0:
        skip = True
    # only reply to target user
    sender = None
    """ tweets to reply to:
        if sender is owner and not a reply
        if sender if owner's friend and mentions my name
    """
    try:
        sender = tweet.get('user').get('id')
        # NOTE(review): when friends is None (the default) this list
        # concatenation raises TypeError, which the bare except below
        # converts into skip=True — possibly intentional, confirm.
        if sender not in [userid, ADMIN_ID] + friends:
            skip = True
    except:
        sender = None
        skip = True
    t = tweet.get('text')
    if not t:
        skip = True
    else:
        t = t.lower()
        # Manual retweets ("RT ...") are skipped too.
        if t[:3] == "rt ":
            skip = True
        # Owner/admin control phrases toggle bot state.
        if sender in [userid, ADMIN_ID]:
            if SLEEP_COMMAND in t:
                sleep = True
            elif WAKE_COMMAND in t:
                wake = True
            if QUIET_COMMAND in t:
                debug = True
            elif LOUD_COMMAND in t:
                end_debug = True
        # Replies and @-directed tweets must mention the bot by name;
        # otherwise skip. A plain mentioning tweet that is itself a reply
        # is also skipped (last branch).
        if tweet.get('in_reply_to_status_id') and botname not in t:
            skip = True
        if t[0] == "@" and botname not in t:
            skip = True
        elif botname not in t:
            skip = True
        elif tweet.get('in_reply_to_status_id'):
            skip = True
    return skip, sleep, wake, debug, end_debug
def word_count(sentence, words):
    """Count the distinct tokens of *sentence* that also occur in *words*."""
    return len(set(words).intersection(nltk.word_tokenize(sentence)))
def ok_tweet(c, minlen, maxlen):
    """Return True when candidate text *c* is usable as a tweet:
    it must not end in ':' or ',' and its length must lie in
    the inclusive range [minlen, maxlen]."""
    if c.endswith((':', ',')):
        return False
    return minlen <= len(c) <= maxlen
# Punctuation/tokenizer artifacts removed alongside stopwords below.
GARBAGE = [",", "--", "\'s", ".", "``","n\'t","\'\'",")","(","%","!","\'","?","percent",":"]
# semantic tools
def remove_stopwords(documents, sents=False):
    """Tokenize each document, dropping English stopwords and GARBAGE tokens.

    When sents is False, *documents* is treated as a mapping and the text
    is looked up per key; when True, each element is the text itself.
    Returns a list of token lists.

    NOTE(review): stopwords.words('english') is re-fetched for every
    document and searched linearly per token — hoisting it into a set
    would be much faster; left untouched here.
    """
    texts = []
    for d in documents:
        if sents:
            doc = d #d[0]+d[1]
        else:
            doc = documents[d]
        doc = clean_str(doc)
        tokens = nltk.word_tokenize(doc.lower())
        tokens = [t for t in tokens if t not in nltk.corpus.stopwords.words('english')]
        tokens = [t for t in tokens if t not in GARBAGE]
        texts.append(tokens)
    return texts
def clean_str(text):
    """Strip @mentions, http... URLs, and the characters #, $, *, | from text."""
    kept = [w for w in text.split() if w[0] != '@' and w[:4] != 'http']
    return re.sub('[#$*|]', '', " ".join(kept))
def remove_infreq(inputs, minfreq):
    """Drop tokens whose corpus-wide frequency is <= *minfreq*.

    *inputs* is a list of token lists; returns a new list of token lists
    keeping only tokens that occur more than *minfreq* times overall.
    """
    counts = defaultdict(int)
    for doc in inputs:
        for tok in doc:
            counts[tok] += 1
    return [[tok for tok in doc if counts[tok] > minfreq] for doc in inputs]
# Domain words excluded from fetched headlines (appended to the URL's netloc).
NEWS_DOMAINS = "thenewyorktimes moneybeat"

""" deal with urls in tweets """
def pull_headlines(tweet):
    """Fetch the <title> (or first <h1>) of every URL in the tweet's
    entities, strip domain-name words, and append any quoted tweet's text.
    Returns the accumulated string; failures per URL are silently skipped.
    """
    ent = tweet.get('entities')
    urls = ent.get('urls')
    t = ""
    if urls:
        for u in urls:
            try:
                url = u.get('expanded_url')
                r = requests.get(url)
                headlines = BeautifulSoup(r.content).find('title')
                if not headlines:
                    headlines = BeautifulSoup(r.content).find('h1')
                # remove domain: drop headline words that appear inside
                # the netloc + NEWS_DOMAINS string (substring test).
                domain = '{uri.netloc}'.format(uri=urlparse(url)) + NEWS_DOMAINS
                hwords = [h for h in headlines.getText().split() if h.lower() not in domain]
                t = "%s %s" % (t,' '.join(hwords))
            except:
                # NOTE(review): bare except hides network/parse errors by design.
                continue
    # also pull quoted tweets
    if tweet.get('is_quote_status'):
        try:
            quote = tweet.get('quoted_status').get('text')
        except:
            quote = ''
        t+=quote
    return t
""" break and chunk tweets """
def send_tweet(api, tweet, id_orig=None, username=None):
    """Post *tweet* as a reply to id_orig, prefixed with *username*.

    If Twitter rejects it (typically over-length), split it with
    break_tweet and post the pieces as a threaded chain, advancing the
    in_reply_to id to each successfully posted piece.

    NOTE(review): when username is None, `username + tweet` raises
    TypeError — callers appear to always pass a handle string; confirm.
    """
    twit = api.request('statuses/update', {'status': username + tweet, 'in_reply_to_status_id': id_orig})
    # if too long, break it up
    r = twit.response.json()
    if username:
        maxlen = 139-len(username)
    else:
        maxlen = 139
    if r.get('errors'):
        tweets = break_tweet(tweet, maxlen)
        id_str = id_orig
        for rt in tweets:
            t = api.request('statuses/update', {'status': username + rt, 'in_reply_to_status_id': id_str})
            rt_resp = t.response.json()
            if rt_resp.get('errors'):
                # Piece failed: keep threading onto the last good status.
                continue
            else:
                id_str = rt_resp.get('id_str')
def chunks(l, n):
    """Return successive chunks of *l*, each at most *n* characters.

    Split points prefer a comma (kept on the chunk) when it leaves a
    reasonably long piece (> 50 chars), then the last space inside the
    window; with neither available the text is hard-cut at *n*.

    Fixes over the original:
    - a remainder that already fits appended ``remainder[:idx]`` with an
      undefined (first pass) or stale ``idx`` — now the whole remainder
      is appended;
    - when the window contained no space, ``rfind`` returned -1 and the
      remainder never shrank, looping forever — now we hard-cut at *n*.
    """
    q = []
    remainder = l
    while remainder:
        if len(remainder) <= n:
            q.append(remainder)
            break
        window = remainder[:n]
        comma = window.rfind(',')
        if comma > 50:  # only worth splitting at a comma if the piece is long
            q.append(remainder[:comma + 1])  # keep the comma on the chunk
            remainder = remainder[comma + 1:]
            continue
        space = window.rfind(' ')
        if space <= 0:
            # No usable space in the window: hard cut to guarantee progress.
            q.append(window)
            remainder = remainder[n:]
        else:
            q.append(remainder[:space])  # the space itself is dropped
            remainder = remainder[space + 1:]
    return q
def break_tweet(tweet, n):
    """Split *tweet* into pieces of at most *n* characters: first by
    sentence (Punkt), then over-long sentences by newline and '?' clauses,
    and finally anything still too long via chunks()."""
    # first break into sentences
    sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
    rtweets = sent_detector.tokenize(tweet.strip())
    for idx, rt in enumerate(rtweets):
        if len(rt) > n:
            clauses = rt.split('\n')
            for cdx, c in enumerate(clauses):
                d = '?'
                # Re-attach the '?' delimiter to each piece, then strip the
                # spurious one appended to the final piece.
                commas = [e+d for e in c.split(d) if e != '']
                commas[-1] = commas[-1][:-1]
                # NOTE(review): splicing into the list currently being
                # enumerated — later clause indices can be skipped or
                # revisited; confirm this is the intended behaviour.
                clauses[cdx:cdx+len(commas)] = commas
            rtweets[idx:idx+len(clauses)] = clauses
    for idx, rt in enumerate(rtweets):
        if len(rt) > n:
            chunkt = chunks(rt, n)
            rtweets[idx:idx+len(chunkt)] = chunkt
    return rtweets
# Module-level Punkt sentence tokenizer shared by create_tweet().
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
def create_tweet(text, username):
    """ create a tweet from mult long sentences
    This process will vary by user.

    Returns the first candidate string in *text* that passes ok_tweet's
    length bounds (40..maxlen); if a candidate is too long, its individual
    sentences are tried instead.  Returns None when nothing fits.
    """
    # up to 2 tweets
    #maxlen = 263-2*len(username)
    maxlen = 139-len(username)
    for t in text:
        if ok_tweet(t, 40, maxlen):
            return t
        # go through again and break them up
        else:
            sents = sent_detector.tokenize(t)
            for s in sents:
                if ok_tweet(s, 40, maxlen):
                    return s
    return None
|
[
"[email protected]"
] | |
16f5f3e683d884969d7b2a96646d43ae6d346d91
|
91b2fb1fb6df216f2e365c3366bab66a567fc70d
|
/Week06/每日一题/857. 雇佣 K 名工人的最低成本.py
|
a439d49b3ad77a70ab1c5a3a7846aa901ac77d1d
|
[] |
no_license
|
hrz123/algorithm010
|
d17aee642f03f607a7984beb099eec18f2de1c8e
|
817911d4282d2e226518b3533dff28282a91b3d4
|
refs/heads/master
| 2022-12-20T14:09:26.365781 | 2020-10-11T04:15:57 | 2020-10-11T04:15:57 | 270,178,423 | 1 | 0 | null | 2020-06-07T03:21:09 | 2020-06-07T03:21:09 | null |
UTF-8
|
Python
| false | false | 5,322 |
py
|
# 857. 雇佣 K 名工人的最低成本.py
import heapq
from typing import List
class Solution:
    """LeetCode 857 — minimum total pay to hire exactly K workers."""

    def mincostToHireWorkers(self, quality: List[int], wage: List[int],
                             K: int) -> float:
        # Sort candidates by wage/quality ratio: when workers are taken in
        # this order, the current worker's ratio is the pay rate for the
        # whole hired group.
        workers = sorted(zip(quality, wage), key=lambda p: p[1] / p[0])
        best = float('inf')
        quality_sum = 0
        chosen = []  # max-heap (negated qualities) of the current pool
        for q, w in workers:
            quality_sum += q
            heapq.heappush(chosen, -q)
            if len(chosen) > K:
                # Evict the largest quality to keep the pool cheap.
                quality_sum += heapq.heappop(chosen)
            if len(chosen) == K:
                best = min(best, quality_sum * w / q)
        return best
# The total pay depends on two factors: it is proportional to the largest
# wage/quality ratio in the hired group, and to the sum of the group's qualities.
# We want to shrink both factors at once, so we walk the candidates in order of
# wage/quality ratio while tracking the pool's total quality, taking the minimum
# cost found along that boundary.
class Solution:
    """Repetition drill: same ratio-sort + max-heap solution to LC 857
    (shadows the previous Solution definition)."""

    def mincostToHireWorkers(self, quality: List[int], wage: List[int],
                             K: int) -> float:
        ranked = sorted(zip(quality, wage), key=lambda p: p[1] / p[0])
        lowest = float('inf')
        qual_total = 0
        bucket = []  # negated qualities -> max-heap
        for q, w in ranked:
            qual_total += q
            heapq.heappush(bucket, -q)
            if len(bucket) == K:
                lowest = min(lowest, qual_total * w / q)
                # drop the biggest quality to make room for the next worker
                qual_total += heapq.heappop(bucket)
        return lowest
class Solution:
    """Repetition drill (variant) of the LC 857 heap solution."""

    def mincostToHireWorkers(self, quality: List[int], wage: List[int],
                             K: int) -> float:
        by_ratio = sorted(zip(quality, wage), key=lambda p: p[1] / p[0])
        best_cost = float('inf')
        qsum = 0
        mheap = []
        for q, w in by_ratio:
            qsum += q
            heapq.heappush(mheap, -q)
            if len(mheap) == K:
                best_cost = min(best_cost, qsum * w / q)
                qsum += heapq.heappop(mheap)  # negated pop removes max quality
        return best_cost
class Solution:
    """Repetition drill (variant) of the LC 857 heap solution."""

    def mincostToHireWorkers(self, quality: List[int], wage: List[int],
                             K: int) -> float:
        ordered = sorted(zip(quality, wage), key=lambda p: p[1] / p[0])
        negs = []       # max-heap of qualities via negation
        acc = 0         # total quality of the workers currently pooled
        answer = float('inf')
        for q, w in ordered:
            acc += q
            heapq.heappush(negs, -q)
            if len(negs) == K:
                answer = min(answer, acc * w / q)
                acc += heapq.heappop(negs)
        return answer
class Solution:
    """Repetition drill (variant) of the LC 857 heap solution."""

    def mincostToHireWorkers(self, quality: List[int], wage: List[int],
                             K: int) -> float:
        candidates = sorted(zip(quality, wage), key=lambda p: p[1] / p[0])
        cost = float('inf')
        qtotal = 0
        mx = []
        for q, w in candidates:
            qtotal += q
            heapq.heappush(mx, -q)
            if len(mx) == K:
                cost = min(cost, qtotal * w / q)
                qtotal += heapq.heappop(mx)
        return cost
class Solution:
    """Repetition drill of LC 857 iterating (wage, quality) pairs."""

    def mincostToHireWorkers(self, quality: List[int], wage: List[int],
                             K: int) -> float:
        pairs = sorted(zip(wage, quality), key=lambda p: p[0] / p[1])
        cheapest = float('inf')
        pool_quality = 0
        pool = []
        for w, q in pairs:
            pool_quality += q
            heapq.heappush(pool, -q)
            if len(pool) == K:
                cheapest = min(cheapest, w / q * pool_quality)
                largest = -heapq.heappop(pool)
                pool_quality -= largest
        return cheapest
class Solution:
    """Repetition drill of LC 857 iterating (wage, quality) pairs."""

    def mincostToHireWorkers(self, quality: List[int], wage: List[int],
                             K: int) -> float:
        rate_sorted = sorted(zip(wage, quality), key=lambda p: p[0] / p[1])
        best = float('inf')
        qsum = 0
        worst = []  # negated qualities: top is the worst (largest) quality
        for w, q in rate_sorted:
            qsum += q
            heapq.heappush(worst, -q)
            if len(worst) == K:
                best = min(best, w / q * qsum)
                qsum -= -heapq.heappop(worst)
        return best
class Solution:
    """Repetition drill (variant) of the LC 857 heap solution."""

    def mincostToHireWorkers(self, quality: List[int], wage: List[int],
                             K: int) -> float:
        staff = sorted(zip(quality, wage), key=lambda p: p[1] / p[0])
        minimum = float('inf')
        quality_acc = 0
        mheap = []
        for q, w in staff:
            heapq.heappush(mheap, -q)
            quality_acc += q
            if len(mheap) == K:
                minimum = min(minimum, quality_acc * w / q)
                quality_acc += heapq.heappop(mheap)
        return minimum
class Solution:
    """Final repetition drill of the LC 857 heap solution."""

    def mincostToHireWorkers(self, quality: List[int], wage: List[int],
                             K: int) -> float:
        sorted_ws = sorted(zip(quality, wage), key=lambda p: p[1] / p[0])
        out = float('inf')
        s = 0
        hp = []
        for q, w in sorted_ws:
            s += q
            heapq.heappush(hp, -q)
            if len(hp) == K:
                out = min(out, s * w / q)
                s += heapq.heappop(hp)
        return out
def main():
    """Manual smoke test: run the last-defined Solution on the two
    LeetCode 857 examples (expected output: 105.0 and ~30.66667)."""
    sol = Solution()
    quality = [10, 20, 5]
    wage = [70, 50, 30]
    K = 2
    res = sol.mincostToHireWorkers(quality, wage, K)
    print(res)
    quality = [3, 1, 10, 10, 1]
    wage = [4, 8, 2, 2, 7]
    K = 3
    res = sol.mincostToHireWorkers(quality, wage, K)
    print(res)
if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
1e722c8b3d71456db9c90dd4ee5c9bde1a02f8c7
|
7dfb5942ae4721b7084bde958d632dd90096328a
|
/function_generator/error_models.py
|
b75cd3ae00e6daae1112f1a68f604e5b2ace591f
|
[
"Apache-2.0"
] |
permissive
|
blackwer/function_generator
|
f392ae0041f56d235a959ce3e54c1f865baf3cca
|
91025e67a2c64009f0384ee35466bb04f0819fce
|
refs/heads/master
| 2021-11-26T03:20:49.104389 | 2021-11-10T14:23:34 | 2021-11-10T14:23:34 | 219,051,758 | 9 | 2 |
Apache-2.0
| 2021-11-09T13:34:12 | 2019-11-01T19:42:48 |
C++
|
UTF-8
|
Python
| false | false | 291 |
py
|
import numpy as np
def standard_error_model(coefs, f):
    """Magnitude of the last two coefficients, normalised by
    max(1, |leading coefficient|).  *f* is accepted but unused."""
    tail = np.abs(coefs[-2:]).max()
    return tail / max(1, np.abs(coefs[0]))
def relative_error_model(coefs, f):
    """Magnitude of the last two coefficients relative to the leading
    coefficient's magnitude.  *f* is accepted but unused."""
    tail = np.abs(coefs[-2:]).max()
    return tail / np.abs(coefs[0])
def new_error_model(coefs, f):
    """Magnitude of the last two coefficients relative to the smallest
    magnitude among the sampled function values *f*."""
    tail = np.abs(coefs[-2:]).max()
    return tail / np.abs(f).min()
|
[
"[email protected]"
] | |
52a4d27d2d45abfa176ad4c8edd1e8e1b6e7298c
|
1b126876948b3d05f89e058d4642405f192fb858
|
/src/strava_api/Client.py
|
ff70413ffc370f22346a23c172543126be8f72e8
|
[
"MIT"
] |
permissive
|
yknot/strava_api
|
6ecc972132156432cdc4e19ffe23fd5045fa765a
|
b31080b8718a6c26399cfc7c36b77f36a2bed1d3
|
refs/heads/master
| 2023-05-25T04:51:02.822053 | 2020-07-18T04:44:35 | 2020-07-18T04:44:35 | 279,205,963 | 0 | 0 |
MIT
| 2023-05-23T00:04:21 | 2020-07-13T04:01:33 |
Python
|
UTF-8
|
Python
| false | false | 1,091 |
py
|
"""Main module."""
import requests
from .Athlete import Athlete
class Client:
    """Class to manage your Strava API Client"""
    def __init__(
        self, client_id: str, client_secret: str, auth_token: str, refresh_token: str
    ) -> None:
        """initialize client with application attributes"""
        self.client_id = client_id
        self.client_secret = client_secret
        self.auth_token = auth_token
        self.refresh_token = refresh_token
        # create variables
        # Populated by set_athlete(); None until then.
        self.athlete = None
    def set_athlete(self, auth_code: str) -> None:
        """Exchange the OAuth *auth_code* for tokens and store the result
        as an Athlete on self.athlete."""
        try:
            response = requests.post(
                url="https://www.strava.com/oauth/token",
                params={
                    "client_id": self.client_id,
                    "client_secret": self.client_secret,
                    "code": auth_code,
                    "grant_type": "authorization_code",
                },
            )
            self.athlete = Athlete(response.json())
        except requests.exceptions.RequestException:
            # NOTE(review): failure is only printed, not raised or recorded —
            # self.athlete stays None, so callers must check for that.
            print("HTTP Request failed")
|
[
"[email protected]"
] | |
9e01ee06ccb0d0c3f6fcbb90b6af174e4d295b4a
|
96086ae5e7bfa1e40159f919269a90c83e472326
|
/opengever/usermigration/plone_tasks.py
|
121756f0302306a726785ba83d2b3607d1afb842
|
[] |
no_license
|
lukasgraf/opengever.core
|
6fc313717fbec3692354e56c2c3293789076a389
|
a15c4ff8e0d5494906d7de46a43e3427c8d2d49f
|
refs/heads/master
| 2020-12-01T11:38:46.721555 | 2018-06-18T10:13:09 | 2018-06-18T10:13:09 | 57,871,187 | 0 | 0 | null | 2016-05-02T06:59:58 | 2016-05-02T06:59:58 | null |
UTF-8
|
Python
| false | false | 6,253 |
py
|
"""
Migrate user IDs in Plone tasks (issuers, responsibles, responses)
"""
from opengever.ogds.base.utils import ogds_service
from opengever.task.adapters import IResponseContainer
from opengever.task.task import ITask
from opengever.usermigration.exceptions import UserMigrationException
from plone import api
import logging
logger = logging.getLogger('opengever.usermigration')
FIELDS_TO_CHECK = ('responsible', 'issuer')
class PloneTasksMigrator(object):
    """This migrator changes the `issuer` and `responsible` fields on
    Plone tasks, as well as updating responses on tasks as needed.
    It does not however fix local roles assigned to Plone tasks - these can
    be fixed using the "local roles" migration in ftw.usermigration.
    """
    def __init__(self, portal, principal_mapping, mode='move', strict=True):
        """Store the portal, the old->new userid mapping and options.

        Only 'move' mode is implemented; anything else raises
        NotImplementedError.
        """
        self.portal = portal
        self.principal_mapping = principal_mapping
        if mode != 'move':
            raise NotImplementedError(
                "PloneTasksMigrator only supports 'move' mode")
        self.mode = mode
        self.strict = strict
        # Keep track of tasks that need reindexing
        self.to_reindex = set()
        # (path, old_userid, new_userid) tuples, reported by migrate().
        self.task_moves = {
            'responsible': [],
            'issuer': [],
        }
        self.response_moves = {
            'creator': [],
            'responsible_before': [],
            'responsible_after': [],
        }
    def _verify_user(self, userid):
        """Raise UserMigrationException unless *userid* exists in OGDS."""
        ogds_user = ogds_service().fetch_user(userid)
        if ogds_user is None:
            msg = "User '{}' not found in OGDS!".format(userid)
            raise UserMigrationException(msg)
    def _fix_responses(self, obj):
        """Rewrite migrated userids inside a task's responses: the
        response creator plus 'responsible' before/after change entries."""
        container = IResponseContainer(obj)
        path = '/'.join(obj.getPhysicalPath())
        for response_no, response in enumerate(container):
            response_identifier = '%s - Response #%s' % (path, response_no)
            # Fix response creator
            creator = getattr(response, 'creator', '')
            if creator in self.principal_mapping:
                logger.info("Fixing 'creator' for %s" % response_identifier)
                new_userid = self.principal_mapping[creator]
                response.creator = new_userid
                self.response_moves['creator'].append((
                    response_identifier, creator, new_userid))
            for change in response.changes:
                # Fix responsible [before|after]
                if change.get('id') == 'responsible':
                    before = change.get('before', '')
                    if before in self.principal_mapping:
                        new_userid = self.principal_mapping[before]
                        # unicode(): this is a Python 2 codebase.
                        change['before'] = unicode(new_userid)
                        # Need to flag changes to track mutations - see #3419
                        response.changes._p_changed = True
                        logger.info(
                            "Fixed 'responsible:before' for change in %s "
                            "(%s -> %s)" % (
                                response_identifier, before, new_userid))
                        self.response_moves['responsible_before'].append((
                            response_identifier, before, new_userid))
                    after = change.get('after', '')
                    if after in self.principal_mapping:
                        new_userid = self.principal_mapping[after]
                        change['after'] = unicode(new_userid)
                        # Need to flag changes to track mutations - see #3419
                        response.changes._p_changed = True
                        logger.info(
                            "Fixed 'responsible:after' for change in %s "
                            "(%s -> %s)" % (
                                response_identifier, after, new_userid))
                        self.response_moves['responsible_after'].append((
                            response_identifier, after, new_userid))
    def _migrate_plone_task(self, obj):
        """Rewrite the task's 'responsible'/'issuer' fields (per
        FIELDS_TO_CHECK) and mark the task for reindexing."""
        task = ITask(obj)
        for field_name in FIELDS_TO_CHECK:
            # Check 'responsible' and 'issuer' fields
            old_userid = getattr(task, field_name, None)
            if old_userid in self.principal_mapping:
                path = '/'.join(obj.getPhysicalPath())
                logger.info('Fixing %r for %s' % (field_name, path))
                new_userid = self.principal_mapping[old_userid]
                setattr(task, field_name, new_userid)
                self.to_reindex.add(obj)
                self.task_moves[field_name].append(
                    (path, old_userid, new_userid))
    def migrate(self):
        """Run the migration over every ITask in the catalog and return a
        summary dict keyed by field kind with 'moved'/'copied'/'deleted'
        lists (only 'moved' is ever populated here)."""
        catalog = api.portal.get_tool('portal_catalog')
        # Verify all new users exist before doing anything
        for old_userid, new_userid in self.principal_mapping.items():
            self._verify_user(new_userid)
        all_tasks = [b.getObject() for b in catalog.unrestrictedSearchResults(
            object_provides=ITask.__identifier__)]
        for obj in all_tasks:
            self._migrate_plone_task(obj)
            self._fix_responses(obj)
        for obj in self.to_reindex:
            # Reindex 'responsible' and 'issuer' for changed objects.
            logger.info('Reindexing %s' % '/'.join(obj.getPhysicalPath()))
            obj.reindexObject(idxs=FIELDS_TO_CHECK)
        results = {
            'task_issuers': {
                'moved': self.task_moves['issuer'],
                'copied': [],
                'deleted': []},
            'task_responsibles': {
                'moved': self.task_moves['responsible'],
                'copied': [],
                'deleted': []},
            'response_creators': {
                'moved': self.response_moves['creator'],
                'copied': [],
                'deleted': []},
            'response_responsible_before': {
                'moved': self.response_moves['responsible_before'],
                'copied': [],
                'deleted': []},
            'response_responsible_after': {
                'moved': self.response_moves['responsible_after'],
                'copied': [],
                'deleted': []},
        }
        return results
|
[
"[email protected]"
] | |
d89d76b57a914617374ae2be28918b6019c91b82
|
2cb07ae51d1de3e8bdff12e5628e7d142a98d970
|
/Aula3/Problem15_12_4.py
|
3454f557c9f57d8e47ebee3ce6450c7593be0a3e
|
[] |
no_license
|
juanfdg/JuanFreireCES22
|
e7c40a11584a86e1f81520d9da0bbdd58ea48e02
|
4d80b32163ea6d3f4c5f35375969a748022be438
|
refs/heads/master
| 2021-04-27T00:50:48.754467 | 2018-07-03T03:29:36 | 2018-07-03T03:29:36 | 122,661,075 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 369 |
py
|
class Point():
    """A point in the 2-D plane."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def get_line_to(self, other_point):
        """Return (slope, intercept) of the line through self and other_point.

        Raises ValueError for a vertical line (equal x coordinates), which
        has no finite slope — the original version crashed with an
        unexplained ZeroDivisionError in that case.
        """
        dx = other_point.x - self.x
        if dx == 0:
            raise ValueError("vertical line: slope is undefined")
        slope = (other_point.y - self.y) / dx
        linear_coef = self.y - slope * self.x
        return (slope, linear_coef)
print(Point(4, 11).get_line_to(Point(6, 15)))
|
[
"--global"
] |
--global
|
8d6cca91d5489b3dabcf10d8c98523f7f3c593f8
|
9924e0dc6e0e8c8665508a218636f391451a153f
|
/Extras/use_flacco.py
|
2e8dfe4b9cb62fa2b2d599de9da641448cd1f9e8
|
[] |
no_license
|
ai-se/ExploratoryLandscapeAnalysis
|
b531d374221397ed91f43eeff00217aa85797881
|
c338fe93bb11881d25b6000853ca7ac0be69e212
|
refs/heads/master
| 2020-07-13T12:52:04.601453 | 2016-09-23T21:21:08 | 2016-09-23T21:21:08 | 66,961,225 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,730 |
py
|
from __future__ import division
import pyRserve
from os import listdir
import pandas as pd
from random import shuffle
def df_to_list_str(df):
    """Flatten *df* column by column into one comma-separated string
    (no trailing comma)."""
    values = []
    for column in df.columns.tolist():
        values.extend(df[column].tolist())
    return ",".join(str(v) for v in values)
def get_ela_features(independent, dependent):
    """Compute flacco 'ela_distr' exploratory-landscape features via a
    local Rserve session (port 6311).

    *independent*/*dependent* are DataFrames with matching row counts;
    they are serialised into R matrix literals, loaded into the flacco
    feature object, and the resulting feature name -> value dict is
    returned.  Requires a running Rserve with the 'flacco' R package.
    """
    # rcmd = pyRserve.connect(host='localhost', port=6311)
    # print(rcmd.eval('rnorm(100)'))
    features = {}
    i_ncols = len(independent.columns)
    # Build R matrix literals column-major, matching R's matrix() filling.
    str_indep = "matrix(c(" + df_to_list_str(independent) + "), ncol=" + str(i_ncols) + ")"
    str_dep = "matrix(c(" + df_to_list_str(dependent) + "), ncol=" + str(1) + ")"
    assert(len(independent) == len(dependent)), "sanity check failed"
    conn = pyRserve.connect(host='localhost', port=6311)
    conn.voidEval("library('flacco')")
    conn.voidEval("X <- " + str_indep)
    conn.voidEval("y<- " + str_dep)
    conn.voidEval("feat.object = createFeatureObject(X = X, y = y, blocks = 3)")
    fs1 = conn.r("calculateFeatureSet(feat.object, set = 'ela_distr')")
    for name, value in zip(fs1.keys, fs1.values):
        features[name] = value
    # Other feature sets left disabled by the original author.
    # fs2 = conn.r("calculateFeatureSet(feat.object, set = 'ela_level')")
    # for name, value in zip(fs2.keys, fs2.values):
    #     features[name] = value
    # fs3 = conn.r("calculateFeatureSet(feat.object, set = 'ela_meta')")
    # for name, value in zip(fs3.keys, fs3.values):
    #     features[name] = value
    # fs4 = conn.r("calculateFeatureSet(feat.object, set = 'cm_grad')")
    # for name, value in zip(fs4.keys, fs4.values):
    #     features[name] = value
    return features
if __name__ == "__main__":
    # Python 2 script: sample 100 rows from a feature-model CSV, min-max
    # normalise the dependent ("$<"-named) columns, and print the flacco
    # ELA features for the first model only.
    files = ["../FeatureModels/" + f for f in listdir("../FeatureModels") if ".csv" in f]  # NOTE(review): built but unused
    for filename in ["../FeatureModels/BerkeleyDB.csv"]:
        contents = pd.read_csv(filename)
        # Columns containing "$<" hold the measured objectives.
        independent_columns = [c for c in contents.columns if "$<" not in c]
        dependent_column = [c for c in contents.columns if "$<" in c]
        independents = contents[independent_columns]
        raw_dependents = contents[dependent_column]
        # Min-max scaling shifted by the mean (not classic 0-1 scaling).
        dependents = (raw_dependents - raw_dependents.mean()) / (raw_dependents.max() - raw_dependents.min())
        indexes = range(len(contents))
        shuffle(indexes)
        n = 100#min(n, int(len(contents) * 0.1))
        samples = indexes[:n]
        independent_values = independents[independents.index.isin(samples)]
        dependent_values = dependents[dependents.index.isin(samples)]
        print filename
        print get_ela_features(independent_values, dependent_values)
        exit()
|
[
"[email protected]"
] | |
26c2f5e55d19a42e4299bc3c03c1aa8d472539d8
|
38a42a205eaa5a0a46989c95f0b01f7e04b96a9e
|
/uoft/CSC148H1F Intro to Comp Sci/@week3_stacks/@@Exercise3/stack_ex.py
|
25de6d1577c709a79973a271d6b1427ee3ffe857
|
[
"MIT"
] |
permissive
|
Reginald-Lee/biji-ben
|
d24cd1189ca3e9ed7b30e5b20a40137e8d6d4039
|
37009dfdbef9a15c2851bcca2a4e029267e6a02d
|
refs/heads/master
| 2023-05-06T23:06:49.819088 | 2020-06-10T12:07:47 | 2020-06-10T12:07:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,692 |
py
|
# Exercise 3: More Stack Exercises
#
# CSC148 Fall 2014, University of Toronto
# Instructor: David Liu
# ---------------------------------------------
# STUDENT INFORMATION
#
# List your information below, in format
# <full name>, <utorid>
# <Rui Qiu>, <999292509>
# ---------------------------------------------
from stack import Stack, EmptyStackError
class SmallStackError(Exception):
    """Raised when an operation requires at least two stack elements.

    The original class body ran print(...), which executed once at import
    time rather than when the error was raised, and left the exception
    without a message.
    """
def reverse_top_two(stack):
    """ (Stack) -> NoneType
    Reverse the top two elements on stack.
    Raise a SmallStackError if stack has fewer than two elements.
    >>> stack = Stack()
    >>> stack.push(1)
    >>> stack.push(2)
    >>> reverse_top_two(stack)
    >>> stack.pop()
    1
    >>> stack.pop()
    2
    """
    # Validate before mutating.  The original wrapped a no-op comparison in
    # a try/except (so EmptyStackError was never actually raised) and popped
    # before checking the size, which emptied a one-element stack on failure.
    if stack.is_empty():
        raise SmallStackError
    top = stack.pop()
    if stack.is_empty():
        stack.push(top)  # restore the single untouched element
        raise SmallStackError
    second = stack.pop()
    # Push back in popped order: the old top ends up beneath the old second.
    stack.push(top)
    stack.push(second)
    return stack
def reverse(stack):
    """ (Stack) -> NoneType
    Reverse all the elements of stack.
    >>> stack = Stack()
    >>> stack.push(1)
    >>> stack.push(2)
    >>> reverse(stack)
    >>> stack.pop()
    1
    >>> stack.pop()
    2
    """
    # Drain the stack top-first, then push the drained items back in that
    # same order: the old top lands on the bottom, reversing the stack.
    drained = []
    while not stack.is_empty():
        drained.append(stack.pop())
    for item in drained:
        stack.push(item)
    return stack
|
[
"[email protected]"
] | |
a8d4ea1ab28833bfd43a58cd9b108e03ae0b7c42
|
9d90b664ebbd11a57ee6156c528081551b98055b
|
/wsgi/local_data/brython_programs/tuple1.py
|
fb6bc882e1ee285aa89bedf32f13c2ec02f31f08
|
[] |
no_license
|
2014cdag21/c21
|
d4f85f91ba446feb6669a39903dda38c21e8b868
|
faf4b354f7d1d4abec79c683d7d02055c6bab489
|
refs/heads/master
| 2020-06-03T17:54:16.144118 | 2014-06-20T09:29:02 | 2014-06-20T09:29:02 | 19,724,479 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 429 |
py
|
# Demonstration of basic tuple operations: length, indexing, slicing
# and membership testing.
d = (11,12,13,'asdf',14,15.0)
# Note - tuples are immutable types
# Common operations:
# length of a tuple
print(len(d))
# indexation (in Python it starts from zero)
print(d[0], d[1])
# slicing
print(d[0:2]) # equals to (11, 12)
print(d[2:-1]) # equals to (13, 'asdf', 14)
print(d[:2]) # same as d[0:2], equals to (11, 12)
print(d[3:]) # equals to ('asdf', 14, 15.0)
# contains
print((15 in d, 100 in d)) # returns (True, False); note 15 == 15.0
|
[
"[email protected]"
] | |
0e8f422dbaf4ff83f83fc49dc9410897d3314dcd
|
7e9daf6a2a3ebfb969e793f92afc0dc5f1c2fc35
|
/cat_mouse.py
|
940150c58ad59356e7f9220c3b08a3bfc16612a7
|
[] |
no_license
|
NARESHSWAMI199/5-Star-On-Hacker-Rank-Python
|
e43ce5cb3429d2a683c37e6f4ba6440d073d47c2
|
51f245d1d0966de21ddf861b22fe3379e7c8a0a7
|
refs/heads/main
| 2023-02-25T03:05:25.330205 | 2021-01-19T13:49:27 | 2021-01-19T13:49:27 | 325,296,957 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 518 |
py
|
# One query per line: the integer positions of cat A, cat B and mouse C.
# The strictly closer cat catches the mouse; on a tie the mouse escapes.
quries_size = int(input())  # NOTE(review): typo for 'queries_size'
maxmum = 0  # NOTE(review): never used — leftover from an earlier draft
for i in range(quries_size):
    query = list(map(int,input().split()))
    # distance from cat A to the mouse (manual abs())
    if query[0] > query[2]:
        dist_of_a = query[0] - query[2]
    else :
        dist_of_a = query[2]- query[0]
    # distance from cat B to the mouse (manual abs())
    if query[1] > query[2]:
        dist_of_b = query[1] - query[2]
    else :
        dist_of_b = query[2]- query[1]
    if dist_of_a < dist_of_b:
        print("Cat A")
    elif dist_of_b < dist_of_a:
        print("Cat B")
    else :
        print("Mouse C")
|
[
"[email protected]"
] | |
d4952e4625b9ebd20f0d0deb21cdd0ca66b480cf
|
faa0ce2a95da958be3bfb171cdff29eeb43c3eb6
|
/py-exercises/JulieTestModule/characters/shadow.py
|
f71a4d7d759a9855c1f3ccbf67630318ea88332d
|
[] |
no_license
|
julianapeace/digitalcrafts-exercises
|
98fe4e20420c47cf9d92d16c45ac60dc35a49a6a
|
98e6680138d55c5d093164a47da53e1ddb6d064c
|
refs/heads/master
| 2021-08-30T04:17:09.997205 | 2017-12-16T00:22:22 | 2017-12-16T00:22:22 | 103,176,043 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 236 |
py
|
from characters.base import Character
class Shadow(Character):
    """Character subclass with fixed default stats: 1 health, 5 power,
    0 armor, 0 evade, coincount 4 (semantics defined by the Character base)."""
    def __init__(self, name = 'Shadow', health = 1, power = 5, armor = 0, evade = 0, coincount = 4):
        super().__init__(name, health, power, armor, evade, coincount)
|
[
"[email protected]"
] | |
f66c598f24bf258557c6b380eb6f1b14b1fa4d9a
|
67a7c314fc99d9cd7a677fcb6bc2b6dfa20a9cff
|
/spambayes-1.0.4/utilities/dump_cdb.py
|
49728d0958b67c26cdc52128cfdcf1d6f116874e
|
[
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0"
] |
permissive
|
Xodarap/Eipi
|
7ebbb9fd861fdb411c1e273ea5d2a088aa579930
|
d30997a737912e38316c198531f7cb9c5693c313
|
refs/heads/master
| 2016-09-11T06:28:01.333832 | 2011-05-03T15:35:20 | 2011-05-03T15:35:20 | 1,367,645 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 550 |
py
|
#! /usr/bin/env python
RC_DIR = "~/.spambayes"
DB_FILE = RC_DIR + "/wordprobs.cdb"
import sys
import os
DB_FILE = os.path.expanduser(DB_FILE)
from spambayes.cdb import Cdb
def main():
    """Dump (token, probability) pairs from a spambayes CDB word-probability
    database, sorted by ascending probability.  Uses sys.argv[1] as the
    database path when given, else DB_FILE.  (Python 2 syntax.)"""
    if len(sys.argv) == 2:
        db_file = sys.argv[1]
    else:
        db_file = os.path.expanduser(DB_FILE)
    db = Cdb(open(db_file, 'rb'))
    items = []
    for k, v in db.iteritems():
        # Values are stored as strings; convert so the sort is numeric.
        items.append((float(v), k))
    items.sort()
    for v, k in items:
        print k, v
if __name__ == "__main__":
    main()
|
[
"eipi@mybox.(none)"
] |
eipi@mybox.(none)
|
14698f5e208340300976981461b72d99053e4499
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/django_django/django-master/django/views/static.py
|
479c59cac6c4165e1254d9a1815a56860e62d1b5
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 |
Python
|
UTF-8
|
Python
| false | false | 5,108 |
py
|
"""
Views and functions for serving static files. These are only to be used
during development, and SHOULD NOT be used in a production setting.
"""
import mimetypes
import os
import posixpath
import re
import stat
from urllib.parse import unquote
from django.http import (
FileResponse, Http404, HttpResponse, HttpResponseNotModified,
HttpResponseRedirect,
)
from django.template import Context, Engine, TemplateDoesNotExist, loader
from django.utils.http import http_date, parse_http_date
from django.utils.translation import gettext as _, gettext_lazy
def serve(request, path, document_root=None, show_indexes=False):
    """
    Serve static files below a given point in the directory structure.
    To use, put a URL pattern such as::
        from django.views.static import serve
        url(r'^(?P<path>.*)$', serve, {'document_root': '/path/to/my/files/'})
    in your URLconf. You must provide the ``document_root`` param. You may
    also set ``show_indexes`` to ``True`` if you'd like to serve a basic index
    of the directory. This index view will use the template hardcoded below,
    but if you'd like to override it, you can create a template called
    ``static/directory_index.html``.
    """
    # Normalise the URL path and rebuild it component by component, dropping
    # empty parts, drive letters and '.'/'..' so the request cannot escape
    # document_root.
    path = posixpath.normpath(unquote(path))
    path = path.lstrip('/')
    newpath = ''
    for part in path.split('/'):
        if not part:
            # Strip empty path components.
            continue
        drive, part = os.path.splitdrive(part)
        head, part = os.path.split(part)
        if part in (os.curdir, os.pardir):
            # Strip '.' and '..' in path.
            continue
        newpath = os.path.join(newpath, part).replace('\\', '/')
    if newpath and path != newpath:
        # Redirect so the browser's URL matches the sanitised path.
        return HttpResponseRedirect(newpath)
    fullpath = os.path.join(document_root, newpath)
    if os.path.isdir(fullpath):
        if show_indexes:
            return directory_index(newpath, fullpath)
        raise Http404(_("Directory indexes are not allowed here."))
    if not os.path.exists(fullpath):
        raise Http404(_('"%(path)s" does not exist') % {'path': fullpath})
    # Respect the If-Modified-Since header.
    statobj = os.stat(fullpath)
    if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),
                              statobj.st_mtime, statobj.st_size):
        return HttpResponseNotModified()
    content_type, encoding = mimetypes.guess_type(fullpath)
    content_type = content_type or 'application/octet-stream'
    response = FileResponse(open(fullpath, 'rb'), content_type=content_type)
    response["Last-Modified"] = http_date(statobj.st_mtime)
    if stat.S_ISREG(statobj.st_mode):
        # Only regular files have a meaningful Content-Length.
        response["Content-Length"] = statobj.st_size
    if encoding:
        response["Content-Encoding"] = encoding
    return response
DEFAULT_DIRECTORY_INDEX_TEMPLATE = """
{% load i18n %}
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8" />
<meta http-equiv="Content-Language" content="en-us" />
<meta name="robots" content="NONE,NOARCHIVE" />
<title>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</title>
</head>
<body>
<h1>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</h1>
<ul>
{% if directory != "/" %}
<li><a href="../">../</a></li>
{% endif %}
{% for f in file_list %}
<li><a href="{{ f|urlencode }}">{{ f }}</a></li>
{% endfor %}
</ul>
</body>
</html>
"""
template_translatable = gettext_lazy("Index of %(directory)s")
def directory_index(path, fullpath):
    """Render a basic HTML listing of the directory *fullpath*, preferring
    the project's ``static/directory_index(.html)`` template and falling
    back to DEFAULT_DIRECTORY_INDEX_TEMPLATE."""
    try:
        t = loader.select_template([
            'static/directory_index.html',
            'static/directory_index',
        ])
    except TemplateDoesNotExist:
        t = Engine(libraries={'i18n': 'django.templatetags.i18n'}).from_string(DEFAULT_DIRECTORY_INDEX_TEMPLATE)
    files = []
    for f in os.listdir(fullpath):
        # Hidden (dot) entries are skipped; directories get a trailing '/'.
        if not f.startswith('.'):
            if os.path.isdir(os.path.join(fullpath, f)):
                f += '/'
            files.append(f)
    c = Context({
        'directory': path + '/',
        'file_list': files,
    })
    return HttpResponse(t.render(c))
def was_modified_since(header=None, mtime=0, size=0):
"""
Was something modified since the user last downloaded it?
header
This is the value of the If-Modified-Since header. If this is None,
I'll just return True.
mtime
This is the modification time of the item we're talking about.
size
This is the size of the item we're talking about.
"""
try:
if header is None:
raise ValueError
matches = re.match(r"^([^;]+)(; length=([0-9]+))?$", header,
re.IGNORECASE)
header_mtime = parse_http_date(matches.group(1))
header_len = matches.group(3)
if header_len and int(header_len) != size:
raise ValueError
if int(mtime) > header_mtime:
raise ValueError
except (AttributeError, ValueError, OverflowError):
return True
return False
|
[
"[email protected]"
] | |
40ccd51ea1d674209bf46cbea751869f208c6df8
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/coins_20200608093830.py
|
2b449f4937abf589c1a075934356f3463068e9c8
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 178 |
py
|
def change(amount,coins):
    # NOTE(review): work-in-progress snapshot (from a .history folder) —
    # 'count' is never used, nothing is returned, and
    # 'times = coins[i] / amount' looks inverted: 'amount / coins[i]' would
    # give how many times each coin fits into the amount. Confirm intent
    # before relying on this.
    count = 0
    for i in range(len(coins)):
        times = coins[i] / amount
        print(times)
change(5,[1,2,5])
|
[
"[email protected]"
] | |
7dd7acbd17cee8b4c05c6f118abbd654aca5e2d0
|
797f21680bf51656db629691cc667a4ddae7a513
|
/final_exams/heroes_of_code_and_logic_VII.py
|
758594704481bd5724bca88a701dcec11bcbc266
|
[] |
no_license
|
yordan-marinov/fundamentals_python
|
48f5ab77814fddc6d3cb5a8d4b5e14f1eebf1298
|
e1e9544d02be99640623317fadee810b503e7d9f
|
refs/heads/master
| 2023-01-24T04:59:48.140176 | 2020-12-14T14:21:49 | 2020-12-14T14:21:49 | 309,784,119 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,679 |
py
|
def get_heroes_data() -> dict:
    """Read N from stdin, then N lines of '<name> <hp> <mp>', and return
    a mapping of hero name -> {'hp': int, 'mp': int}."""
    heroes = {}
    count = int(input())
    for _ in range(count):
        parts = input().split()
        heroes[parts[0]] = {
            "hp": int(parts[1]),
            "mp": int(parts[2]),
        }
    return heroes
def cast_spell(dd: dict, *args) -> dict:
    """Spend mana on a spell: args = (hero_name, mp_cost, spell_name).

    Deducts the cost when the hero has enough MP, printing the outcome
    either way, and returns the (mutated) hero dict.
    """
    name, cost, spell = args[0], int(args[1]), args[2]
    hero = dd[name]
    if hero["mp"] < cost:
        print(f"{name} does not have enough MP to cast {spell}!")
    else:
        hero["mp"] -= cost
        print(
            f"{name} has successfully cast {spell} "
            f"and now has {hero['mp']} MP!"
        )
    return dd
def take_damage(dd: dict, *args) -> dict:
    """Apply damage: args = (hero_name, damage, attacker).

    Subtracts HP; a hero at 0 HP or below is announced as killed and
    removed from the dict.  Returns the (mutated) hero dict.
    """
    name, dmg, attacker = args[0], int(args[1]), args[2]
    dd[name]["hp"] -= dmg
    remaining = dd[name]["hp"]
    if remaining > 0:
        print(
            f"{name} was hit for {dmg} HP by {attacker} and "
            f"now has {remaining} HP left!"
        )
    else:
        print(f"{name} has been killed by {attacker}!")
        del dd[name]
    return dd
def recharge(dd: dict, *args) -> dict:
    """Restore MP: args = (hero_name, amount), clamped so MP never exceeds
    MAXIMUM_POINTS['mp'].  The printed amount is the effective gain."""
    name, gain = args[0], int(args[1])
    cap = MAXIMUM_POINTS["mp"]
    current = dd[name]["mp"]
    if current + gain > cap:
        gain = cap - current
    print(f"{name} recharged for {gain} MP!")
    dd[name]["mp"] = current + gain
    return dd
def heal(dd: dict, *args) -> dict:
    """Restore HP: args = (hero_name, amount), clamped so HP never exceeds
    MAXIMUM_POINTS['hp'].  The printed amount is the effective gain."""
    name, gain = args[0], int(args[1])
    cap = MAXIMUM_POINTS["hp"]
    current = dd[name]["hp"]
    if current + gain > cap:
        gain = cap - current
    print(f"{name} healed for {gain} HP!")
    dd[name]["hp"] = current + gain
    return dd
def main_manipulation_print_func(dd: dict, commands) -> print:
    """Read '<Command> - <arg> - <arg>...' lines from stdin until 'End',
    dispatching each to the matching handler in *commands*; finally print
    the ranked hero list."""
    while True:
        data = input()
        if data == "End":
            sorting_printing_func(dd)
            break
        data = data.split(" - ")
        command = data.pop(0)
        # Remaining pieces are passed through as string args.
        commands[command](dd, *data)
def sorting_printing_func(dd: dict) -> print:
    """Print every hero's name, HP and MP, ordered by HP descending and
    then by name ascending."""
    ordered = sorted(dd.items(), key=lambda pair: (-pair[1]["hp"], pair[0]))
    for name, stats in ordered:
        print(f"{name}")
        print(f"  HP: {stats['hp']}")
        print(f"  MP: {stats['mp']}")
# Stat caps used by recharge()/heal(), and the command-name -> handler
# dispatch table consumed by main_manipulation_print_func().
MAXIMUM_POINTS = {"hp": 100, "mp": 200}
COMMANDS = dict(
    CastSpell=cast_spell,
    TakeDamage=take_damage,
    Recharge=recharge,
    Heal=heal
)
heroes = get_heroes_data()
main_manipulation_print_func(heroes, COMMANDS)
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.