Dataset schema (one line per column; ⌀ marks columns that contain nulls):

blob_id               string  (length 40)
directory_id          string  (length 40)
path                  string  (length 3 to 288)
content_id            string  (length 40)
detected_licenses     list    (length 0 to 112)
license_type          string  (2 classes)
repo_name             string  (length 5 to 115)
snapshot_id           string  (length 40)
revision_id           string  (length 40)
branch_name           string  (684 classes)
visit_date            timestamp[us]  (2015-08-06 10:31:46 to 2023-09-06 10:44:38)
revision_date         timestamp[us]  (1970-01-01 02:38:32 to 2037-05-03 13:00:00)
committer_date        timestamp[us]  (1970-01-01 02:38:32 to 2023-09-06 01:08:06)
github_id             int64   (4.92k to 681M) ⌀
star_events_count     int64   (0 to 209k)
fork_events_count     int64   (0 to 110k)
gha_license_id        string  (22 classes)
gha_event_created_at  timestamp[us]  (2012-06-04 01:52:49 to 2023-09-14 21:59:50) ⌀
gha_created_at        timestamp[us]  (2008-05-22 07:58:19 to 2023-08-21 12:35:19) ⌀
gha_language          string  (147 classes)
src_encoding          string  (25 classes)
language              string  (1 class)
is_vendor             bool    (2 classes)
is_generated          bool    (2 classes)
length_bytes          int64   (128 to 12.7k)
extension             string  (142 classes)
content               string  (length 128 to 8.19k)
authors               list    (length 1)
author_id             string  (length 1 to 132)
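Rows with this schema can be consumed directly with the Hugging Face datasets library. A minimal sketch, assuming the dump above comes from a Hub dataset with exactly these columns; the dataset path is a placeholder, not the actual source:

from datasets import load_dataset

# Placeholder path; substitute the real dataset repository.
ds = load_dataset("your-org/your-dataset", split="train", streaming=True)

for row in ds:
    # Keep small, permissively licensed, human-written Python files.
    if (row["license_type"] == "permissive"
            and not row["is_vendor"]
            and not row["is_generated"]
            and row["length_bytes"] < 10_000):
        print(row["repo_name"], row["path"], row["length_bytes"])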
eb66983747fd37d5bad2b03c62aa2cb5b9820300
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/A_Primer_on_Scientific_Programming_with_Python/input/c2f_cml_v3.py
|
9d13a354db2390ff9b57049d4ab8c67135d83538
|
[] |
no_license
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134 | 2023-08-14T10:05:37 | 2023-08-14T10:05:37 | 72,460,321 | 223 | 174 | null | 2022-10-24T12:15:06 | 2016-10-31T17:24:00 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 217 |
py
|
import sys
try:
C = float(sys.argv[1])
except:
print 'You failed to provide Celsius degrees as input '\
'on the command line!'
sys.exit(1) # abort
F = 9.0*C/5 + 32
print '%gC is %.1fF' % (C, F)
|
[
"[email protected]"
] | |
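The snippet above is Python 2, as printed in the textbook (print statements, bare except). A minimal Python 3 rendering of the same script, offered as a sketch rather than the book's own code:

import sys

try:
    C = float(sys.argv[1])
except (IndexError, ValueError):
    # no argument given, or the argument is not a number
    print('You failed to provide Celsius degrees as input '
          'on the command line!')
    sys.exit(1)  # abort

F = 9.0 * C / 5 + 32
print('%gC is %.1fF' % (C, F))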
e06e1100601a1bacb795bb1f1efe4a2e96a3d781
|
221d5405763d1a6ab3c6755583e557c14b9f3742
|
/gusregon/gus.py
|
ca34d58334fd1bd2383ff69dfbb77899f224cc07
|
[
"BSD-2-Clause"
] |
permissive
|
tpro/django-gusregon
|
4bd7253be9d43345376e36312d763d4653d0bbcd
|
75d4f291ae805bd986e1b4cb03b3b94e52a48076
|
refs/heads/master
| 2021-01-18T18:11:50.746453 | 2015-04-19T11:00:49 | 2015-04-19T11:00:49 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,656 |
py
|
import requests
import json
GUS_API_URL = 'https://wyszukiwarkaregon.stat.gov.pl/wsBIR/UslugaBIRzewnPubl.svc/ajaxEndpoint/'
LOGIN_ENDPOINT = 'Zaloguj'
CAPTCHA_ENDPOINT = 'PobierzCaptcha'
CHECK_CAPTCHA_ENDPOINT = 'SprawdzCaptcha'
SEARCH_ENDPOINT = 'daneSzukaj'
COMPANY_DETAILS_ENDPOINT = 'DanePobierzPelnyRaport'
class GUS(object):
sid = None
report_type = {
'F': 'DaneRaportFizycznaPubl',
'P': 'DaneRaportPrawnaPubl'}
prefix_data = {
'F': 'fiz_',
'P': 'praw_'}
def __init__(self, sid=None):
self.sid = sid
def login(self):
data = {'pKluczUzytkownika': 'aaaaaabbbbbcccccdddd'}
self.sid = self._post(LOGIN_ENDPOINT, data=json.dumps(data))
return self.sid
def get_captcha(self):
return self._post(CAPTCHA_ENDPOINT)
def check_captcha(self, captcha):
data = {'pCaptcha': captcha}
return self._post(
CHECK_CAPTCHA_ENDPOINT, data=json.dumps(data))
def search(self, nip=None, regon=None, krs=None,
detailed=True, no_prefix=True):
if not any([nip, regon, krs]):
raise AttributeError(
'At least one parameter (nip, regon, krs) is required.')
if nip:
search_params = {'Nip': nip}
elif regon:
search_params = {'Regon': regon}
else:
search_params = {'Krs': krs}
data = {'pParametryWyszukiwania': search_params}
basic_info = self._post(
SEARCH_ENDPOINT, data=json.dumps(data))
if not detailed or not basic_info:
return basic_info
basic_info = json.loads(basic_info)[0]
data = {
'pNazwaRaportu': self.report_type.get(basic_info['Typ']),
'pRegon': basic_info['Regon'],
'pSilosID': 1,
}
details = json.loads(self._post(
COMPANY_DETAILS_ENDPOINT, data=json.dumps(data)))[0]
if no_prefix:
return self._remove_prefix(details)
return details
def _post(self, url, **kwargs):
headers = {'Content-Type': 'application/json'}
if self.sid:
headers.update({'sid': self.sid})
url = '%s%s' % (GUS_API_URL, url)
response = requests.post(url, headers=headers, **kwargs)
return json.loads(response.content)['d']
def _remove_prefix(self, data):
data_without_prefix = {}
for key, value in data.items():  # items() works on both Python 2 and 3; iteritems() is Python 2 only
if key.startswith(tuple(self.prefix_data.values())):
key = key[key.find('_') + 1:]
data_without_prefix[key] = value
return data_without_prefix
|
[
"[email protected]"
] | |
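For context, a hypothetical session with the GUS client above (a sketch: the NIP value is made up, and the API key hard-coded in login() is clearly a placeholder, so real calls need valid credentials):

gus = GUS()
gus.login()                             # obtains a session id from the Zaloguj endpoint
company = gus.search(nip='1234567890')  # detailed report with prefixes stripped
print(company)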
2d46fb8bfbd693468cd059acdc41ca93221da9c6
|
c90b3ac3e5ad11cb93d4e6b76b9b9c4a19d0f512
|
/.history/copytest_20200502124009.py
|
413d6f42f34cf44c8b5ee71050cdd80e9dccb60d
|
[] |
no_license
|
rbafna6507/passwordstorageproject
|
6465585e36c81075856af8d565fe83e358b4a40a
|
480c30e358f7902ac0ef5c4e8d9556cb1d6d33f4
|
refs/heads/master
| 2022-11-25T12:05:02.625968 | 2020-07-27T21:33:38 | 2020-07-27T21:33:38 | 283,021,426 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 954 |
py
|
import pickle
import cryptography
from cryptography.fernet import Fernet
def encrypt(message: bytes, key: bytes) -> bytes:
return Fernet(key).encrypt(message)
def decrypt(token: bytes, key: bytes) -> bytes:
return Fernet(key).decrypt(token)
infile = open('jeff.pkl','rb')
z = pickle.load(infile)
key = Fernet.generate_key()
e_userpass = z
username = input("Username: ")
password = input("password: ")
website = input("Website: ")
e_username = encrypt(username.encode(), key)
e_password = encrypt(password.encode(), key)
e_list = [b"Username: " + e_username, b"Password: " + e_password]
e_userpass["Website: " + website] = e_list
outfile = open("jeff.pkl", "wb")
pickle.dump(e_userpass, outfile)
outfile.close()
infile = open('jeff.pkl','rb')
z = pickle.load(infile)
e_userpass = z
# Decrypt only the entries added in this run: the Fernet key is regenerated
# on every run, so tokens written by earlier runs cannot be recovered here.
for entry in e_userpass["Website: " + website]:
    label, _, token = entry.partition(b": ")
    print(label.decode(), ':', decrypt(token, key).decode())
|
[
"[email protected]"
] | |
a19137e2bc295d4d4e9c77c15d61e3a9e4d708f9
|
ff20661ef00b2db927c78f95a08cd6c40f950ee0
|
/inputmorethanone.py
|
81994128fa875ec38b52ef7cf8ec19866fc7810f
|
[] |
no_license
|
Techsrijan/mppython2021
|
57ca26e1acdf5adad2afa692dd5ae23336273603
|
583a991f85e2414c6b8ffe0405f727f3f5d38eee
|
refs/heads/main
| 2023-06-18T22:05:44.602220 | 2021-07-16T00:42:26 | 2021-07-16T00:42:26 | 374,290,977 | 0 | 9 | null | null | null | null |
UTF-8
|
Python
| false | false | 231 |
py
|
'''f=int(input("Enter the first number"))
s=int(input("Enter the Second number"))
'''
f,s=input("Enter two number").split(',')
print("F=",f,"S=",s)
j,k=input("Enter two number").split(' ')
print("j=",j,"k=",k)
print("add=",j+k)
|
[
"[email protected]"
] | |
ba3177e820dd8a4793158eb218326d48229866ef
|
bb372428bb90fa80f2e87820b3c8c5ba305dcd4c
|
/python/bot/eups.py
|
0e4812155219126d211b7dcd779287ab6d1ce9ec
|
[] |
no_license
|
TallJimbo/lsst-bot
|
7eb9b7a71a87a1ed416397c193931c80639bd746
|
0843afb2fdd5cc9ba62cf424a7dd73672b10e28f
|
refs/heads/master
| 2021-01-19T05:43:55.451321 | 2016-06-04T00:07:25 | 2016-06-04T00:07:25 | 60,484,354 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,534 |
py
|
#!/usr/bin/env python
from __future__ import absolute_import
import eups.table
import os
import logging
__all__ = "get_dependencies"
def get_dependencies(config, path, pkg, recursive=False):
"""Return immediate dependencies from inspecting a table file.
NOTE: recursive=True has not been tested.
"""
e = eups.Eups()
t = eups.table.Table(os.path.join(path, "ups", pkg + ".table"))
dependencies = t.dependencies(e, recursive=recursive)
if recursive:
dependencies.sort(key=lambda x: x[2])
for product, optional, depth in dependencies:
yield product.name, optional
def declare(config, path, pkg, version, tag_only=False):
e = eups.Eups()
if not tag_only:
logging.debug("Declaring {pkg} {version}.".format(pkg=pkg, version=version))
e.declare(productName=pkg, versionName=version, productDir=path)
for tmp in config.eups.tags:
tag = tmp.format(eups=config.eups)
logging.debug("Assigning tag {tag} to {pkg}.".format(pkg=pkg, tag=tag))
e.assignTag(tag, productName=pkg, versionName=version)
def undeclare(config, pkg, version):
e = eups.Eups()
e.undeclare(productName=pkg, versionName=version)
def setup(pkg, version, nodepend=False):
e = eups.Eups(max_depth=(0 if nodepend else -1))
e.setup(productName=pkg, versionName=version)
def tag(pkg, version, tag):
e = eups.Eups()
logging.debug("Assigning tag {tag} to {pkg}.".format(pkg=pkg, tag=tag))
e.assignTag(tag, productName=pkg, versionName=version)
|
[
"[email protected]"
] | |
31a96cf391d906b0d3d59fcd37437e16f21f474b
|
fd326562890d4f1987c384fc7c60374938231222
|
/OOP/ExamPrep/Exam10April21/project/decoration/ornament.py
|
90d7980c034613da08f4ee857bf726562ac89427
|
[] |
no_license
|
miro-lp/SoftUni
|
cc3b0ff742218c9ceaf93f05c319ccfeed5bc8a4
|
283d9328537919de49f7f6a301e58593bae9ca2a
|
refs/heads/main
| 2023-08-23T21:22:07.856226 | 2021-08-25T15:10:18 | 2021-08-25T15:10:18 | 318,134,101 | 2 | 1 | null | 2021-08-10T12:51:54 | 2020-12-03T09:03:08 |
Python
|
UTF-8
|
Python
| false | false | 151 |
py
|
from project.decoration.base_decoration import BaseDecoration
class Ornament(BaseDecoration):
def __init__(self):
super().__init__(1, 5)
|
[
"[email protected]"
] | |
588948095f2db1f4d431c649e77a76b72ecf54b8
|
68f57fd1dd274be72af6d85762b67bbf8d2ef6d6
|
/tests/test_cosine.py
|
3ac719652f889a7529befb8bcbf87a328c003cfa
|
[] |
no_license
|
afcarl/simplecosine
|
287cbf40ef8aa2251ea538b7b3c2d28c5b6f2488
|
1ba869198ab3211dd4b0412e80e670308007f687
|
refs/heads/master
| 2020-03-17T23:56:28.854494 | 2017-06-15T21:33:36 | 2017-06-15T21:33:36 | 134,069,251 | 1 | 0 | null | 2018-05-19T14:29:05 | 2018-05-19T14:29:05 | null |
UTF-8
|
Python
| false | false | 2,909 |
py
|
import unittest
from simplecosine.cosine import CosineSetSimilarity, CosineTextSimilarity
import numpy
import pickle
class TestSetCosineClass(unittest.TestCase):
def setUp(self):
self.ilist = [('a', 'b', 'c'),
['b', 'c', 'd k'],
('d k', 'e', 'f')
]
def test_cosine(self):
cosine = CosineSetSimilarity(self.ilist)
s1 = self.ilist[0]
s2 = self.ilist[1]
cosine_sim = cosine(s1, s2)
self.assertAlmostEqual(cosine_sim, 0.378, places=3)
cosine_sim = cosine(('g', 'h', 'd k', 'd k'), s2)
self.assertAlmostEqual(cosine_sim, 0.267, places=3)
def test_cosine_na(self):
cosine = CosineSetSimilarity(self.ilist)
cosine_sim = cosine(self.ilist[0], ())
assert numpy.isnan(cosine_sim)
def test_cosine_identical(self):
cosine = CosineSetSimilarity(self.ilist)
cosine_sim = cosine(self.ilist[0], self.ilist[0])
self.assertAlmostEqual(cosine_sim, 1, places=5)
def test_cosine_cache(self):
cosine = CosineSetSimilarity(self.ilist)
s1 = self.ilist[0]
s2 = self.ilist[1]
cosine_sim = cosine(s1, s2)
self.assertAlmostEqual(cosine_sim, 0.378, places=3)
cosine_sim = cosine(s1, s2)
self.assertAlmostEqual(cosine_sim, 0.378, places=3)
def test_cosine_no_corpus(self):
cosine = CosineSetSimilarity([])
s1 = self.ilist[0]
s2 = self.ilist[1]
cosine_sim = cosine(s1, s2)
self.assertAlmostEqual(cosine_sim, 0.667, places=3)
cosine_sim = cosine(('g', 'h', 'd k'), s2)
self.assertAlmostEqual(cosine_sim, 0.333, places=3)
def test_cosine_pickle(self) :
cosine = CosineSetSimilarity(self.ilist)
s1 = self.ilist[0]
s2 = self.ilist[1]
cosine_sim = cosine(s1, s2)
pickle.dumps(cosine)
cosine = CosineSetSimilarity([])
s1 = self.ilist[0]
s2 = self.ilist[1]
cosine_sim = cosine(s1, s2)
pickle.dumps(cosine)
class TestTextCosineClass(unittest.TestCase):
def setUp(self):
self.ilist = ['a b c',
'b c d',
'd e f']
def test_cosine(self):
cosine = CosineTextSimilarity(self.ilist)
s1 = self.ilist[0]
s2 = self.ilist[1]
cosine_sim = cosine(s1, s2)
self.assertAlmostEqual(cosine_sim, 0.378, places=3)
def test_cosine_na(self):
cosine = CosineTextSimilarity(self.ilist)
cosine_sim = cosine(self.ilist[0], '')
assert numpy.isnan(cosine_sim)
def test_cosine_identical(self):
cosine = CosineTextSimilarity(self.ilist)
cosine_sim = cosine(self.ilist[0], self.ilist[0])
self.assertAlmostEqual(cosine_sim, 1, places=5)
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
93e2cb9162dfaedfe3a58c9892ccb9936f9405c9
|
9e7d7b4d029554eed0f760a027cd94558b919ae2
|
/CHAPTER15/overlaying.py
|
e320bf396d4410f1a0cc189810fc886ac93deca0
|
[] |
no_license
|
pooja1506/AutomateTheBoringStuff_2e
|
8247b68a195d5e1976c6474f0e97d947906ffd35
|
5bab9ccdcdb22ee10fe1272c91042be40fd67c17
|
refs/heads/master
| 2022-04-10T19:21:44.402829 | 2020-04-05T12:10:32 | 2020-04-05T12:10:32 | 249,620,282 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 589 |
py
|
import PyPDF2
minutesFile = open('meetingminutes.pdf', 'rb')
pdfReader = PyPDF2.PdfFileReader(minutesFile)
minutesFirstPage = pdfReader.getPage(0)
pdfWatermarkReader = PyPDF2.PdfFileReader(open('watermark.pdf', 'rb'))
minutesFirstPage.mergePage(pdfWatermarkReader.getPage(0))
pdfWriter = PyPDF2.PdfFileWriter()
pdfWriter.addPage(minutesFirstPage)
for pageNum in range(1, pdfReader.numPages):
pageObj = pdfReader.getPage(pageNum)
pdfWriter.addPage(pageObj)
resultPdfFile = open('watermarkedCover.pdf', 'wb')
pdfWriter.write(resultPdfFile)
minutesFile.close()
resultPdfFile.close()
|
[
"[email protected]"
] | |
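The script above targets the legacy PyPDF2 1.x API (PdfFileReader, getPage, mergePage). Under the maintained pypdf package, the same overlay looks roughly like this; a sketch assuming the same input files:

from pypdf import PdfReader, PdfWriter

reader = PdfReader('meetingminutes.pdf')
watermark = PdfReader('watermark.pdf').pages[0]

writer = PdfWriter()
first_page = reader.pages[0]
first_page.merge_page(watermark)  # draw the watermark on top of page 1
writer.add_page(first_page)
for page in reader.pages[1:]:     # remaining pages pass through untouched
    writer.add_page(page)

with open('watermarkedCover.pdf', 'wb') as result:
    writer.write(result)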
da4d9970097abb9879bdaf10f8d859c5287053b0
|
5b8fcb1bf82a7c1ef5b6c2a939b1d1597bc7a24b
|
/create_json_for_airtable_operator.py
|
e00238b2f39eae43c6d55eae4974dcf2d194d262
|
[] |
no_license
|
katerinekhh/airflow_custom_stuff
|
2420c3ee95dab01e5eeeb8248500e253126e5b48
|
43ba78d96770a575ba7ab11a691b101e6d6604af
|
refs/heads/master
| 2022-10-12T13:55:01.916266 | 2020-06-12T13:17:06 | 2020-06-12T13:17:06 | 271,645,308 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,245 |
py
|
from datetime import datetime
import json
from airflow.utils.decorators import apply_defaults
from airflow.models.baseoperator import BaseOperator
from airflow.hooks.http_hook import HttpHook
class CreateJsonForAirtableOperator(BaseOperator):
@apply_defaults
def __init__(
self,
endpoint: str,
http_conn_id: str,
message_id_filepath: str,
update_filepath: str,
method='GET',
request_params=None,
*args, **kwargs,
):
super().__init__(*args, **kwargs)
self.http_conn_id = http_conn_id
self.request_params = request_params or {}
self.endpoint = endpoint
self.message_id_filepath = message_id_filepath
self.update_filepath = update_filepath
self.hook = HttpHook(
method=method,
http_conn_id=http_conn_id)
def execute(self, context):
response = self.hook.run(self.endpoint, data=self.request_params)
with open(self.message_id_filepath, 'r') as id_file:
message_id = id_file.read()
json_response = json.loads(response.text)
airtable_updates_data = {}
airtable_updates_data['records'] = []
for update in json_response['result']:
update_data_fields = {}
update_data = {}
if update['callback_query']['message']['message_id'] == int(message_id):
chat_id = update['callback_query']['message']['chat']['id']
username = update['callback_query']['from']['username']
triggered_at = datetime.fromtimestamp(
update['callback_query']['message']['date']).isoformat()[:-3] + "Z"
update_data['chat_id'] = chat_id
update_data['username'] = username
update_data['triggered_at'] = triggered_at
update_data['event_type'] = 'push_go_button'
update_data['reporter_name'] = 'khkaterina'
update_data_fields['fields'] = update_data
airtable_updates_data['records'].append(update_data_fields)
with open(self.update_filepath, 'w') as file:
json.dump(airtable_updates_data, file)
|
[
"[email protected]"
] | |
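A hypothetical way to wire the operator above into a DAG (a sketch; the DAG id, connection id, endpoint, and file paths are all made-up placeholders):

from datetime import datetime
from airflow import DAG

with DAG('airtable_export',
         start_date=datetime(2020, 6, 1),
         schedule_interval=None) as dag:
    create_json = CreateJsonForAirtableOperator(
        task_id='create_json_for_airtable',
        endpoint='getUpdates',                 # Telegram Bot API method
        http_conn_id='telegram_api',
        message_id_filepath='/tmp/message_id.txt',
        update_filepath='/tmp/airtable_update.json',
    )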
ef2911b4133217bc48dbf92e02a62bd1d9b5d171
|
e168a16fdd43d3023d16d8a643ccca318a44c327
|
/evm/logic/call.py
|
42acedd0f1791f1cebd63438077524bdee541b46
|
[] |
no_license
|
DavidKnott/py-evm
|
c589c88af55c121ea375bfdb0a53ecc6a4836119
|
66c47f58a62e995b5ce89e47007c8b03796c80b9
|
refs/heads/master
| 2021-01-01T04:08:39.921768 | 2017-07-18T13:03:45 | 2017-07-18T13:03:45 | 97,128,228 | 1 | 0 | null | 2017-07-13T13:54:57 | 2017-07-13T13:54:56 | null |
UTF-8
|
Python
| false | false | 7,349 |
py
|
from evm import constants
from evm.opcode import (
Opcode,
)
from evm.utils.address import (
force_bytes_to_address,
)
class BaseCall(Opcode):
def compute_msg_gas(self, computation, gas, to, value):
raise NotImplementedError("Must be implemented by subclasses")
def get_call_params(self, computation):
raise NotImplementedError("Must be implemented by subclasses")
def __call__(self, computation):
computation.gas_meter.consume_gas(
self.gas_cost,
reason=self.mnemonic,
)
(
gas,
value,
to,
sender,
code_address,
memory_input_start_position,
memory_input_size,
memory_output_start_position,
memory_output_size,
should_transfer_value,
) = self.get_call_params(computation)
computation.extend_memory(memory_input_start_position, memory_input_size)
computation.extend_memory(memory_output_start_position, memory_output_size)
call_data = computation.memory.read(memory_input_start_position, memory_input_size)
#
# Message gas allocation and fees
#
child_msg_gas, child_msg_gas_fee = self.compute_msg_gas(computation, gas, to, value)
computation.gas_meter.consume_gas(child_msg_gas_fee, reason=self.mnemonic)
# Pre-call checks
sender_balance = computation.state_db.get_balance(
computation.msg.storage_address,
)
insufficient_funds = should_transfer_value and sender_balance < value
stack_too_deep = computation.msg.depth + 1 > constants.STACK_DEPTH_LIMIT
if insufficient_funds or stack_too_deep:
if self.logger:
if insufficient_funds:
err_message = "Insufficient Funds: have: {0} | need: {1}".format(
sender_balance,
value,
)
elif stack_too_deep:
err_message = "Stack Limit Reached"
else:
raise Exception("Invariant: Unreachable code path")
self.logger.debug(
"%s failure: %s",
self.mnemonic,
err_message,
)
computation.gas_meter.return_gas(child_msg_gas)
computation.stack.push(0)
else:
if code_address:
code = computation.state_db.get_code(code_address)
else:
code = computation.state_db.get_code(to)
child_msg_kwargs = {
'gas': child_msg_gas,
'value': value,
'to': to,
'data': call_data,
'code': code,
'code_address': code_address,
'should_transfer_value': should_transfer_value,
}
if sender is not None:
child_msg_kwargs['sender'] = sender
child_msg = computation.prepare_child_message(**child_msg_kwargs)
if child_msg.is_create:
child_computation = computation.vm.apply_create_message(child_msg)
else:
child_computation = computation.vm.apply_message(child_msg)
computation.children.append(child_computation)
if child_computation.error:
computation.stack.push(0)
else:
actual_output_size = min(memory_output_size, len(child_computation.output))
computation.gas_meter.return_gas(child_computation.gas_meter.gas_remaining)
computation.memory.write(
memory_output_start_position,
actual_output_size,
child_computation.output[:actual_output_size],
)
computation.stack.push(1)
class Call(BaseCall):
def compute_msg_gas(self, computation, gas, to, value):
account_exists = computation.state_db.account_exists(to)
transfer_gas_fee = constants.GAS_CALLVALUE if value else 0
create_gas_fee = constants.GAS_NEWACCOUNT if not account_exists else 0
total_fee = gas + transfer_gas_fee + create_gas_fee
child_msg_gas = gas + (constants.GAS_CALLSTIPEND if value else 0)
return child_msg_gas, total_fee
def get_call_params(self, computation):
gas = computation.stack.pop(type_hint=constants.UINT256)
to = force_bytes_to_address(computation.stack.pop(type_hint=constants.BYTES))
(
value,
memory_input_start_position,
memory_input_size,
memory_output_start_position,
memory_output_size,
) = computation.stack.pop(num_items=5, type_hint=constants.UINT256)
return (
gas,
value,
to,
None, # sender
None, # code_address
memory_input_start_position,
memory_input_size,
memory_output_start_position,
memory_output_size,
True, # should_transfer_value,
)
class CallCode(BaseCall):
def compute_msg_gas(self, computation, gas, to, value):
transfer_gas_cost = constants.GAS_CALLVALUE if value else 0
total_fee = transfer_gas_cost + gas
child_msg_gas = gas + (constants.GAS_CALLSTIPEND if value else 0)
return child_msg_gas, total_fee
def get_call_params(self, computation):
gas = computation.stack.pop(type_hint=constants.UINT256)
code_address = force_bytes_to_address(computation.stack.pop(type_hint=constants.BYTES))
(
value,
memory_input_start_position,
memory_input_size,
memory_output_start_position,
memory_output_size,
) = computation.stack.pop(num_items=5, type_hint=constants.UINT256)
to = computation.msg.storage_address
sender = computation.msg.storage_address
return (
gas,
value,
to,
sender,
code_address,
memory_input_start_position,
memory_input_size,
memory_output_start_position,
memory_output_size,
True, # should_transfer_value,
)
class DelegateCall(CallCode):
def compute_msg_gas(self, computation, gas, to, value):
return gas, gas
def get_call_params(self, computation):
gas = computation.stack.pop(type_hint=constants.UINT256)
code_address = force_bytes_to_address(computation.stack.pop(type_hint=constants.BYTES))
(
memory_input_start_position,
memory_input_size,
memory_output_start_position,
memory_output_size,
) = computation.stack.pop(num_items=4, type_hint=constants.UINT256)
to = computation.msg.storage_address
sender = computation.msg.sender
value = computation.msg.value
return (
gas,
value,
to,
sender,
code_address,
memory_input_start_position,
memory_input_size,
memory_output_start_position,
memory_output_size,
False, # should_transfer_value,
)
|
[
"[email protected]"
] | |
2bff7ce472c638cc2952ee313e844673778ab37c
|
5faecec9b20d262150e48ac9f31c396f840b1f2f
|
/migrations/0010_auto_20200804_0913.py
|
f175b678b5857527caa863cd6db136e7bc3d803b
|
[] |
no_license
|
binkesi/blogsgn
|
fb767b0d22e3eb1c32ea7ee8fd0796766e3a8600
|
579b374f802a5651d20c3b3f85d8ff6a22476bdd
|
refs/heads/master
| 2022-11-27T23:24:45.574601 | 2020-08-04T10:06:28 | 2020-08-04T10:06:28 | 283,161,699 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 515 |
py
|
# Generated by Django 3.0.6 on 2020-08-04 01:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blogsgn', '0009_auto_20200804_0653'),
]
operations = [
migrations.AlterField(
model_name='author',
name='nation',
field=models.CharField(choices=[('CH', 'China'), ('US', 'America'), ('UK', 'England'), ('GE', 'German'), ('CA', 'Canada')], max_length=80, verbose_name='Nationality'),
),
]
|
[
"[email protected]"
] | |
0b583e86f97c1a537be2b27d6980f3a3dd93df1a
|
528c811306faa4a34bf51fca7955b7a24ac2e30c
|
/Python/Valid Anagram.py
|
263508830b33b30fd769bcad02fa5dbf91901f61
|
[] |
no_license
|
ganjingcatherine/LeetCode-1
|
1addbd7e4d9254a146601f9d5e28b8becb8235a6
|
488782d3f1e759da2d32b4e82dbf55b96c431244
|
refs/heads/master
| 2021-05-11T03:15:16.810035 | 2016-02-06T06:19:18 | 2016-02-06T06:19:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 806 |
py
|
"""
Given two strings s and t, write a function to determine if t is an anagram of s.
For example,
s = "anagram", t = "nagaram", return true.
s = "rat", t = "car", return false.
Note:
You may assume the string contains only lowercase alphabets.
"""
class Solution(object):
def isAnagram(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
if len(s) != len(t): return False
table = {}
for i in xrange(len(s)):
if s[i] not in table:
table[s[i]] = 1
else:
table[s[i]] += 1
for i in xrange(len(t)):
if t[i] in table and table[t[i]] > 0:
table[t[i]] -= 1
else:
return False
return True
|
[
"[email protected]"
] | |
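The hand-rolled frequency table above is the classic approach; in idiomatic Python the same multiset comparison is a one-liner (a Python 3 sketch):

from collections import Counter

def is_anagram(s, t):
    # two strings are anagrams iff their letter counts match
    return Counter(s) == Counter(t)

assert is_anagram('anagram', 'nagaram')
assert not is_anagram('rat', 'car')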
7713fd10c64850e9770370122883e5b6ea01086f
|
e2ae96b74289a04a2386294bf51bacad92e2a830
|
/city_scrapers_core/spiders/legistar.py
|
29c3176db02b4b0851fd939f9f79845a629163c5
|
[
"MIT"
] |
permissive
|
will-snavely/city-scrapers-core
|
6afa9d78fb1c325420baaae030633b01111f11bb
|
cb865069e49d09ab251b7f99247df5e13c5d0241
|
refs/heads/main
| 2022-12-11T21:39:03.307347 | 2020-09-09T13:29:53 | 2020-09-09T13:29:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,265 |
py
|
from datetime import datetime
from typing import Iterable, List, Mapping, Optional, Tuple
from urllib.parse import urlparse
import scrapy
from legistar.events import LegistarEventsScraper
from ..items import Meeting
from .spider import CityScrapersSpider
LINK_TYPES = ["Agenda", "Minutes", "Video", "Summary", "Captions"]
class LegistarSpider(CityScrapersSpider):
"""Subclass of :class:`CityScrapersSpider` that handles processing Legistar sites,
which almost always share the same components and general structure.
Uses the `Legistar events scraper <https://github.com/opencivicdata/python-legistar-scraper/blob/master/legistar/events.py>`_
from the `python-legistar-scraper <https://github.com/opencivicdata/python-legistar-scraper>`_ library.
Any methods that don't pull the correct values can be replaced.
""" # noqa
link_types = []
def parse(self, response: scrapy.http.Response) -> Iterable[Meeting]:
"""Parse response from the :class:`LegistarEventsScraper`. Ignores the ``scrapy``
:class:`Response` which is still requested to be able to hook into ``scrapy``
broadly.
:param response: Scrapy response to be ignored
:return: Iterable of processed meetings
"""
events = self._call_legistar()
return self.parse_legistar(events)
def parse_legistar(
self, events: Iterable[Tuple[Mapping, Optional[str]]]
) -> Iterable[Meeting]:
"""Method to be implemented by Spider classes that will handle the response from
Legistar. Functions similar to ``parse`` for other Spider classes.
:param events: Iterable consisting of tuples of a dict-like object of scraped
results from legistar and an agenda URL (if available)
:raises NotImplementedError: Must be implemented in subclasses
:return: Iterable of processed meetings
"""
raise NotImplementedError("Must implement parse_legistar")
def _call_legistar(
self, since: int = None
) -> Iterable[Tuple[Mapping, Optional[str]]]:
les = LegistarEventsScraper()
les.BASE_URL = self.base_url
les.EVENTSPAGE = f"{self.base_url}/Calendar.aspx"
if not since:
since = datetime.today().year
return les.events(since=since)
def legistar_start(self, item: Mapping) -> datetime:
"""Pulls the start time from a Legistar item
:param item: Scraped item from Legistar
:return: Meeting start datetime
"""
start_date = item.get("Meeting Date")
start_time = item.get("Meeting Time")
if start_date and start_time:
try:
return datetime.strptime(
f"{start_date} {start_time}", "%m/%d/%Y %I:%M %p"
)
except ValueError:
return datetime.strptime(start_date, "%m/%d/%Y")
def legistar_links(self, item: Mapping) -> List[Mapping[str, str]]:
"""Pulls relevant links from a Legistar item
:param item: Scraped item from Legistar
:return: List of meeting links
"""
links = []
for link_type in LINK_TYPES + self.link_types:
if isinstance(item.get(link_type), dict) and item[link_type].get("url"):
links.append({"href": item[link_type]["url"], "title": link_type})
return links
def legistar_source(self, item: Mapping) -> str:
"""Pulls the source URL from a Legistar item. Pulls a specific meeting URL if
available, otherwise defaults to the general Legistar calendar page.
:param item: Scraped item from Legistar
:return: Source URL
"""
default_url = f"{self.base_url}/Calendar.aspx"
if isinstance(item.get("Name"), dict):
return item["Name"].get("url", default_url)
if isinstance(item.get("Meeting Details"), dict):
return item["Meeting Details"].get("url", default_url)
return default_url
@property
def base_url(self) -> str:
"""Property with the Legistar site's base URL
:return: Legistar base URL
"""
parsed_url = urlparse(self.start_urls[0])
return f"{parsed_url.scheme}://{parsed_url.netloc}"
|
[
"[email protected]"
] | |
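A hypothetical subclass showing how parse_legistar is meant to be filled in (a sketch: the spider name, start URL, and the assumption that item['Name'] is a plain string are illustrative only):

class ExampleCitySpider(LegistarSpider):
    name = 'example_city'
    start_urls = ['https://example.legistar.com/Calendar.aspx']

    def parse_legistar(self, events):
        for item, _agenda_url in events:
            yield Meeting(
                title=item.get('Name', ''),   # may be a dict on some sites
                start=self.legistar_start(item),
                links=self.legistar_links(item),
                source=self.legistar_source(item),
            )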
848257d62f49ecdcc747c38384d79aa0afb7700b
|
8db1ab4f9a2e47f7e8d69a685837d7e747bf9442
|
/cocos2d-x-tool/py_tool/syncResToProject.py
|
0773ceebfd0313dd7ab2c0df0f04cec7b688b661
|
[] |
no_license
|
tanzuoliang/python
|
051d6e46cebd7fdb74a0173aca0ca7a2b3ef5986
|
70f782cf3c72d2b7043727910509eb2d2f2fe065
|
refs/heads/master
| 2021-10-20T05:36:03.732738 | 2019-02-26T02:37:18 | 2019-02-26T02:37:18 | 111,288,598 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,595 |
py
|
#!/usr/bin/python
#encoding=utf-8
from myutil.utils import syncDir,checkSVNStatus
import os
fromRoot = '/Users/tanzuoliang/art_resource'
toRoot = "../res/new_ui"
toLanguage = "../res"
ll = [
('天天坦克/UI\ 效果图+输出\ 20170214\ 优化版/00\ icon','天天坦克/UI 效果图+输出 20170214 优化版/00 icon','icon'),
('天天坦克/UI\ 效果图+输出\ 20170214\ 优化版/00\ button','天天坦克/UI 效果图+输出 20170214 优化版/00 button','button'),
('天天坦克/UI\ 效果图+输出\ 20170214\ 优化版/00\ wenzi','天天坦克/UI 效果图+输出 20170214 优化版/00 wenzi','wenzi'),
('天天坦克/UI\ 效果图+输出\ 20170214\ 优化版/00\ 通用','天天坦克/UI 效果图+输出 20170214 优化版/00 通用','common'),
('天天坦克/UI\ 效果图+输出\ 20170214\ 优化版/00\ 字体','天天坦克/UI 效果图+输出 20170214 优化版/00 字体','fnt'),
('天天坦克/UI\ 效果图+输出\ 20170214\ 优化版/00\ BG','天天坦克/UI 效果图+输出 20170214 优化版/00 BG','bg')
]
"""
语言分类资源
"""
lll = [
('天天坦克/UI\ 效果图+输出\ 20170214\ 优化版/00\ 英文翻译','天天坦克/UI 效果图+输出 20170214 优化版/00 英文翻译','lang_en'),
('天天坦克/UI\ 效果图+输出\ 20170214\ 优化版/00\ 翻译原版','天天坦克/UI 效果图+输出 20170214 优化版/00 翻译原版','lang_chs')
]
from myutil.utils import getDirsize
if os.path.exists('../res-new') and getDirsize('../res') < getDirsize('../res-new'):
print "当前res是压缩后的"
else:
os.system('svn up %s'%toLanguage)
for tu in ll:
fromDir = os.path.join(fromRoot, tu[0])
toDir = os.path.join(toRoot, tu[2])
os.system("svn up %s"%fromDir)
fromDir = os.path.join(fromRoot, tu[1])
syncDir(fromDir, toDir,False)
checkSVNStatus(toRoot,[tu[2]])
for tu in lll:
fromDir = os.path.join(fromRoot, tu[0])
toDir = os.path.join(toLanguage, "language_img", tu[2],"res","new_ui")
os.system("svn up %s"%fromDir)
fromDir = os.path.join(fromRoot, tu[1])
if not os.path.exists(toDir):
os.makedirs(toDir)  # os.makedir does not exist; makedirs also creates intermediate directories
syncDir(fromDir, toDir,False)
checkSVNStatus(os.path.join(toLanguage, "language_img"),[tu[2]])
"""
英文引导
"""
# os.system("cp %s %s"%(os.path.join(fromRoot,"天天坦克/UI\ 效果图+输出\ 20170214\ 优化版/00\ 英文翻译/Novice\ guide/controlexplain.jpg"),os.path.join(toRoot, "bg/lang_en_controlexplain.jpg")))
os.system("rm -rf %s"%(os.path.join(toLanguage,"language_img/lang_en/res/new_ui/Novice\ guide")))
os.system('svn ci %s -m "同步资源"'%toLanguage)
|
[
"[email protected]"
] | |
87a0d04e73c54c1e0daef6dcf0e338c6af43be21
|
ef187d259d33e97c7b9ed07dfbf065cec3e41f59
|
/work/atcoder/abc/abc024/B/answers/111654_Gale.py
|
b31d17de7f8e5d4c0d019d4cbf95c0c6f7e11513
|
[] |
no_license
|
kjnh10/pcw
|
847f7295ea3174490485ffe14ce4cdea0931c032
|
8f677701bce15517fb9362cc5b596644da62dca8
|
refs/heads/master
| 2020-03-18T09:54:23.442772 | 2018-07-19T00:26:09 | 2018-07-19T00:26:09 | 134,586,379 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 197 |
py
|
n, t = map(int, input().split())
a = [int(input()) for i in range(n)]
ans = t
for i in range(1, n):
ans += t
if a[i] <= a[i - 1] + t:
ans = ans - (a[i - 1] + t - a[i])
print(ans)
|
[
"[email protected]"
] | |
626be54fe2c402a3a685abc6d8479c10ea8a75aa
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/CalibMuon/RPCCalibration/python/l1MuonOutputModule_cfi.py
|
6dbdc357f06e53ed7641a5fc49576123b5f1a25e
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143 | 2023-08-22T20:22:40 | 2023-08-22T20:22:40 | 10,969,551 | 1,006 | 3,696 |
Apache-2.0
| 2023-09-14T19:14:28 | 2013-06-26T14:09:07 |
C++
|
UTF-8
|
Python
| false | false | 419 |
py
|
import FWCore.ParameterSet.Config as cms
from CalibMuon.RPCCalibration.l1Muon_EventContent_cff import *
L1MuonEventContent = cms.OutputModule("PoolOutputModule",
l1Muon_EventContent,
l1MuonEventSelection,
datasets = cms.untracked.PSet(
filterName = cms.untracked.string('l1Muon_Filter'),
dataTier = cms.untracked.string('USER')
),
fileName = cms.untracked.string('l1Muon.root')
)
|
[
"[email protected]"
] | |
3230d5448ef48ac2a50e98f9791b15a0ed770f9f
|
0147677b611e40ac695ba07f914264b3470a7401
|
/src/mac_address_info.py
|
4ad9f15c04cbbf5909df457315f089a8c5f1a0cb
|
[] |
no_license
|
mblomdahl/sniffer
|
a2aed3ee37bb9a39d3c13ad8455ce7c7a2fc58c7
|
9101c59f958bb94fe1443fd90e95d333a02b785f
|
refs/heads/master
| 2021-01-24T00:23:30.318623 | 2015-08-14T12:56:33 | 2015-08-14T12:56:33 | 41,627,533 | 0 | 0 | null | 2015-08-30T12:11:01 | 2015-08-30T12:11:01 | null |
UTF-8
|
Python
| false | false | 5,190 |
py
|
import json
import urllib2
import os
class MacAddressInfo:
def __init__(self):
self.mac_address = ""
self.company = ""
self.address1 = ""
self.address2 = ""
self.address3 = ""
self.country = ""
class MacAddressStorage:
def __init__(self):
self.data = [] # creates a new empty list
def mac_address_lookup_from_internet(self, mac_address):
try:
print "Load from Internet %s" % mac_address
# Set the request URL http://www.macvendorlookup.com/api/v2/08-86-3B-D4-90-C0
url = 'http://www.macvendorlookup.com/api/v2/' + mac_address
# Send the GET request
response = urllib2.urlopen(url)
resp = response.read()
mac_object = MacAddressInfo()  # instantiate; assigning the bare class would share attributes across lookups
data = []
if resp:
# Interpret the JSON response
#data = json.loads(resp.decode('utf8'))
data = json.loads(resp)
mac_object.mac_address = mac_address
for company in data:
mac_object.company = company['company']
for address1 in data:
mac_object.address1 = address1['addressL1']
for address2 in data:
mac_object.address2 = address2['addressL2']
for address3 in data:
mac_object.address3 = address3['addressL3']
for country in data:
mac_object.country = country['country']
else:
mac_object.mac_address = mac_address
mac_object.company = ""
mac_object.address1 = ""
mac_object.address2 = ""
mac_object.address3 = ""
mac_object.country = ""
return mac_object
except :
print "Unexpected error:", url, resp
return None
def mac_address_lookup_from_cache(self, mac_address):
try:
self.load_data_from_file()
count = len( self.data["mac addresses"] )
for index in range(count):
if self.data["mac addresses"][index]["macaddress"] == mac_address:
mac_object = MacAddressInfo()  # instantiate the class (the original assigned the class object itself)
mac_object.mac_address = mac_address
mac_object.company = self.data["mac addresses"][index]["company"]
mac_object.address1 = self.data["mac addresses"][index]["address1"]
mac_object.address2 = self.data["mac addresses"][index]["address2"]
mac_object.address3 = self.data["mac addresses"][index]["address3"]
mac_object.country = self.data["mac addresses"][index]["country"]
return mac_object
return None
except :
print "mac_address_lookup_from_cache error:"
return None
def mac_address_lookup(self, mac_address):
try:
mac_object = self.mac_address_lookup_from_cache(mac_address)
if mac_object is None :
mac_object = self.mac_address_lookup_from_internet(mac_address)
if mac_object is not None :
#self.load_data_from_file()
print mac_address
self.data["mac addresses"].append( {"macaddress":mac_address, "company":mac_object.company, "address1":mac_object.address1, "address2":mac_object.address2, "address3":mac_object.address3, "country":mac_object.country} )
self.store_data_to_file()
else :
return None
return mac_object
except :
print "mac_address_lookup error:"
return None
def load_data_from_file(self):
    if len(self.data) == 0:
        if os.path.exists("/home/pi/sniffer/mac_addresses.json"):
            with open('/home/pi/sniffer/mac_addresses.json', 'r') as file_handle:
                self.data = json.load(file_handle)
        else:
            # start with an empty dict matching the on-disk format; the original
            # appended a dict to a list, which broke every later lookup
            self.data = {"mac addresses": []}

def store_data_to_file(self):
    with open('/home/pi/sniffer/mac_addresses.json', 'w') as file_handle:
        json.dump(self.data, file_handle, sort_keys=True, indent=2)
if __name__ == '__main__':
storage = MacAddressStorage()
mac_object = MacAddressInfo()
#mac_object = storage.mac_address_lookup("08:86:3B:D4:90:C0")
#mac_object = storage.mac_address_lookup("6C:F3:73:E6:0A:11")
mac_object = storage.mac_address_lookup("9C:6C:15:97:76:04")
#print storage.mac_address_lookup("08-86-3B-D4-90-C0").mac_address
if mac_object :
print mac_object.mac_address
print mac_object.company
print mac_object.address1
print mac_object.address2
print mac_object.address3
print mac_object.country
else :
print "Error"
|
[
"="
] |
=
|
b54fd0bc290b3f5a82c4cad6ff829f7b399573f4
|
ded81a7568fe04f3227562cc5f67ffc675617cc0
|
/cheer_app/migrations/0002_comment.py
|
a7803e53c60185ed5d941b24bfcce9f91293cac8
|
[] |
no_license
|
shin04/cheer
|
3e220afc1fb0a4329ff7c16bd4823da1c09ee0a9
|
da39bbc584350c0ac89c23dbbfaf1c96ab9148fd
|
refs/heads/master
| 2020-07-02T16:07:44.280390 | 2020-05-20T11:13:03 | 2020-05-20T11:13:03 | 183,242,194 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 915 |
py
|
# Generated by Django 2.2 on 2019-08-05 04:29
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('cheer_app', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('approved_comment', models.BooleanField(default=False)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='cheer_app.Post')),
],
),
]
|
[
"[email protected]"
] | |
4d6647ad50459cf616a4eaaa782651b3b18edf2d
|
1af1f89eb9a178b95d1ba023b209b7538fb151f0
|
/Algorithms/146. LRU Cache.py
|
09989dad11e2256fc8b9ce1d4d8a754a15563be9
|
[] |
no_license
|
0xtinyuk/LeetCode
|
77d690161cc52738e63a4c4b6595a6012fa5c21e
|
08bc96a0fc2b672282cda348c833c02218c356f1
|
refs/heads/master
| 2023-02-21T16:58:39.881908 | 2021-01-25T08:00:13 | 2021-01-25T08:00:13 | 292,037,842 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 826 |
py
|
from collections import OrderedDict
class LRUCache():
def __init__(self, capacity):
self.memory = OrderedDict()
self.capacity = capacity
def get(self, key):
if key not in self.memory:
return -1
self.memory.move_to_end(key)
return self.memory[key]
def put(self, key, value):
if key in self.memory:
self.memory.move_to_end(key)
self.memory[key] = value
if len(self.memory) > self.capacity:
self.memory.popitem(last = False)
# Your LRUCache object will be instantiated and called as such:
# obj = LRUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value)
|
[
"[email protected]"
] | |
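A quick usage check of the cache above:

cache = LRUCache(2)
cache.put(1, 1)
cache.put(2, 2)
print(cache.get(1))  # 1; key 1 becomes the most recently used
cache.put(3, 3)      # capacity exceeded: evicts key 2, the least recently used
print(cache.get(2))  # -1
print(cache.get(3))  # 3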
215d0564daeceb18cdbfe7df3305df4cf9aaddc4
|
ddea930392ac5360b21e9043b620e703a9ccb31c
|
/tfx/components/example_gen/csv_example_gen/component.py
|
e98fab352364bc59a5a175075c9b90dce53af5c7
|
[
"Apache-2.0"
] |
permissive
|
Ark-kun/tfx
|
9c82b688776c80b2435bbb6154476526e8525ec8
|
f685f0387bd145316f43ceb484e64f893e749dcb
|
refs/heads/master
| 2021-07-25T05:58:15.168607 | 2020-05-22T01:07:44 | 2020-05-22T01:08:18 | 180,868,735 | 0 | 0 |
Apache-2.0
| 2019-04-11T20:01:57 | 2019-04-11T20:01:57 | null |
UTF-8
|
Python
| false | false | 3,690 |
py
|
# Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX CsvExampleGen component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Optional, Text, Union
from tfx import types
from tfx.components.base import executor_spec
from tfx.components.example_gen import component
from tfx.components.example_gen.csv_example_gen import executor
from tfx.proto import example_gen_pb2
class CsvExampleGen(component.FileBasedExampleGen): # pylint: disable=protected-access
"""Official TFX CsvExampleGen component.
The CsvExampleGen component takes CSV data, and generates train
and eval examples for downstream components.
"""
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)
def __init__(
self,
input: types.Channel = None, # pylint: disable=redefined-builtin
input_config: Optional[Union[example_gen_pb2.Input, Dict[Text,
Any]]] = None,
output_config: Optional[Union[example_gen_pb2.Output, Dict[Text,
Any]]] = None,
example_artifacts: Optional[types.Channel] = None,
input_base: Optional[types.Channel] = None,
instance_name: Optional[Text] = None,
enable_cache: Optional[bool] = None):
"""Construct a CsvExampleGen component.
Args:
input: A Channel of type `standard_artifacts.ExternalArtifact`, which
includes one artifact whose uri is an external directory containing csv
files (required).
input_config: An example_gen_pb2.Input instance, providing input
configuration. If unset, the files under input_base will be treated as a
single split. If any field is provided as a RuntimeParameter,
input_config should be constructed as a dict with the same field names
as Input proto message.
output_config: An example_gen_pb2.Output instance, providing output
configuration. If unset, default splits will be 'train' and 'eval' with
size 2:1. If any field is provided as a RuntimeParameter,
output_config should be constructed as a dict with the same field names
as Output proto message.
example_artifacts: Optional channel of 'ExamplesPath' for output train and
eval examples.
input_base: Backwards compatibility alias for the 'input' argument.
instance_name: Optional unique instance name. Necessary if multiple
CsvExampleGen components are declared in the same pipeline.
enable_cache: Optional boolean to indicate if cache is enabled for the
CsvExampleGen component. If not specified, defaults to the value
specified for pipeline's enable_cache parameter.
"""
super(CsvExampleGen, self).__init__(
input=input,
input_config=input_config,
output_config=output_config,
example_artifacts=example_artifacts,
input_base=input_base,
instance_name=instance_name,
enable_cache=enable_cache)
|
[
"[email protected]"
] | |
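Typical pipeline usage from the TFX 0.2x era this file targets would look roughly like this (a sketch; the CSV directory path is a placeholder, and external_input is assumed to live in tfx.utils.dsl_utils as it did at the time):

from tfx.utils.dsl_utils import external_input

examples = external_input('/data/csv_dir')  # directory containing the CSV files
example_gen = CsvExampleGen(input=examples)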
5e8efd9eb59f40d86c42c63a6d9310545e0a1134
|
51f2492a5c207e3664de8f6b2d54bb93e313ca63
|
/atcoder/abc102/b.py
|
4be8dec8f9d480d7b0af81ef662a21f1f1ef5c4f
|
[
"WTFPL",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
abeaumont/competitive-programming
|
23c5aabd587d7bb15a61efd3428838cb934233dd
|
a24c9b89941a59d344b51dc1010de66522b1a0dd
|
refs/heads/master
| 2023-09-01T09:50:58.267361 | 2023-07-31T18:00:10 | 2023-07-31T18:00:10 | 117,589,708 | 618 | 262 |
WTFPL
| 2023-07-12T17:36:20 | 2018-01-15T20:00:56 |
C++
|
UTF-8
|
Python
| false | false | 158 |
py
|
#!/usr/bin/env python3
# https://abc102.contest.atcoder.jp/tasks/abc102_b
n = int(input())
a = [int(x) for x in input().split()]
a.sort()
print(a[-1] - a[0])
|
[
"[email protected]"
] | |
d22f9e180410bcb47f4308eb442280a1d6958408
|
b3197b795911a2ebdd3308f39d0e7be4b4626a44
|
/homework.4/4.task1.py
|
a81fcfb717c8ca4018adb4ef5c82f3125501d029
|
[] |
no_license
|
VPetrashchu/python-course
|
9e2af9582f1600201c6f28681ead7426501a82b6
|
d188c3f42f7fd70aad1535e0141e7ff5fddd1d8f
|
refs/heads/master
| 2023-02-23T09:41:56.079047 | 2021-01-31T20:12:08 | 2021-01-31T20:12:08 | 317,589,130 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 256 |
py
|
number = int(input('Enter number: '))
fact = 1
for i in range(1, number + 1):
fact = fact * i
print('Factorial is: {}'.format(fact))
# while ( number > 0):
# fact = fact * number
# number = number - 1
# print('Factorial is: {}'.format(fact))
|
[
"="
] |
=
|
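The standard library already covers this; an equivalent sketch for the same positive-integer input:

from math import factorial

number = int(input('Enter number: '))
print('Factorial is: {}'.format(factorial(number)))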
97d915750244d1397fea6975d202218d1ad853f4
|
29f4de72b9aadaba277b4adb5e5cee5d8dd71f1e
|
/projection_data/make_ai.py
|
49ce8d7363987cb88c00c23533048229bdb00207
|
[] |
no_license
|
fgassert/aqueduct_atlas
|
87be4e1fbe9686cf06ff9c65257deabc617344e9
|
d00cd78ef3122aeda6eb563d0913baf73a9bb80e
|
refs/heads/master
| 2021-01-21T21:48:26.821562 | 2016-04-21T22:02:58 | 2016-04-21T22:02:58 | 15,684,645 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,658 |
py
|
import arcpy as ap
AI_RES = 600
def render_fields(prefix, polygons, mxd_template, layer_template, map_fields, map_layer_labels, map_values, ai_res=AI_RES):
mxd = ap.mapping.MapDocument(mxd_template)
df = ap.mapping.ListDataFrames(mxd)[0]
if map_layer_labels is None:
map_layer_labels = map_fields
if len(map_fields)!=len(map_layer_labels):
print "ERROR: Labels != fields"
if mxd.relativePaths == False:
print "ERROR: RelativePaths == False"
if type(map_fields) == str:
map_fields = [map_fields]
grpLyr = ap.mapping.ListLayers(mxd,"indicators")[0]
for i in range(len(map_fields)):
print "dissolving %s" % map_fields[i]
dissolved = "dis_%s_%s" % (map_layer_labels[i])
if not ap.Exists(dissolved):
ap.Dissolve_management(polygons,dissolved,map_fields[i])
else:
print "%s exists, skipping" % dissolved
lyr = ap.mapping.Layer(layer_template)
lyr.name = map_layer_labels[i]
lyr.replaceDataSource(WORKSPACE, "FILEGDB_WORKSPACE", dissolved, True)
lyr.symbology.valueField = map_fields[i]
lyr.symbology.classValues = map_values[map_fields[i]]
if grpLyr.isGroupLayer:
ap.mapping.AddLayerToGroup(df,grpLyr,lyr)
else:
ap.mapping.AddLayer(df, lyr)
outfile = "bin/%s%s_%s.ai"%(prefix, map_layer_labels[i])
print "exporting %s" % outfile
ap.mapping.ExportToAI(mxd, outfile,resolution=ai_res)
ap.mapping.RemoveLayer(df, ap.mapping.ListLayers(mxd,lyr.name)[0])
|
[
"[email protected]"
] | |
a7c2ff64776197033f6935ebd084216784ca1b4f
|
1521d32e3a2747054eea03df3195ca0fd52cfe71
|
/src/python/zensols/garmdown/__init__.py
|
1ed5dd6e2c05b14c7469f2b9a750d47327855ca1
|
[
"MIT"
] |
permissive
|
garmin-data/garmdown
|
533c525512914b97cbf42a919d670feb59c3269a
|
42509ddcc11bd7469e3a80d648fabd155657a074
|
refs/heads/master
| 2023-07-05T09:07:36.476348 | 2022-02-28T19:36:01 | 2022-02-28T19:36:01 | 191,933,069 | 15 | 6 |
MIT
| 2023-02-15T23:23:44 | 2019-06-14T11:37:45 |
Python
|
UTF-8
|
Python
| false | false | 212 |
py
|
from .domain import *
from .fetcher import *
from .persist import Persister
from .sheets import SheetUpdater
from .backup import *
from .reporter import *
from .mng import *
from .cli import *
from .app import *
|
[
"[email protected]"
] | |
4442d3be186c0780c78d903f7110f0e29096dfb6
|
8cbf335c5a39f2bbf1912b937ea4c3a31ab76f53
|
/kakuro.py
|
3869cab2399a04e10f9778aee78dd0fa41a9b26b
|
[] |
no_license
|
louisabraham/kakuro.py
|
e72e5a0dd4d1fc8b43bb8b1004ce7b46e5bf88bf
|
28ab8e5b066773a0f27f9eff6629391d21b167fc
|
refs/heads/master
| 2023-08-13T12:28:18.538669 | 2021-10-14T21:28:19 | 2021-10-14T21:28:19 | 417,281,425 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,969 |
py
|
from functools import lru_cache, partial
from collections import defaultdict
from set_cover import solve as solve_set_cover
def encode(pattern, cols, lines):
grid = [[c == "0" for c in line] for line in pattern.split()]
n = len(grid)
constraints = []
# build constraints on lines
vars = []
cur = 0
for i in range(n):
for j in range(n):
if grid[i][j]:
vars.append((i, j))
if (j == n - 1 or not grid[i][j]) and vars:
constraints.append((lines[cur], vars))
cur += 1
vars = []
# build constraints on columns
vars = []
cur = 0
for j in range(n):
for i in range(n):
if grid[i][j]:
vars.append((i, j))
if (i == n - 1 or not grid[i][j]) and vars:
constraints.append((cols[cur], vars))
cur += 1
vars = []
# map variables to constraints
var_to_cons = defaultdict(list)
for c, (_, vars) in enumerate(constraints):
for var in vars:
var_to_cons[var].append(c)
Y = {}
for i in range(n):
for j in range(n):
if not grid[i][j]:
continue
for x in range(1, 10):
# each cell has exactly one value
Y[i, j, x] = [("pos", i, j)]
for c in var_to_cons[i, j]:
# each value can be used at most once
Y[i, j, x].append(("con", c, x))
# add the "complement" values
for c, (tot, vars) in enumerate(constraints):
for t in decomp(45 - tot, 9 - len(vars)):
Y[c, t] = [("con", c)]
for x in t:
Y[c, t].append(("con", c, x))
# build X from Y
X = defaultdict(set)
for y, l in Y.items():
for x in l:
X[x].add(y)
return n, X, Y
@lru_cache(None)
def decomp(n, k, mini=1):
if n < mini:
return []
if k == 1:
return [(n,)] if n < 10 else []
ans = []
for x in range(mini, 10):
for t in decomp(n - x, k - 1, mini=x + 1):
ans.append((x,) + t)
return ans
def pp_sol(n, sol):
grid = [[0 for _ in range(n)] for _ in range(n)]
for x in sol:
if len(x) == 3:
i, j, x = x
grid[i][j] = x
return "\n".join("".join(str(x) for x in line) for line in grid)
def solve(pattern, cols, lines):
n, X, Y = encode(pattern, cols, lines)
yield from map(partial(pp_sol, n), solve_set_cover(X, Y))
if __name__ == "__main__":
pattern = """
0000X000
00000000
00000000
X0000000
0000000X
00000000
00000000
000X0000
"""
cols = [10, 13, 38, 39, 31, 28, 36, 39, 12, 10]
lines = [14, 8, 38, 36, 35, 35, 37, 36, 6, 11]
print(next(solve(pattern, cols, lines)))
|
[
"[email protected]"
] | |
7788d6d2554c64b729e9701d0fe4596e17cccfe8
|
5f22ddbd3eeb99709e43e7b9a7958c9987c7efa4
|
/__competitions/2014/11_03_w12/01.py
|
d50b3c914cc79f47bca1e6cd9529281c8b5f817c
|
[] |
no_license
|
salvador-dali/algorithms_general
|
04950bd823fc354adc58a4f23b7d2f3d39664798
|
aeee3356e2488c6fab08741b1ac26e8bd5e4ac0d
|
refs/heads/master
| 2020-12-14T06:24:10.466601 | 2016-07-17T06:00:17 | 2016-07-17T06:00:17 | 47,397,457 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 360 |
py
|
# https://www.hackerrank.com/contests/w12/challenges/priyanka-and-toys
# sort numbers and use greedy solution to find which one overlap
def toBuy(arr):
arr.sort()
num, maxPrice = 0, -1
for i in arr:
if i > maxPrice:
num += 1
maxPrice = i + 4
return num
input()
print toBuy(list(map(int, raw_input().split())))
|
[
"[email protected]"
] | |
8176f29c210a52c8544016e57564ace030a86875
|
155a25eb18213664da9978030e6743b04d570141
|
/manage.py
|
825bd789c5cf66c550320c139be766171af95606
|
[] |
no_license
|
powellc/timberwyck
|
0d6fd6e46c2899f32dda37faa8030a8c7080bc97
|
583cbc2ee33cb56187db13c94d5d4af74f51c9bd
|
refs/heads/master
| 2020-05-18T13:59:20.394609 | 2014-05-03T05:09:18 | 2014-05-03T05:09:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 313 |
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "timberwyck.settings")
os.environ.setdefault("DJANGO_CONFIGURATION", "Dev")
from configurations.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"[email protected]"
] | |
c5001ecfa2716239bb437211c0ca5878f4942947
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_underscoring.py
|
b4a890d3243fc3207ae8047c40277eb6f93f3f90
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 268 |
py
|
from xai.brain.wordbase.verbs._underscore import _UNDERSCORE
# class header
class _UNDERSCORING(_UNDERSCORE, ):
def __init__(self,):
_UNDERSCORE.__init__(self)
self.name = "UNDERSCORING"
self.specie = 'verbs'
self.basic = "underscore"
self.jsondata = {}
|
[
"[email protected]"
] | |
065a7ba25278105449e7b3d0bc7e9d75e0141fe2
|
b564a7d17f224e9512fec36bab4333353381e22c
|
/lib/exaproxy/html/humans.py
|
f15fdaa44f84ee1894da006b05dcba9b027d9279
|
[
"BSD-2-Clause"
] |
permissive
|
Exa-Networks/exaproxy
|
464f9c72449b12d4f3960e9829a0f93fec8db0da
|
8b7291b79c1cd6542213a5e7d8dda3cf5a676166
|
refs/heads/master
| 2023-09-03T16:10:56.656549 | 2022-06-28T16:52:48 | 2022-06-28T16:52:48 | 13,495,150 | 127 | 25 |
NOASSERTION
| 2022-06-28T16:52:49 | 2013-10-11T09:36:28 |
Python
|
UTF-8
|
Python
| false | false | 1,384 |
py
|
# encoding: utf-8
"""
humans.py
Created by Thomas Mangin on 2012-02-25.
Copyright (c) 2011-2013 Exa Networks. All rights reserved.
"""
from .images import thomas,david
class humans:
txt = """\
/* TEAM */
Slave Driver / Grand Visionary: Thomas Mangin
Google+: https://plus.google.com/104241996506596749840
Github: https://github.com/thomas-mangin
Engineer Extraordinaire: David Farrar
Google+: https://plus.google.com/108845019528954357090
Github: https://github.com/david-farrar
/* Other contributors */
Marek Obuchowicz (kqueue reactor)
Github: https://github.com/marek-obuchowicz
"""
html = """\
<div style="padding: 20px 20px 20px 20px;">
<b>/* TEAM */</b><br/>
<br/>
<div style="margin-left:20px;margin-right:10px;">
<img width="100px" src="data:image/png;base64,%s"/>
</div>
<br/>
Slave Driver / Grand Visionary<br/>
<a href="https://plus.google.com/104241996506596749840">Thomas Mangin</a><br/>
<br/>
<div style="margin-left:20px;margin-right:10px;">
<img width="100px" src="data:image/png;base64,%s"/>
</div>
<br/>
Engineer Extraordinaire<br/>
<a href="https://plus.google.com/108845019528954357090">David Farrar</a><br/>
</div>
<div style="padding: 20px 20px 20px 20px;">
<b>/* Other contributors */</b>
<br/>
<a href="https://github.com/marek-obuchowicz">Marek Obuchowicz</a> (kqueue reactor)
<br/>
</div>
""" % (thomas,david)
|
[
"[email protected]"
] | |
6469709fcf868289b689c5a64db4c625a21116ff
|
904b4b7cd6b1709e9aded92737766a3b5a978838
|
/bissextile.py
|
d90e2359ddb92bf8a0938ca97e262464bbf19394
|
[] |
no_license
|
NicolasLagaillardie/Python
|
3ec7aa6eb21ffa86fad33060bb53e42cb7957dc9
|
a30037d688d8f11a195d7fa611347528c313d71b
|
refs/heads/master
| 2020-03-30T13:48:27.038592 | 2018-10-02T16:54:42 | 2018-10-02T16:54:42 | 151,288,608 | 0 | 0 | null | null | null | null |
ISO-8859-1
|
Python
| false | false | 382 |
py
|
# -*- coding: cp1252 -*-
def bissextile(annee):
    if annee % 4 != 0:
        print annee, 'is not a leap year'
    else:
        if annee % 100 == 0:
            if annee % 400 == 0:
                print annee, 'is a leap year'
            else:
                print annee, 'is not a leap year'
        else:
            print annee, 'is a leap year'
|
[
"[email protected]"
] | |
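The standard library has the same rule built in; an equivalent Python 3 sketch:

import calendar

for year in (1900, 2000, 2024):
    # calendar.isleap applies the divisible-by-4 rule with the century exception
    print(year, calendar.isleap(year))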
a9eb757a2b0a176611cde9701778712b3dd565df
|
bec8abb5c3146377f1b3bc2f2b4eaa4d02502211
|
/mediascraper/the_procrastinators/youtube_scraper.py
|
7383cc710c70c57551b36229ef8259fb99726bbb
|
[
"MIT"
] |
permissive
|
Kokitis/MediaScraper
|
578f3d96f1ef731906e03e56db77e141823f8681
|
8bd7294942945d90838357f14e10558a0512e316
|
refs/heads/master
| 2020-03-26T11:51:34.460233 | 2018-08-16T00:45:37 | 2018-08-16T00:45:37 | 144,863,333 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,286 |
py
|
from pathlib import Path
import requests
import json
import yaml
from pprint import pprint
from typing import List, Tuple
import re
import datetime
from dataclasses import dataclass
from pytools.timetools import Duration
shownotes_regex = ""
@dataclass
class Shownote:
timestamp: Duration
title: str
link: str
def extract_description(text:str)->str:
description, *junk = text.split('PCP Episode')
description = description.strip()
return description
def extract_shownotes(lines:List[str])->List[Shownote]:
""" Extracts the timestamps, titles, and links of each shownote."""
regex = re.compile("[\d]+:[\d]+(?:[:][\d]+)?")
shownotes = list()
for current_line, next_line in zip(lines[:-1], lines[1:]):
if regex.match(current_line):
_time, *_title = current_line.split(' ')
timestamp = Duration.from_string(_time)
title = " ".join(_title)
link = next_line
shownote = Shownote(timestamp, title, link)
shownotes.append(shownote)
return shownotes
if __name__ == "__main__":
sample = Path(__file__).parent / "Tourist Trap Stockholm Syndrome - The Pro Crastinators Podcast, Episode 119-toHfm6RyLYo.info.json"
data = json.loads(sample.read_text())
description = data['description']
#print(description)
pprint(extract_shownotes(description.split('\n')))
|
[
"[email protected]"
] | |
507b5e4a2cf5d1be59559b0252c23e4d162aace9
|
7762ca6feb98c8b1c95da09758801a6bc38922ff
|
/NinjaGold/settings.py
|
00af97c3a4b37d82f68939050baa3b893c96e2ba
|
[] |
no_license
|
SDBranka/NinjaGold
|
211bd6ade5e9c6a216ffef89a0c791a8a2d15ad5
|
db881812842f2188df1da20edc81469fcb56a50a
|
refs/heads/main
| 2023-04-29T01:02:07.427340 | 2021-05-22T19:05:34 | 2021-05-22T19:05:34 | 369,070,343 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,109 |
py
|
"""
Django settings for NinjaGold project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '48s9*0q%(s79!70c9!^vujzz0iy))40u)ikr66k=9x7y^d*pcs'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'NG_app',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'NinjaGold.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'NinjaGold.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
91ef1503ce75661dbbe6b7d791eda966a31b1c1d
|
81eabe15995a6426b285b2312b73c0bde7bb61bc
|
/paleomix/tools/zonkey/common.py
|
81ad379b116d4e6692319c1a2c4afc9f055ff3ca
|
[] |
no_license
|
fvangef/paleomix
|
3a732d8cd99177809b25bd09dde6efd261b10cad
|
826fb866ae9c26cb7b49fc6a96fb618a3daaffcc
|
refs/heads/master
| 2020-04-15T22:05:02.249220 | 2018-11-05T19:56:49 | 2018-11-05T19:56:49 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,897 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Mikkel Schubert <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import collections
import paleomix.yaml
import paleomix.common.versions as versions
# Format number for database file; is incremented when the format is changed.
# The 'revision' field specifies updates to the table that do not change the
# format of the database (see below).
_SUPPORTED_DB_FORMAT = 1
RSCRIPT_VERSION = versions.Requirement(call=("Rscript", "--version"),
                                       search=r"version (\d+)\.(\d+)\.(\d+)",
checks=versions.GE(3, 0, 0),
priority=10)
class DBFileError(RuntimeError):
pass
def get_sample_names(handle):
samples = []
for readgroup in handle.header.get("RG", ()):
if "SM" in readgroup:
samples.append(readgroup["SM"])
return frozenset(samples)
def contig_name_to_plink_name(chrom):
"""Converts chromosome / contig name to the values expected by 'plink',
namely a digit or X/Y, or returns None if the chromosome could not be
identified.
"""
if chrom.isdigit():
        return chrom.upper()
elif chrom.upper() in "XY":
return chrom.upper()
elif chrom.lower().startswith("chr") and chrom[3:].isdigit():
return chrom[3:]
elif chrom.lower() in ("chrx", "chry"):
return chrom[3].upper()
else:
return None
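# Illustrative mappings for contig_name_to_plink_name (hypothetical inputs):
#   "12" -> "12", "x" -> "X", "chr7" -> "7", "chrY" -> "Y", "scaffold_1" -> None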
def read_summary(filename, default="[MISSING VALUE!]"):
results = collections.defaultdict(lambda: default)
with open(filename) as makefile:
string = makefile.read()
data = paleomix.yaml.safe_load(string)
if not isinstance(data, dict):
raise DBFileError('Summary file does not contain dictionary')
results.update(data)
return results
|
[
"[email protected]"
] | |
c35247face031fdcf18da283072975cf5773b968
|
64a80df5e23b195eaba7b15ce207743e2018b16c
|
/Downloads/adafruit-circuitpython-bundle-py-20201107/lib/adafruit_pybadger/pybadge.py
|
6c341d8678773b63b175827e4b779cc10fcfcc22
|
[] |
no_license
|
aferlazzo/messageBoard
|
8fb69aad3cd7816d4ed80da92eac8aa2e25572f5
|
f9dd4dcc8663c9c658ec76b2060780e0da87533d
|
refs/heads/main
| 2023-01-27T20:02:52.628508 | 2020-12-07T00:37:17 | 2020-12-07T00:37:17 | 318,548,075 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,899 |
py
|
# The MIT License (MIT)
#
# Copyright (c) 2020 Kattni Rembor for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_pybadger.pybadge`
================================================================================
Badge-focused CircuitPython helper library for PyBadge, PyBadge LC and EdgeBadge.
All three boards are included in this module as there is no difference in the
CircuitPython builds at this time, and therefore no way to differentiate
the boards from within CircuitPython.
* Author(s): Kattni Rembor
Implementation Notes
--------------------
**Hardware:**
* `Adafruit PyBadge <https://www.adafruit.com/product/4200>`_
* `Adafruit PyBadge LC <https://www.adafruit.com/product/3939>`_
* `Adafruit EdgeBadge <https://www.adafruit.com/product/4400>`_
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
from collections import namedtuple
import board
import digitalio
import analogio
import audioio
from gamepadshift import GamePadShift
import adafruit_lis3dh
import neopixel
from adafruit_pybadger.pybadger_base import PyBadgerBase
__version__ = "3.1.1"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_PyBadger.git"
Buttons = namedtuple("Buttons", "b a start select right down up left")
class PyBadge(PyBadgerBase):
"""Class that represents a single PyBadge, PyBadge LC, or EdgeBadge."""
_audio_out = audioio.AudioOut
_neopixel_count = 5
def __init__(self):
super().__init__()
        i2c = None
        try:
            i2c = board.I2C()
        except RuntimeError:
            self._accelerometer = None
if i2c is not None:
int1 = digitalio.DigitalInOut(board.ACCELEROMETER_INTERRUPT)
try:
self._accelerometer = adafruit_lis3dh.LIS3DH_I2C(
i2c, address=0x19, int1=int1
)
except ValueError:
self._accelerometer = adafruit_lis3dh.LIS3DH_I2C(i2c, int1=int1)
# NeoPixels
self._neopixels = neopixel.NeoPixel(
board.NEOPIXEL, self._neopixel_count, brightness=1, pixel_order=neopixel.GRB
)
self._buttons = GamePadShift(
digitalio.DigitalInOut(board.BUTTON_CLOCK),
digitalio.DigitalInOut(board.BUTTON_OUT),
digitalio.DigitalInOut(board.BUTTON_LATCH),
)
self._light_sensor = analogio.AnalogIn(board.A7)
@property
def button(self):
"""The buttons on the board.
Example use:
.. code-block:: python
from adafruit_pybadger import pybadger
while True:
if pybadger.button.a:
print("Button A")
elif pybadger.button.b:
print("Button B")
elif pybadger.button.start:
print("Button start")
elif pybadger.button.select:
print("Button select")
"""
button_values = self._buttons.get_pressed()
return Buttons(
*[
button_values & button
for button in (
PyBadgerBase.BUTTON_B,
PyBadgerBase.BUTTON_A,
PyBadgerBase.BUTTON_START,
PyBadgerBase.BUTTON_SELECT,
PyBadgerBase.BUTTON_RIGHT,
PyBadgerBase.BUTTON_DOWN,
PyBadgerBase.BUTTON_UP,
PyBadgerBase.BUTTON_LEFT,
)
]
)
pybadge = PyBadge() # pylint: disable=invalid-name
"""Object that is automatically created on import."""
|
[
"[email protected]"
] | |
32ea31849e6bd4ef0acd560f4be8b565f98587d3
|
f0b5238cf64ca46dafd8aab484278dd40feffa4d
|
/insta/migrations/0008_image_profile.py
|
58a792198142bdfb3043e57e53faa92eb2d84078
|
[
"MIT"
] |
permissive
|
niklauspeter/instagram
|
0e7ef612b4bd1301b8b1c146a281a645d5940f49
|
303e26f88d3cdcc9a7a8a05d41a6fa21bf91737e
|
refs/heads/master
| 2021-09-09T14:44:48.293670 | 2019-05-23T15:56:49 | 2019-05-23T15:56:49 | 187,219,168 | 0 | 0 | null | 2021-09-08T01:00:34 | 2019-05-17T13:14:56 |
Python
|
UTF-8
|
Python
| false | false | 548 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-05-23 06:56
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('insta', '0007_remove_image_profile_photo'),
]
operations = [
migrations.AddField(
model_name='image',
name='profile',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='insta.Profile'),
),
]
|
[
"[email protected]"
] | |
d3d1ba274df3c9e32d65b77b40f7b3b416ade480
|
ebcc3f199a4dc7763bb4984fc8a910d015b0c5d0
|
/dht_temperature.py
|
9e9f4a9f3703f52d6e30aab16700eaefb40ef65a
|
[
"MIT"
] |
permissive
|
BurntTech/homie4
|
31aba5be338cee46ce2dad6483821cd837aa6704
|
577bdb413778865d3be03e0149e1773b5d312d51
|
refs/heads/master
| 2021-07-13T12:12:48.528194 | 2021-02-03T19:02:41 | 2021-02-03T19:02:41 | 233,911,796 | 1 | 0 |
MIT
| 2020-01-14T18:48:18 | 2020-01-14T18:48:17 | null |
UTF-8
|
Python
| false | false | 634 |
py
|
# Raspberry PI
import Adafruit_DHT
import time
from homie.device_temperature import Device_Temperature
mqtt_settings = {
'MQTT_BROKER' : 'OpenHAB',
'MQTT_PORT' : 1883,
}
try:
temperature_device = Device_Temperature(device_id="temperature-sensor-1",name = "Temperature_Sensor 1",mqtt_settings=mqtt_settings)
sensor = Adafruit_DHT.AM2302
pin = 4
    while True:
        humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
        # read_retry returns (None, None) when the sensor repeatedly fails;
        # skip publishing in that case rather than sending a bogus reading.
        if temperature is not None:
            print(temperature)
            temperature_device.update_temperature(temperature)
        time.sleep(5)
except (KeyboardInterrupt, SystemExit):
print("Quitting.")
|
[
"[email protected]"
] | |
73d12c61155fbb679cf6f632c756bc0889002274
|
c2f92d75d235ff5ed7b213c02c4a0657545ba02f
|
/newchama_web/2/newchama/tools/test_mq1.py
|
dd2c58bbbefb696a43f1e8523ee83b7da1bbcca3
|
[] |
no_license
|
cash2one/tstpthon
|
fab6112691eb15a8a26bd168af3f179913e0c4e0
|
fc5c42c024065c7b42bea2b9de1e3874a794a30d
|
refs/heads/master
| 2021-01-20T01:52:06.519021 | 2017-04-14T09:50:55 | 2017-04-14T09:50:55 | 89,338,193 | 0 | 1 | null | 2017-04-25T08:46:06 | 2017-04-25T08:46:06 | null |
UTF-8
|
Python
| false | false | 847 |
py
|
#encoding:utf-8
import os,sys
sys.path.append(os.path.abspath('../'))
sys.path.append(os.path.abspath('/var/www/newchama'))
import newchama.settings
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "newchama.settings")
import pika
import pickle
from django.template import loader, Context
connection = pika.BlockingConnection(pika.ConnectionParameters(
'localhost'))
channel = connection.channel()
channel.queue_declare(queue='email')
email="[email protected]"
mail_dic = dict()
mail_dic['email'] = email
mail_dic['name'] = 'richard'
html_content = loader.render_to_string('tools/update_mail.html', mail_dic)
c={}
c['title']=u'NewChama用户通知'  # i.e. "NewChama user notification"
c['email']=email
c['content']=html_content
channel.basic_publish(exchange='', routing_key='email', body=pickle.dumps(c))
print " [x] Sent 'Hello World!'"
connection.close()
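# A minimal consumer sketch for the 'email' queue (an assumption for
# illustration; it mirrors the pickled dict built above and the old
# pre-1.0 pika basic_consume signature this script's era used):
# def on_message(ch, method, properties, body):
#     msg = pickle.loads(body)
#     print msg['email'], msg['title']
# channel.basic_consume(on_message, queue='email', no_ack=True)
# channel.start_consuming()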
|
[
"[email protected]"
] | |
40e044e81e637b03ed8ab1ee0a0bc10e3b4661f4
|
bc167f434158921bcf2c678155c5cdfec1c9b0c9
|
/PI_code/simulator/behaviourGeneration/firstGenScripts_preyHunter/behav372.py
|
4181e4a1f1456cec22542057f7e400034e38635a
|
[] |
no_license
|
s0217391/DifferentProjects
|
6450efc89c64ecd21b86c705737e89e5c69433a6
|
7f4da153660817b6cbf72d2e823aa29c0c2f95a9
|
refs/heads/master
| 2021-01-17T02:58:46.219240 | 2015-05-26T22:45:46 | 2015-05-26T22:45:46 | 34,995,164 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 539 |
py
|
#!/usr/bin/python
import sys
# Auto-generated first-generation behaviour script: most intermediate
# assignments below are dead stores left by the generator; only the final
# value of temp0 (2 * prey[1]) reaches the return statement.
def compute(prey):
temp0 = min(prey[1], prey[0])
temp1 = -1 * prey[0]
if temp0 != 0:
temp1 = prey[0] / temp0
else:
temp1 = temp0
temp0 = temp0 - prey[1]
if temp1 > temp0:
if prey[0] > prey[1]:
if prey[1] != 0:
temp0 = temp0 % prey[1]
else:
temp0 = prey[1]
else:
temp0 = max(prey[1], temp0)
else:
temp0 = prey[0] * prey[1]
temp0 = temp1 + prey[1]
temp1 = -1 * temp1
temp0 = min(prey[1], prey[0])
temp0 = max(prey[1], prey[1])
temp0 = temp0 + temp0
return [temp0, temp0]
|
[
"[email protected]"
] | |
b9533ae22c6a70939b28441379420cc7a1b533ae
|
e98e7b45d85273797cf9f15e92fbe685a05bde18
|
/词条导出/zhidao_cron.py
|
19c1cf5746d7e05af4e57c436af8f87088d3a9f0
|
[] |
no_license
|
wangdexinpython/test
|
8d29d30e099f64f831b51265db7092d520df253c
|
a047148409e31b8a8140f2c13b959aa54ec14d0d
|
refs/heads/master
| 2020-09-11T05:10:49.041795 | 2019-12-31T07:47:41 | 2019-12-31T07:47:41 | 221,948,822 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,004 |
py
|
#coding=utf-8
import pymongo,time,requests,json
import urllib.parse
import redis,pexpect,os
class zhidao(object):
    def __init__(self):
        self.mongo = self.mongodb()
        self.mon_app = self.app_mongo()
    def mongodb(self):
        mongo = pymongo.MongoClient(
            "mongodb://xhql:" + urllib.parse.quote_plus("xhql_190228_snv738J72*fjVNv8220aiVK9V820@_")+"@172.26.26.132:20388/webpage")['webpage']
        return mongo
    def app_mongo(self):
        mon = pymongo.MongoClient("mongodb://integrate:" + urllib.parse.quote_plus(
            "integ_190228_snv738v8220aiVK9V820@_eate") + "@172.26.26.132:20388/integrate")
        return mon
    def Baike(self):
        webnum = self.mongo.zhidao_details.find({'state_qiu':0,'source':'baiduzhidao'}).count()
        print(webnum)
        if webnum>0:
            filetime = time.strftime("%Y%m%d", time.localtime())
            filename = 'inc_zhidao_{}.dat'.format(filetime)
            # filename = 'inc_zhidao_20190527.dat'
            f = open(r'/mnt/data/liqiu/zhidao/{}'.format(filename),'a',encoding='utf-8')
            for i in range(0,webnum,10000):
                print('*****************************************',i)
                # filetime = time.strftime("%Y%m%d_%H%M%S", time.localtime())
                # filename = 'full_{}.dat'.format(filetime)
                # f = open(r'/mnt/data/liqiu/{}'.format(filename),'a',encoding='utf-8')
                zds = self.mongo.zhidao_details.find({'state_qiu':0,'source':'baiduzhidao'}).limit(10000).skip(i)
                for one in zds:
                    try:
                        liqiu_dict = {'id':str(one['id']),'link':str(one['id']),'title':str(one['title']),'author':str(one['author']),'content':str(one['content_np']),'site_name':str(one['site_name']),'article_url':str(one['article_url']),'crawl_time':str(one['crawl_time']),'source':str(one['source']),'topic':'','flag':'0'}
                        if one.get('type',[]) and isinstance(one['type'],list):
                            liqiu_dict['type']=' '.join(one['type'])
                        elif one.get('type','') and isinstance(one['type'],str):
                            liqiu_dict['type']= one['type']
                        else:
                            liqiu_dict['type']=''
                        if one.get('label',[]) and isinstance(one['label'],list):
                            liqiu_dict['label']=' '.join(one['label'])
                        elif one.get('label',"") and isinstance(one['label'],str):
                            liqiu_dict['label']= one['label']
                        else:
                            liqiu_dict['label']=''
                        # if len(liqiu_dict)==0:
                        #     continue
                        cons = liqiu_dict['content']
                        url = 'http://172.26.26.135:8995/topic?content={}'.format(cons)
                        ai = requests.get(url).text
                        print(ai)
                        if ai == 'AI':
                            ai = 'ai'
                        else:
                            ai = ''
                        liqiu_dict['topic'] = ai
                        self.read_dat(liqiu_dict)
                        f.write('{}\n'.format(json.dumps(liqiu_dict,ensure_ascii=False)))
                        s1={'id':one['id']}
                        s2 = {'$set':{'state_qiu':1}}
                        self.mongo.zhidao_details.update(s1,s2)
                    except KeyError as e:
                        print('exception')
                        print('---------------------------',e)
                        # continue
            # f.write('{}\n'.format(json.dumps(liqiu_dict,ensure_ascii=False)))
    def read_dat(self, line):
        if line['topic'] == 'ai':
            dict_1 = {'id': line['id'], 'content': line['content'], 'crawl_time': line['crawl_time'],
                      'title': line['title'], 'source': line['source'], 'topic': line['topic'], 'type': line['type'],
                      'url': line['article_url']}
            try:
                dict_1['label'] = line['label']
            except KeyError:
                dict_1['label'] = ''
            # print(dict_1)
            self.mon_app.integrate.data_dat.update({'id': dict_1['id']}, dict_1, True)
    def copy_data(self):
        fileti = time.strftime("%H%M%S", time.localtime())
        if int(fileti) > 230000:
            # check whether the file is empty
            filetime = time.strftime("%Y%m%d", time.localtime())
            filename = 'inc_zhidao_{}.dat'.format(filetime)
            file2 = '/mnt/data/liqiu/zhidao/{}'.format(filename)
            if os.path.getsize('{}'.format(file2)):
                # scp the finished file to the target directory
                cmd = "scp -r {} [email protected]:/home/search/ytt/search1/raw_data/src_data/".format(file2)
                pexpect.run(cmd)
            else:
                pass
    def run(self):
        self.Baike()
        self.copy_data()
if __name__ == '__main__':
    zhi=zhidao()
    zhi.run()
|
[
"[email protected]"
] | |
c69c64d15e9879c0c3e8bb12dc4086d660d80025
|
601443d21d3c9f9121e6aec76e2ad012ec4a7817
|
/arekit/contrib/utils/pipelines/text_opinion/filters/distance_based.py
|
ba536accb1dcf557dfb01ce0bdd5f75bd5db3952
|
[
"MIT"
] |
permissive
|
nicolay-r/AREkit
|
27421472ca296671a6da69a94c1070a0b5a33451
|
1e1d354654f4f0a72090504663cc6d218f6aaf4a
|
refs/heads/master
| 2023-08-29T13:30:26.511617 | 2023-08-13T20:11:43 | 2023-08-13T20:11:43 | 225,708,027 | 54 | 4 |
MIT
| 2023-01-18T13:17:01 | 2019-12-03T20:20:46 |
Python
|
UTF-8
|
Python
| false | false | 650 |
py
|
from arekit.common.data.input.sample import InputSampleBase
from arekit.contrib.utils.pipelines.text_opinion.filters.base import TextOpinionFilter
class DistanceLimitedTextOpinionFilter(TextOpinionFilter):
def __init__(self, terms_per_context):
super(DistanceLimitedTextOpinionFilter, self).__init__()
self.__terms_per_context = terms_per_context
def filter(self, text_opinion, parsed_news, entity_service_provider):
return InputSampleBase.check_ability_to_create_sample(
entity_service=entity_service_provider,
text_opinion=text_opinion,
window_size=self.__terms_per_context)
|
[
"[email protected]"
] | |
81d2d43d971b207b2dd0bcc44c97e8f6c0f921da
|
7f04fbc897ff52e4d27cc2f27ae6dfbabe43dfe0
|
/cellml/pmr2/tests/base.py
|
9100a7a3ffe800da9cdfd74b377716fd6c6545ab
|
[] |
no_license
|
metatoaster/cellml.pmr2
|
daae69721af04f7a28eae496dcbeb13b98e2d4d0
|
cbfe212effe325350b1e7087e6172952483b981f
|
refs/heads/master
| 2021-06-21T20:56:30.267128 | 2016-03-08T05:45:53 | 2016-03-08T05:45:53 | 2,396,487 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,105 |
py
|
import unittest
import doctest
from zope.component import testing
from Testing import ZopeTestCase as ztc
from Products.PloneTestCase import PloneTestCase as ptc
from Products.PloneTestCase.layer import onsetup
from Products.PloneTestCase.layer import onteardown
from Products.Five import fiveconfigure
from Zope2.App import zcml
import pmr2.app
from pmr2.testing.base import TestRequest
from pmr2.app.exposure.content import ExposureContainer
from pmr2.app.exposure.browser.browser import ExposureAddForm
from pmr2.app.exposure.browser.browser import ExposureFileGenForm
from pmr2.app.exposure.tests.base import ExposureDocTestCase
from pmr2.app.exposure.tests.base import ExposureExtendedDocTestCase
@onsetup
def setup():
import pmr2.app
import cellml.pmr2
fiveconfigure.debug_mode = True
# XXX dependant on pmr2.app still
zcml.load_config('configure.zcml', cellml.pmr2)
zcml.load_config('test.zcml', cellml.pmr2.tests)
fiveconfigure.debug_mode = False
ztc.installPackage('cellml.pmr2')
@onteardown
def teardown():
pass
setup()
teardown()
ptc.setupPloneSite(products=('cellml.pmr2',))
class CellMLDocTestCase(ExposureExtendedDocTestCase):
def setUp(self):
super(CellMLDocTestCase, self).setUp()
import cellml.pmr2
rev = u'2'
request = TestRequest(
form={
'form.widgets.workspace': u'rdfmodel',
'form.widgets.commit_id': rev,
'form.buttons.add': 1,
})
testform = ExposureAddForm(self.portal.exposure, request)
testform.update()
exp_id = testform._data['id']
context = self.portal.exposure[exp_id]
self.exposure1 = context
rdfmodel = self.portal.workspace.rdfmodel
self.file1 = u'example_model.cellml'
request = TestRequest(
form={
'form.widgets.filename': [self.file1],
'form.buttons.add': 1,
})
testform = ExposureFileGenForm(context, request)
testform.update()
self.exposure_file1 = context[self.file1]
|
[
"[email protected]"
] | |
33ca2cbec3283c60f3f48ff39bcc8624ecb5d8f8
|
a86bd96433a98e2311447a1923a400470d231f7e
|
/almanac/style/highlight.py
|
93bc92ffea0c08e9b9383963588506d9d14bda0a
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
welchbj/almanac
|
3e0d1e8282ec00ad17854536526cf253b331a201
|
7ba473ef07173e0f017dd151e7ca425ba149b8fe
|
refs/heads/main
| 2022-12-18T12:51:53.039850 | 2022-07-06T01:25:03 | 2022-07-06T01:25:03 | 193,141,053 | 5 | 2 |
MIT
| 2022-12-08T14:28:58 | 2019-06-21T18:07:22 |
Python
|
UTF-8
|
Python
| false | false | 1,025 |
py
|
from typing import Optional, Type
from pygments import highlight
from pygments.formatter import Formatter
from pygments.formatters import TerminalFormatter
from pygments.lexers import get_lexer_for_mimetype
from pygments.util import ClassNotFound
def highlight_for_mimetype(
text: str,
mimetype: str,
*,
fallback_mimetype: Optional[str] = 'text/plain',
formatter_cls: Type[Formatter] = TerminalFormatter
) -> str:
"""Return ANSI-escaped highlighted text, as per the .
If ``mimetype`` cannot be resolved, then ``fallback_mimetype`` will be used.
If that cannot be resolved (or is ``None``), then the pygments ``ClassNotFound``
exception will be raised.
"""
try:
lexer = get_lexer_for_mimetype(mimetype)
except ClassNotFound as e:
if fallback_mimetype is not None:
lexer = get_lexer_for_mimetype(fallback_mimetype)
else:
raise e
highlighted_text: str = highlight(text, lexer, formatter_cls())
return highlighted_text
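# Example usage (hypothetical inputs): JSON highlighted for a terminal, and an
# unknown mimetype falling back to plain text instead of raising ClassNotFound.
# print(highlight_for_mimetype('{"key": "value"}', 'application/json'))
# print(highlight_for_mimetype('plain text', 'application/x-unknown'))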
|
[
"[email protected]"
] | |
58d1f9cf803febc2a58fb26e573063434eae588c
|
caaf9046de59559bb92641c46bb8ab00f731cb46
|
/Configuration/Generator/python/JpsiMM_Pt_20_inf_8TeV_TuneCUETP8M1_cfi.py
|
3d826f915126679c530acffd43c4e184f6851393
|
[] |
no_license
|
neumeist/cmssw
|
7e26ad4a8f96c907c7373291eb8df205055f47f0
|
a7061201efe9bc5fa3a69069db037d572eb3f235
|
refs/heads/CMSSW_7_4_X
| 2020-05-01T06:10:08.692078 | 2015-01-11T22:57:32 | 2015-01-11T22:57:32 | 29,109,257 | 1 | 1 | null | 2015-01-11T22:56:51 | 2015-01-11T22:56:49 | null |
UTF-8
|
Python
| false | false | 2,939 |
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
source = cms.Source("EmptySource")
generator = cms.EDFilter("Pythia8GeneratorFilter",
pythiaPylistVerbosity = cms.untracked.int32(0),
filterEfficiency = cms.untracked.double(0.0154),
pythiaHepMCVerbosity = cms.untracked.bool(False),
crossSection = cms.untracked.double(354400000.0),
comEnergy = cms.double(8000.0),
maxEventsToPrint = cms.untracked.int32(0),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'Charmonium:states(3S1) = 443', # filter on 443 and prevents other onium states decaying to 443, so we should turn the others off
'Charmonium:O(3S1)[3S1(1)] = 1.16',
'Charmonium:O(3S1)[3S1(8)] = 0.0119',
'Charmonium:O(3S1)[1S0(8)] = 0.01',
'Charmonium:O(3S1)[3P0(8)] = 0.01',
'Charmonium:gg2ccbar(3S1)[3S1(1)]g = on',
'Charmonium:gg2ccbar(3S1)[3S1(8)]g = on',
'Charmonium:qg2ccbar(3S1)[3S1(8)]q = on',
'Charmonium:qqbar2ccbar(3S1)[3S1(8)]g = on',
'Charmonium:gg2ccbar(3S1)[1S0(8)]g = on',
'Charmonium:qg2ccbar(3S1)[1S0(8)]q = on',
'Charmonium:qqbar2ccbar(3S1)[1S0(8)]g = on',
'Charmonium:gg2ccbar(3S1)[3PJ(8)]g = on',
'Charmonium:qg2ccbar(3S1)[3PJ(8)]q = on',
'Charmonium:qqbar2ccbar(3S1)[3PJ(8)]g = on',
'443:onMode = off', # ignore cross-section re-weighting (CSAMODE=6) since selecting wanted decay mode
'443:onIfAny = 13',
'PhaseSpace:pTHatMin = 20.',
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters',
)
)
)
oniafilter = cms.EDFilter("PythiaFilter",
Status = cms.untracked.int32(2),
MaxEta = cms.untracked.double(1000.0),
MinEta = cms.untracked.double(-1000.0),
MinPt = cms.untracked.double(0.0),
ParticleID = cms.untracked.int32(443)
)
mumugenfilter = cms.EDFilter("MCParticlePairFilter",
Status = cms.untracked.vint32(1, 1),
MinPt = cms.untracked.vdouble(0.5, 0.5),
MinP = cms.untracked.vdouble(2.7, 2.7),
MaxEta = cms.untracked.vdouble(2.5, 2.5),
MinEta = cms.untracked.vdouble(-2.5, -2.5),
ParticleCharge = cms.untracked.int32(-1),
ParticleID1 = cms.untracked.vint32(13),
ParticleID2 = cms.untracked.vint32(13)
)
ProductionFilterSequence = cms.Sequence(generator*oniafilter*mumugenfilter)
|
[
"[email protected]"
] | |
614afacb7d88abe1697191ba3dc5fea6cdce83ef
|
a520eb3a99c0e17760cb185b61da2c5e8ae36bed
|
/apps/users/tests.py
|
0e48b5040d17cd508e8ea78902476af196085d14
|
[] |
no_license
|
zhuoxiaojian/yishengAnalyze
|
9cd4b984a4c90d23d6e2d324def187b88d5b737b
|
18d2afad78f8cf3a734d41d835e7caf7635fca47
|
refs/heads/master
| 2022-12-10T21:30:25.176482 | 2019-01-19T08:55:46 | 2019-01-19T08:55:46 | 153,866,303 | 1 | 1 | null | 2022-12-09T05:32:09 | 2018-10-20T03:32:46 |
JavaScript
|
UTF-8
|
Python
| false | false | 133 |
py
|
from django.test import TestCase
# Create your tests here.
from users.tasks import test
if __name__ == '__main__':
test.delay()
|
[
"[email protected]"
] | |
3a19c9c5be00b701cdd309ad99d37a8fd77a6021
|
cd257631f442d24d2e4902cfb60d05095e7c49ad
|
/week-02/day-01/average_of_input.py
|
d18279b22f7452cd634a2164b12f176064e3c4ef
|
[] |
no_license
|
green-fox-academy/Chiflado
|
62e6fc1244f4b4f2169555af625b6bfdda41a975
|
008893c63a97f4c28ff63cab269b4895ed9b8cf1
|
refs/heads/master
| 2021-09-04T03:25:25.656921 | 2018-01-15T09:02:47 | 2018-01-15T09:02:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 296 |
py
|
# Write a program that asks for 5 integers in a row,
# then it should print the sum and the average of these numbers like:
#
# Sum: 22, Average: 4.4
number = 0
count = 5
for i in range(count):
    number += int(input('Give me a number: '))
print('Sum: ' + str(number) + ' Average: ' + str(number / count))
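# Sample run (hypothetical inputs 2, 4, 6, 8, 2):
# Sum: 22 Average: 4.4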
|
[
"[email protected]"
] | |
1210e7360134b655175e57ae56324fe180e8c0be
|
c6320735f140944d2c282729c008a7cf7cf1e98f
|
/docs/samples/explanation/income/train.py
|
1f390f64d00d252386861f2eb8e6c0452dd63fec
|
[
"Apache-2.0"
] |
permissive
|
gipster/kfserving
|
66d2dffd8917ba9029ca2e96f199e1f56df6e41b
|
bbd3da47a708403fb2a203e28955d5454bc2a1d5
|
refs/heads/master
| 2020-06-10T18:43:57.148347 | 2019-08-19T00:24:03 | 2019-08-19T00:24:03 | 193,709,786 | 0 | 0 |
Apache-2.0
| 2019-06-25T13:08:50 | 2019-06-25T13:08:49 | null |
UTF-8
|
Python
| false | false | 2,400 |
py
|
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from alibi.datasets import adult
import joblib
import dill
from sklearn.pipeline import Pipeline
import alibi
# load data
data, labels, feature_names, category_map = adult()
# define train and test set
np.random.seed(0)
data_perm = np.random.permutation(np.c_[data, labels])
data = data_perm[:, :-1]
labels = data_perm[:, -1]
idx = 30000
X_train, Y_train = data[:idx, :], labels[:idx]
X_test, Y_test = data[idx + 1:, :], labels[idx + 1:]
# feature transformation pipeline
ordinal_features = [x for x in range(len(feature_names)) if x not in list(category_map.keys())]
ordinal_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='median')),
('scaler', StandardScaler())])
categorical_features = list(category_map.keys())
categorical_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='median')),
('onehot', OneHotEncoder(handle_unknown='ignore'))])
preprocessor = ColumnTransformer(transformers=[('num', ordinal_transformer, ordinal_features),
('cat', categorical_transformer, categorical_features)])
# train an RF model
print("Train random forest model")
np.random.seed(0)
clf = RandomForestClassifier(n_estimators=50)
pipeline = Pipeline([('preprocessor', preprocessor),
('clf', clf)])
pipeline.fit(X_train, Y_train)
print("Creating an explainer")
predict_fn = lambda x: clf.predict(preprocessor.transform(x))
explainer = alibi.explainers.AnchorTabular(predict_fn=predict_fn,
feature_names=feature_names,
categorical_names=category_map)
explainer.fit(X_train)
explainer.predict_fn = None # Clear explainer predict_fn as its a lambda and will be reset when loaded
with open("explainer.dill", 'wb') as f:
dill.dump(explainer,f)
print("Saving individual files")
# Dump files - for testing creating an AnchorExplainer from components
joblib.dump(pipeline, 'model.joblib')
joblib.dump(X_train, "train.joblib")
joblib.dump(feature_names, "features.joblib")
joblib.dump(category_map, "category_map.joblib")
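# Hypothetical reload sketch: predict_fn was cleared before pickling, so it
# must be restored after loading (the explain() return type varies across
# alibi versions, hence this stays illustrative):
# with open("explainer.dill", "rb") as f:
#     explainer2 = dill.load(f)
# explainer2.predict_fn = lambda x: clf.predict(preprocessor.transform(x))
# explanation = explainer2.explain(X_test[0])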
|
[
"[email protected]"
] | |
c56f4e4fb4ccc6672145c456c1c1d50ffbfd1d54
|
eb91c2d2560a3e4ce35ebc2d6550f001579c03c5
|
/codeforces/1353/B.py
|
9f295a1f5fb9c4df31d57960b9fb7930b9a23708
|
[] |
no_license
|
kamojiro/codeforces
|
0a3a40c8cab96a0257bb9d6ed53de217192cbabb
|
9e66297fa3025ba6731111ab855096d579e86c67
|
refs/heads/master
| 2021-07-07T11:47:42.373189 | 2020-08-15T14:45:36 | 2020-08-15T14:45:36 | 176,296,160 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 522 |
py
|
#import sys
#input = sys.stdin.readline
def main():
N = int( input())
for _ in range(N):
n, k = map( int, input().split())
A = list( map( int, input().split()))
B = list( map( int, input().split()))
        A.sort()
        B.sort(reverse=True)
        # Greedy: with up to k swaps allowed, pair the k smallest elements of
        # A against the k largest of B and swap whenever B's element is bigger.
        ans = 0
for i in range(n):
if i+1 <= k:
if A[i] < B[i]:
ans += B[i]
continue
ans += A[i]
print( ans)
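# Worked example (hypothetical): n=2, k=1, A=[1, 2], B=[3, 4].
# After sorting, A=[1, 2] and B=[4, 3]; the single swap replaces A's 1
# with B's 4, giving the maximal sum 4 + 2 = 6.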
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
d8d125160792a97e1c2c5c39a0c928f1655589b2
|
250124d214f6834230314dfee4a5dd03713953a2
|
/part-2/2-iterators/9-Iterating_callables.py
|
0dcb235c2e78a05bf787172829de56522d7aafc5
|
[
"MIT"
] |
permissive
|
boconlonton/python-deep-dive
|
3b26b913d1f6f2fdf451a8bc4f24a24d1bb85a64
|
c01591a4943c7b77d4d2cd90a8b23423280367a3
|
refs/heads/master
| 2023-08-30T21:20:12.240813 | 2021-09-29T22:21:26 | 2021-09-29T22:21:26 | 365,419,435 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,347 |
py
|
"""Iterating callables"""
import random
def counter():
i = 0
def inc():
nonlocal i
i += 1
return i
return inc
class CallableIterator:
def __init__(self, callable_, sentinel):
self.callable = callable_
self.sentinel = sentinel
self.is_consumed = False
def __iter__(self):
return self
def __next__(self):
if self.is_consumed:
# Exhaust the callable after consumed
raise StopIteration
else:
result = self.callable()
if result == self.sentinel:
self.is_consumed = True
raise StopIteration
else:
return result
# Usage
cnt = counter()
cnt_iter = CallableIterator(cnt, 5)
for c in cnt_iter:
print(c)
# Usage with iter()
cnt = counter()
cnt_iter = iter(cnt, 5)
for c in cnt_iter:
print(c)
# Create an iterator for random function
# which will stop when meet sentinel
random_iter = iter(lambda: random.randint(0, 10), 8)
random.seed(0)
for num in random_iter:
print(num)
def countdown(start=10):
def run():
nonlocal start
start -= 1
return start
return run
print('---------')
takeoff = countdown(10)
takeoff_iter = iter(takeoff, -1)
for num in takeoff_iter:
print(num)
|
[
"[email protected]"
] | |
2b612f6eea0c6ac37a27d2d8fb6083285ff16073
|
19bc4d44dc7303e23a6949b1bc7b98b65bcf80e9
|
/python/Linear Regression in Python/Simple Linear Regression/Linear Regression at Codecademy/script.py
|
661d035628a95c8b252a74e85e4a4024c02fe7a9
|
[] |
no_license
|
henry1034/Challenge-Project-of-CodeCademy
|
c66190ff3a318e22f263fcf78344632773065c24
|
61ebe84696cec120393acca62b4fce4bdea0fb30
|
refs/heads/master
| 2023-07-04T01:04:16.978374 | 2021-07-29T17:27:56 | 2021-07-29T17:27:56 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,583 |
py
|
# Load libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
import codecademylib3
# Read in the data
codecademy = pd.read_csv('codecademy.csv')
# Print the first five rows
print(codecademy.head())
# Create a scatter plot of score vs completed
plt.scatter(codecademy.completed, codecademy.score)
# Show then clear plot
plt.show()
plt.clf()
# Fit a linear regression to predict score based on prior lessons completed
model = sm.OLS.from_formula(
"score ~ completed",
data = codecademy
)
result = model.fit()
print(result.params)
# Intercept interpretation:
print("A learner who has previously completed 0 content items is expected to earn a quiz score of 13.2 points.")
# Slope interpretation:
print("Students who have completed one additional prior content item are expected to score 1.3 points higher on the quiz.")
# Plot the scatter plot with the line on top
plt.scatter(codecademy.completed, codecademy.score)
plt.plot(codecademy.completed, result.predict(codecademy))
# Show then clear plot
plt.show()
plt.clf()
# Predict score for learner who has completed 20 prior lessons
print(result.predict({'completed':[20]}))
intercept = result.params[0]
slope = result.params[1]
print(slope * 20 + intercept)
# Calculate fitted values
fitted_values = result.predict(codecademy)
# Calculate residuals
residuals = codecademy.score - fitted_values
# Check normality assumption
plt.hist(residuals)
# Show then clear the plot
plt.show()
plt.clf()
# Check homoscedasticity assumption
plt.scatter(fitted_values, residuals)
# Show then clear the plot
plt.show()
plt.clf()
# Create a boxplot of score vs lesson
sns.boxplot(
data = codecademy,
x = "lesson",
y = "score"
)
# Show then clear plot
plt.show()
plt.clf()
# Fit a linear regression to predict score based on which lesson they took
model = sm.OLS.from_formula(
"score ~ lesson",
data = codecademy
)
result = model.fit()
print(result.params)
# Calculate and print the group means and mean difference (for comparison)
mean_score_lessonA = np.mean(codecademy.score[codecademy.lesson == 'Lesson A'])
mean_score_lessonB = np.mean(codecademy.score[codecademy.lesson == 'Lesson B'])
print('Mean score (A): ', mean_score_lessonA)
print('Mean score (B): ', mean_score_lessonB)
print('Mean score difference: ', mean_score_lessonA - mean_score_lessonB)
# Use `sns.lmplot()` to plot `score` vs. `completed` colored by `lesson`
sns.lmplot(
x = "completed",
y = "score",
hue = "lesson",
data = codecademy
)
plt.show()
plt.clf()
|
[
"[email protected]"
] | |
950e9fce4dcbd3c0bc732cdc70d82b7bb4f0e7c3
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/AlipayIserviceIsresourceTenantquerybytntidQueryModel.py
|
bf348d94e07635b10d4f588191dab57c1660c589
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 |
Apache-2.0
| 2023-04-25T04:54:02 | 2018-05-14T09:40:54 |
Python
|
UTF-8
|
Python
| false | false | 1,400 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayIserviceIsresourceTenantquerybytntidQueryModel(object):
def __init__(self):
self._tnt_inst_id = None
self._ur_id = None
@property
def tnt_inst_id(self):
return self._tnt_inst_id
@tnt_inst_id.setter
def tnt_inst_id(self, value):
self._tnt_inst_id = value
@property
def ur_id(self):
return self._ur_id
@ur_id.setter
def ur_id(self, value):
self._ur_id = value
def to_alipay_dict(self):
params = dict()
if self.tnt_inst_id:
if hasattr(self.tnt_inst_id, 'to_alipay_dict'):
params['tnt_inst_id'] = self.tnt_inst_id.to_alipay_dict()
else:
params['tnt_inst_id'] = self.tnt_inst_id
if self.ur_id:
if hasattr(self.ur_id, 'to_alipay_dict'):
params['ur_id'] = self.ur_id.to_alipay_dict()
else:
params['ur_id'] = self.ur_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayIserviceIsresourceTenantquerybytntidQueryModel()
if 'tnt_inst_id' in d:
o.tnt_inst_id = d['tnt_inst_id']
if 'ur_id' in d:
o.ur_id = d['ur_id']
return o
|
[
"[email protected]"
] | |
74ca68420b60222f058228f98a1a446f42d5311d
|
0e3f14722cd87767d29f794530dc1eabc4678a14
|
/projects/migrations/0001_initial.py
|
bf6df9a575080f5727e6d0d3115ebfc864eafca8
|
[] |
no_license
|
Mostaquim/mycampaign
|
e807386b5bc034c0bf8689f29da07bae752ef971
|
4343ff08cb7d86de3efcc3e81b49ca93d01e7ae9
|
refs/heads/master
| 2020-05-09T23:51:06.345794 | 2019-05-09T10:24:22 | 2019-05-09T10:24:22 | 181,513,963 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,738 |
py
|
# Generated by Django 2.1 on 2019-05-06 18:40
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('accounts', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Invoice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('currency', models.IntegerField(choices=[(1, '£')])),
('sent_date', models.DateField(auto_now_add=True)),
('issue_date', models.DateField()),
('due_date', models.DateField()),
('paid_date', models.DateField(null=True)),
('terms', models.TextField()),
('discount', models.DecimalField(decimal_places=2, max_digits=11)),
('tax', models.DecimalField(decimal_places=2, max_digits=11)),
('total', models.DecimalField(decimal_places=2, max_digits=11)),
('status', models.IntegerField(choices=[(1, 'Sent'), (2, 'Open'), (3, 'Paid'), (4, 'Partially paid'), (5, 'Cancelled')])),
('second_tax', models.DecimalField(decimal_places=2, max_digits=11)),
],
),
migrations.CreateModel(
name='InvoiceItems',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', models.DecimalField(decimal_places=2, max_digits=11)),
('description', models.TextField()),
('value', models.DecimalField(decimal_places=2, max_digits=11)),
('name', models.CharField(max_length=255, null=True)),
('item_type', models.CharField(max_length=255, null=True)),
],
),
migrations.CreateModel(
name='PrintingOrder',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pages', models.IntegerField(choices=[(1, 'Single Sided'), (2, 'Double Sided'), (3, '2 Pages'), (4, '4 Pages'), (5, '6 Pages'), (6, '8 Pages'), (7, '10 Pages'), (8, '12 Pages')])),
('page_orientation', models.IntegerField(choices=[(1, 'Portrait'), (2, 'Landscape')])),
('colours', models.IntegerField(choices=[(1, '1/0-coloured Black'), (2, '2/0-coloured Black + Pantone'), (3, '2/0-coloured Black + Gold'), (4, '4/0-coloured CMYK')])),
('processing', models.IntegerField(choices=[(1, 'Trimming'), (2, 'Trimming Corner Rounded')])),
('priority', models.IntegerField(choices=[(1, 'Low'), (2, 'Normal'), (3, 'High'), (4, 'Urgent')], default=1)),
('created', models.DateTimeField(auto_now_add=True, null=True)),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type_of_service', models.IntegerField(choices=[(1, 'Business To Business'), (2, 'Hand To Hand'), (3, 'Direct Mail'), (4, 'Residential Homes'), (5, 'Shared Distribution'), (6, 'Consultation Distribution')], default=1)),
('number_of_boxes', models.IntegerField(choices=[(1, '1'), (2, '2'), (3, '3'), (4, '4 or more'), (5, 'N/A')], default=1)),
('type_of_media', models.IntegerField(choices=[(1, 'Flyer'), (2, 'Leaflet'), (3, 'Folded Leaflet'), (4, 'Other')], default=1)),
('require_collection', models.IntegerField(choices=[(1, 'Yes'), (2, 'No')], default=1)),
('quantity_of_flyers', models.IntegerField(null=True)),
('title_of_media', models.CharField(max_length=255, null=True)),
('campaign_details', models.TextField(max_length=255)),
('agreed_cost', models.DecimalField(decimal_places=2, max_digits=11)),
('campaign_start_date', models.DateField()),
('campaign_finish_date', models.DateField()),
('special_instruction', models.TextField()),
('progress', models.IntegerField(default=1)),
('created', models.DateTimeField(auto_now_add=True, null=True)),
('attachments', models.ManyToManyField(to='core.Attachments')),
('company', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='company', to='accounts.Company')),
('project_admin', models.ForeignKey(limit_choices_to={'staff': True}, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='project_admin', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='ProjectActivity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, null=True)),
('subject', models.CharField(max_length=255)),
('message', models.TextField()),
('acitivity_type', models.CharField(max_length=255)),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projects.Project')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"[email protected]"
] | |
f9757cd5f5931e24e90a9be34c09ca15d7bdbedd
|
f0adca7cac7fb12cdb89e7e821559fe2603bf4bc
|
/src/234/recipe_234_02.py
|
029ab82d6382993f4d8564ed733634fc696da9c6
|
[] |
no_license
|
eriamavro/python-recipe-src
|
dccfa06bc56fcc713f8da9e466f04d07c1f961f0
|
d14f3e4cd885515e9a9a7b8e3f064609c8e50fad
|
refs/heads/master
| 2023-02-13T02:08:44.531621 | 2021-01-14T12:03:05 | 2021-01-14T12:03:05 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 171 |
py
|
import requests
payload = {'key1': 'value1', 'key2': 'value2'}
url = "http://httpbin.org/post"
# Pass the dict via json= so requests serializes it once and sets the
# Content-Type header; wrapping it in json.dumps() would double-encode it.
r = requests.post(url, json=payload)
print(r.text)
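# Equivalent manual form, if explicit control over serialization is wanted:
# import json
# r = requests.post(url, data=json.dumps(payload),
#                   headers={'Content-Type': 'application/json'})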
|
[
"[email protected]"
] | |
94a836f98274030034fc1d71f9ea205e92cb8242
|
9c8c8ae3842ec9a6f36730234c02f93f71ebda20
|
/vndk/tools/sourcedr/ninja/list_installed_module_under_source.py
|
3643e9d57df15529f03701ae39cfbbabc54bc9a2
|
[
"Apache-2.0"
] |
permissive
|
batyrf/platform_development
|
437bc6560a062d0ce7b27bab17b78109a72b1773
|
d4f7efc0c58598e3fc02a1e4fe8e751bd4ae8f0a
|
refs/heads/master
| 2020-12-26T18:37:29.529464 | 2020-02-01T04:54:27 | 2020-02-01T04:54:27 | 237,598,759 | 3 | 0 | null | 2020-02-01T10:35:07 | 2020-02-01T10:35:06 | null |
UTF-8
|
Python
| false | false | 2,232 |
py
|
#!/usr/bin/env python3
import argparse
import itertools
import json
import posixpath
import re
def match_any(regex, iterable):
"""Check whether any element in iterable matches regex."""
return any(regex.match(elem) for elem in iterable)
class ModuleInfo(object):
def __init__(self, module_info_path):
with open(module_info_path, 'r') as module_info_file:
self._json = json.load(module_info_file)
def list(self, installed_filter=None, module_definition_filter=None):
for name, info in self._json.items():
installs = info['installed']
paths = info['path']
if installed_filter and not match_any(installed_filter, installs):
continue
if module_definition_filter and \
not match_any(module_definition_filter, paths):
continue
for install, path in itertools.product(installs, paths):
yield (install, path)
def _parse_args():
"""Parse command line arguments"""
parser = argparse.ArgumentParser()
parser.add_argument('module_info', help='Path to module-info.json')
parser.add_argument('--out-dir', default='out',
help='Android build output directory')
parser.add_argument('--installed-filter',
help='Installation filter (regular expression)')
parser.add_argument('--module-definition-filter',
help='Module definition filter (regular expression)')
return parser.parse_args()
def main():
"""Main function"""
args = _parse_args()
installed_filter = None
if args.installed_filter:
installed_filter = re.compile(
re.escape(posixpath.normpath(args.out_dir)) + '/' +
'(?:' + args.installed_filter + ')')
module_definition_filter = None
if args.module_definition_filter:
module_definition_filter = re.compile(args.module_definition_filter)
module_info = ModuleInfo(args.module_info)
for installed_file, module_path in \
module_info.list(installed_filter, module_definition_filter):
print(installed_file, module_path)
if __name__ == '__main__':
main()
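# Example invocation (hypothetical paths and patterns):
#   python3 list_installed_module_under_source.py out/module-info.json \
#       --out-dir out --installed-filter 'system/lib64/.*\.so' \
#       --module-definition-filter 'frameworks/.*'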
|
[
"[email protected]"
] | |
7be77a226991e8e5cd0cfa304d5c6e570a73c828
|
75eac06d5714843f1f4a1ead6d8a3164adcb9a61
|
/csqa/models/bert_sep.py
|
2f14af9e1c38b1fd04d1c54e957139e86b27b5be
|
[] |
no_license
|
Shuailong/csqa
|
0b3b8de0fc139d84c4841a948fff69a3d0855326
|
bc03dfbb1abe8fd37feee2870210f4209ad1d6af
|
refs/heads/master
| 2022-01-04T17:52:53.909954 | 2020-03-28T04:59:45 | 2020-03-28T04:59:45 | 181,131,710 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,157 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author: Shuailong
# @Email: [email protected]
# @Date: 2019-05-18 23:07:29
# @Last Modified by: Shuailong
# @Last Modified time: 2019-05-18 23:07:39
import logging
from typing import Any, Dict, List, Optional
from overrides import overrides
import torch
from allennlp.common.checks import check_dimensions_match
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import TextFieldEmbedder, FeedForward
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.training.metrics import CategoricalAccuracy
from allennlp.modules.attention import DotProductAttention
from allennlp.nn.util import weighted_sum
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@Model.register("csqa-bert-sep")
class CSQABertSep(Model):
"""
    This class implements a baseline BERT model for the CommonsenseQA dataset described in the NAACL 2019 paper
    CommonsenseQA: A Question Answering Challenge Targeting Commonsense Knowledge [https://arxiv.org/abs/1811.00937].
    In this set-up, a single instance consists of a list of question-answer pairs plus an answer index
    indicating which one is correct.
Parameters
----------
vocab : ``Vocabulary``
    bert : ``TextFieldEmbedder``
        Used to embed the ``question`` and ``choices`` ``TextFields`` we get as input to the model.
    dropout : ``float``, optional (default=0.1)
If greater than 0, we will apply dropout with this probability after all encoders (pytorch
LSTMs do not apply dropout to their last layer).
"""
def __init__(self, vocab: Vocabulary,
bert: TextFieldEmbedder,
classifier: FeedForward,
dropout: float = 0.1,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
super().__init__(vocab, regularizer)
self._bert = bert
self._classifier = classifier
if dropout:
self.dropout = torch.nn.Dropout(dropout)
else:
self.dropout = None
self._pooler = FeedForward(input_dim=bert.get_output_dim(),
num_layers=1,
hidden_dims=bert.get_output_dim(),
activations=torch.tanh)
check_dimensions_match(bert.get_output_dim() * 2, classifier.get_input_dim(),
"bert embedding dim", "classifier input dim")
self._accuracy = CategoricalAccuracy()
self._loss = torch.nn.CrossEntropyLoss()
initializer(self)
def forward(self, # type: ignore
question: Dict[str, torch.LongTensor],
choices: Dict[str, torch.LongTensor],
evidence: Dict[str, torch.LongTensor],
answer_index: torch.IntTensor = None,
metadata: List[Dict[str, Any]
] = None # pylint:disable=unused-argument
) -> Dict[str, torch.Tensor]:
# pylint: disable=arguments-differ
"""
Parameters
----------
        question : Dict[str, torch.LongTensor]
            From a ``TextField`` holding the question.
        choices : Dict[str, torch.LongTensor]
            From a ``ListField`` of the five answer choices.
answer_index : ``torch.IntTensor``, optional
From an ``IndexField``. This is what we are trying to predict.
If this is given, we will compute a loss that gets included in the output dictionary.
metadata : ``List[Dict[str, Any]]``, optional
If present, this should contain the question ID, question and choices for each instance
in the batch. The length of this list should be the batch size, and each dictionary
should have the keys ``qid``, ``question``, ``choices``, ``question_tokens`` and
``choices_tokens``.
Returns
-------
An output dictionary consisting of the followings.
qid : List[str]
A list consisting of question ids.
answer_logits : torch.FloatTensor
A tensor of shape ``(batch_size, num_options=5)`` representing unnormalised log
probabilities of the choices.
answer_probs : torch.FloatTensor
A tensor of shape ``(batch_size, num_options=5)`` representing probabilities of the
choices.
loss : torch.FloatTensor, optional
A scalar loss to be optimised.
"""
# batch, seq_len -> batch, seq_len, emb
question_hidden = self._bert(question)
batch_size, emb_size = question_hidden.size(0), question_hidden.size(2)
question_hidden = question_hidden[..., 0, :] # batch, emb
# batch, 5, seq_len -> batch, 5, seq_len, emb
choice_hidden = self._bert(choices, num_wrapping_dims=1)
choice_hidden = choice_hidden[..., 0, :] # batch, 5, emb
if self.dropout:
question_hidden = self.dropout(question_hidden)
choice_hidden = self.dropout(choice_hidden)
question_hidden = question_hidden.unsqueeze(
1).expand(batch_size, 5, emb_size)
cls_hidden = torch.cat([question_hidden, choice_hidden],
dim=-1)
# batch,5,emb*2
# the final MLP -- apply dropout to input, and MLP applies to hidden
answer_logits = self._classifier(cls_hidden).squeeze(-1) # batch, 5
answer_probs = torch.nn.functional.softmax(answer_logits, dim=-1)
qids = [m['qid'] for m in metadata]
output_dict = {"answer_logits": answer_logits,
"answer_probs": answer_probs,
"qid": qids}
if answer_index is not None:
answer_index = answer_index.squeeze(-1) # batch
loss = self._loss(answer_logits, answer_index)
self._accuracy(answer_logits, answer_index)
output_dict["loss"] = loss
return output_dict
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {'accuracy': self._accuracy.get_metric(reset)}
|
[
"[email protected]"
] | |
85297224463e89bbcee3a6b86337b908c5929cb2
|
8a0e14299d8b915c0a909cf9fa9a86589dc63d76
|
/python/ray/tune/automl/__init__.py
|
cab4c4de4dab106306090e7cdc11ee1396f99abd
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
natashamjaques/ray
|
795e4271c3c5f3e261327afea40b81ffe6f362ac
|
aca9dd5ee7a8fef508a5383fdd26ad8ccdcb16e4
|
refs/heads/master
| 2020-04-12T05:58:15.680359 | 2019-03-06T22:08:10 | 2019-03-06T22:08:10 | 162,337,948 | 3 | 2 |
Apache-2.0
| 2018-12-18T19:47:02 | 2018-12-18T19:47:01 | null |
UTF-8
|
Python
| false | false | 464 |
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ray.tune.automl.genetic_searcher import GeneticSearch
from ray.tune.automl.search_policy import GridSearch, RandomSearch
from ray.tune.automl.search_space import SearchSpace, \
ContinuousSpace, DiscreteSpace
__all__ = [
"ContinuousSpace",
"DiscreteSpace",
"SearchSpace",
"GridSearch",
"RandomSearch",
"GeneticSearch",
]
|
[
"[email protected]"
] | |
0ceaa149f62c4d0ac1618af38585c3570814e82d
|
6aa7e203f278b9d1fd01244e740d5c944cc7c3d3
|
/airflow/providers/apache/kylin/hooks/kylin.py
|
59f6ce94ff23200923bd0942ba05a73279150f5b
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"Python-2.0"
] |
permissive
|
laserpedro/airflow
|
83fc991d91749550b151c81876d9e7864bff3946
|
a28afa8172489e41ecf7c381674a0cb91de850ff
|
refs/heads/master
| 2023-01-02T04:55:34.030935 | 2020-10-24T15:55:11 | 2020-10-24T15:55:11 | 285,867,990 | 1 | 0 |
Apache-2.0
| 2020-08-07T15:56:49 | 2020-08-07T15:56:49 | null |
UTF-8
|
Python
| false | false | 2,795 |
py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Optional
from kylinpy import exceptions, kylinpy
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
class KylinHook(BaseHook):
"""
:param kylin_conn_id: The connection id as configured in Airflow administration.
:type kylin_conn_id: str
    :param project: project name
:type project: Optional[str]
    :param dsn: data source name; when provided it is used to build the connection directly and takes precedence over the Airflow connection
:type dsn: Optional[str]
"""
def __init__(self,
kylin_conn_id: Optional[str] = 'kylin_default',
project: Optional[str] = None,
dsn: Optional[str] = None
):
super().__init__()
self.kylin_conn_id = kylin_conn_id
self.project = project
self.dsn = dsn
def get_conn(self):
conn = self.get_connection(self.kylin_conn_id)
if self.dsn:
return kylinpy.create_kylin(self.dsn)
else:
self.project = self.project if self.project else conn.schema
return kylinpy.Kylin(conn.host, username=conn.login,
password=conn.password, port=conn.port,
project=self.project, **conn.extra_dejson)
def cube_run(self, datasource_name, op, **op_args):
"""
        Run a CubeSource command that is listed in CubeSource.support_invoke_command.
:param datasource_name:
:param op: command
:param op_args: command args
:return: response
"""
cube_source = self.get_conn().get_datasource(datasource_name)
try:
response = cube_source.invoke_command(op, **op_args)
return response
except exceptions.KylinError as err:
raise AirflowException("Cube operation {} error , Message: {}".format(op, err))
def get_job_status(self, job_id):
"""
get job status
:param job_id: kylin job id
:return: job status
"""
return self.get_conn().get_job(job_id).status
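# Hypothetical usage sketch (connection id, project and cube names are
# assumptions; the supported ops come from kylinpy's CubeSource):
# hook = KylinHook(kylin_conn_id='kylin_default', project='learn_kylin')
# hook.cube_run('kylin_sales_cube', 'build', start=..., end=...)
# status = hook.get_job_status('<job_id>')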
|
[
"[email protected]"
] | |
c618f3a535441e5c8587f2f8d2c91d6c2a046dd8
|
113f8ae533a75e9f2fdc1728661af0f19c8460a6
|
/books_app/books_app/settings.py
|
8f53b3a945f604d8a773d85e73cdd69c268b132c
|
[] |
no_license
|
PeterM358/Python-web-2021
|
cf08beaa3330495afc53e640f4a2aaf0429049e9
|
a3b7e1d1be0cc85675aaff646917d4f5b7f97b00
|
refs/heads/master
| 2023-07-09T15:09:08.868548 | 2021-07-24T13:49:22 | 2021-07-24T13:49:22 | 382,328,747 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,403 |
py
|
"""
Django settings for books_app project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-e05f*w&+x@+@w7-9g*7z!4^%7u+xmeb9uxz*j@!kz(e5max0c('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'books_app.books',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'books_app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [BASE_DIR / 'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'books_app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'books',
'USER': 'postgres',
'PASSWORD': 'asdf1234',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
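# Hardening sketch (illustrative, not part of the original settings): the two
# SECURITY WARNINGs above are usually addressed by reading the secret key and
# debug flag from the environment instead of hardcoding them, e.g.:
#     import os
#     SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)
#     DEBUG = os.environ.get('DJANGO_DEBUG', '') == '1'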
|
[
"estestveno"
] |
estestveno
|
d010fb79c796f34db9c3ccef04a23dd8ba9fc29a
|
2a54e8d6ed124c64abb9e075cc5524bb859ba0fa
|
/.history/8-loops_20200406005828.py
|
5e027ff5acfe70abba31bc7f2389a11006536d94
|
[] |
no_license
|
CaptainStorm21/Python-Foundation
|
01b5fbaf7a913506518cf22e0339dd948e65cea1
|
a385adeda74f43dd7fb2d99d326b0be23db25024
|
refs/heads/master
| 2021-05-23T01:29:18.885239 | 2020-04-23T19:18:06 | 2020-04-23T19:18:06 | 253,171,611 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 944 |
py
|
# A for loop is used for iterating over a sequence (that is either a list, a tuple, a dictionary, a set, or a string).
# Simple Loop
# people = ['John', 'Mary', 'Anna', 'Margaret', 'Sylvia']
# for person in people:
# print('Current person is: ', person)
# Break
# people1 = ['John', 'Mary', 'Anna', 'Margaret', 'Sylvia', 'Monique']
# for child in people1:
# if child == 'Anna':
# print('Current child is: ', child)
# break
# gamers = ['John', 'Mary', 'Anna', 'Margaret', 'Sylvia', 'Monique']
# for person in gamers:
# if person == 'Caty':
# continue
# print('Current gamer is: ', person)
# Range
# gamers = ['John', 'Mary', 'Anna', 'Margaret', 'Sylvia', 'Monique']
# for i in range (len(gamers)):
# print('Current gamer: ', gamers[i])
# for i in range (0, 10):
# print ('Number ', i)
# While loops execute a set of statements as long as a condition is true.
count = 0
while count < 10:
    print('Count: ', count)
    count += 1
|
[
"[email protected]"
] | |
f694103ad29e76cd74411c21fb687a6e63fcbdbf
|
6bda8a6e44d09397ada6ed222800e16f071674bf
|
/src/numdifftools/profiletools.py
|
4e6374add0e9bed8d01cf6a6f24116263cc71f59
|
[
"BSD-3-Clause"
] |
permissive
|
pbrod/numdifftools
|
557af2ee288339737a9e005fb0485542c13e8891
|
4f62e51d4776cc6acbdfb6268482635a487b860c
|
refs/heads/master
| 2023-07-20T19:26:53.241589 | 2022-11-14T13:39:42 | 2022-11-14T13:39:42 | 17,676,169 | 237 | 52 |
BSD-3-Clause
| 2023-07-05T15:21:37 | 2014-03-12T17:31:06 |
Python
|
UTF-8
|
Python
| false | false | 5,763 |
py
|
"""
This module is based on: https://zapier.com/engineering/profiling-python-boss/
See also:
https://www.pythoncentral.io/measure-time-in-python-time-time-vs-time-clock/
"""
from __future__ import absolute_import, print_function
import inspect
import cProfile
from functools import wraps
from timeit import default_timer as timer
import warnings
try:
from line_profiler import LineProfiler
def _add_all_class_methods(profiler, cls, except_=''):
for k, v in inspect.getmembers(cls, inspect.ismethod):
if k != except_:
profiler.add_function(v)
def _add_function_or_classmethod(profiler, f, args):
if isinstance(f, str): # f is a method of the
cls = args[0] # class instance
profiler.add_function(getattr(cls, f))
else:
profiler.add_function(f)
def do_profile(follow=(), follow_all_methods=False):
"""
Decorator to profile a function or class method
It uses line_profiler to give detailed reports on time spent on each
line in the code.
Pros: has intuitive and finely detailed reports. Can follow
functions in third party libraries.
Cons:
has external dependency on line_profiler and is quite slow,
so don't use it for benchmarking.
Handy tip:
Just decorate your test function or class method and pass any
additional problem function(s) in the follow argument!
        If any follow argument is a string, it is assumed that the string
        refers to a bound method of the class.
See also
--------
do_cprofile, test_do_profile
"""
def inner(func):
def profiled_func(*args, **kwargs):
try:
profiler = LineProfiler()
profiler.add_function(func)
if follow_all_methods:
cls = args[0] # class instance
_add_all_class_methods(profiler, cls,
except_=func.__name__)
for f in follow:
_add_function_or_classmethod(profiler, f, args)
profiler.enable_by_count()
return func(*args, **kwargs)
finally:
profiler.print_stats()
return profiled_func
return inner
except ImportError as error:
LineProfiler = None
warnings.warn(str(error))
def do_profile(follow=(), follow_all_methods=False):
"Helpful if you accidentally leave in production!"
def inner(func):
def nothing(*args, **kwargs):
return func(*args, **kwargs)
return nothing
return inner
def timefun(fun):
""" Timing decorator
Timers require you to do some digging. Start wrapping a few of the higher level
functions and confirm where the bottleneck is, then drill down into that function,
repeating as you go. When you find the disproportionately slow bit of code, fix it,
and work your way back out confirming that it is fixed.
Handy tip: Don't forget the handy timeit module! It tends to be more useful for
benchmarking small pieces of code than for doing the actual investigation.
Timer Pros:
Easy to understand and implement. Also very simple to compare before and after fixes.
Works across many languages.
Timer Cons:
Sometimes a little too simplistic for extremely complex codebases, you might spend
more time placing and replacing boilerplate code than you will fixing the problem!
"""
@wraps(fun)
def measure_time(*args, **kwargs):
t1 = timer()
result = fun(*args, **kwargs)
t2 = timer()
print("@timefun:" + fun.__name__ + " took " + str(t2 - t1) + " seconds")
return result
return measure_time
class TimeWith():
"""
Timing context manager
"""
def __init__(self, name=''):
self.name = name
self.start = timer()
@property
def elapsed(self):
return timer() - self.start
def checkpoint(self, name=''):
print('{timer} {checkpoint} took {elapsed} seconds'.format(timer=self.name,
checkpoint=name,
elapsed=self.elapsed,
).strip())
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.checkpoint('finished')
def do_cprofile(func):
"""
Decorator to profile a function
It gives good numbers on various function calls but it omits a vital piece
of information: what is it about a function that makes it so slow?
However, it is a great start to basic profiling. Sometimes it can even
point you to the solution with very little fuss. I often use it as a
gut check to start the debugging process before I dig deeper into the
    specific functions that are either slow or called way too often.
Pros:
No external dependencies and quite fast. Useful for quick high-level
checks.
Cons:
Rather limited information that usually requires deeper debugging; reports
are a bit unintuitive, especially for complex codebases.
See also
--------
do_profile, test_do_profile
"""
def profiled_func(*args, **kwargs):
profile = cProfile.Profile()
try:
profile.enable()
result = func(*args, **kwargs)
profile.disable()
return result
finally:
profile.print_stats()
return profiled_func
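# --- Illustrative demo (not part of the original module) ---
# Exercises the helpers above on a made-up function; the numbers are arbitrary
# and only exist to produce visible timings and profiler output.
if __name__ == '__main__':
    @do_cprofile
    @timefun
    def slow_sum(n):
        return sum(i * i for i in range(n))
    with TimeWith('demo') as t:
        slow_sum(10 ** 5)
        t.checkpoint('slow_sum done')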
|
[
"[email protected]"
] | |
14c7f9577956db004b7db590687e30e8fdba3192
|
ad0e853db635edc578d58891b90f8e45a72a724f
|
/rllib/examples/inference_and_serving/policy_inference_after_training.py
|
17f033847ec1c046e9d6f405d8517c6f099104ee
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
ericl/ray
|
8c93fc713af3b753215d4fe6221278700936e2db
|
e9a1c6d814fb1a81033809f56695030d651388f5
|
refs/heads/master
| 2023-08-31T11:53:23.584855 | 2023-06-07T21:04:28 | 2023-06-07T21:04:28 | 91,077,004 | 2 | 4 |
Apache-2.0
| 2023-01-11T17:19:10 | 2017-05-12T09:51:04 |
Python
|
UTF-8
|
Python
| false | false | 3,804 |
py
|
"""
Example showing how you can use your trained policy for inference
(computing actions) in an environment.
This listing uses a plain (non-recurrent) model; the full RLlib example also
provides LSTM-based (--use-lstm) and attention-net (--use-attention) variants.
"""
import argparse
import gymnasium as gym
import os
import ray
from ray import air, tune
from ray.rllib.algorithms.algorithm import Algorithm
from ray.tune.registry import get_trainable_cls
parser = argparse.ArgumentParser()
parser.add_argument(
"--run", type=str, default="PPO", help="The RLlib-registered algorithm to use."
)
parser.add_argument("--num-cpus", type=int, default=0)
parser.add_argument(
"--framework",
choices=["tf", "tf2", "torch"],
default="torch",
help="The DL framework specifier.",
)
parser.add_argument("--eager-tracing", action="store_true")
parser.add_argument(
"--stop-iters",
type=int,
default=200,
help="Number of iterations to train before we do inference.",
)
parser.add_argument(
"--stop-timesteps",
type=int,
default=100000,
help="Number of timesteps to train before we do inference.",
)
parser.add_argument(
"--stop-reward",
type=float,
default=150.0,
help="Reward at which we stop training before we do inference.",
)
parser.add_argument(
"--explore-during-inference",
action="store_true",
help="Whether the trained policy should use exploration during action "
"inference.",
)
parser.add_argument(
"--num-episodes-during-inference",
type=int,
default=10,
help="Number of episodes to do inference over after training.",
)
if __name__ == "__main__":
args = parser.parse_args()
ray.init(num_cpus=args.num_cpus or None)
config = (
get_trainable_cls(args.run)
.get_default_config()
.environment("FrozenLake-v1")
# Run with tracing enabled for tf2?
.framework(args.framework, eager_tracing=args.eager_tracing)
# Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
.resources(num_gpus=int(os.environ.get("RLLIB_NUM_GPUS", "0")))
)
stop = {
"training_iteration": args.stop_iters,
"timesteps_total": args.stop_timesteps,
"episode_reward_mean": args.stop_reward,
}
print("Training policy until desired reward/timesteps/iterations. ...")
tuner = tune.Tuner(
args.run,
param_space=config.to_dict(),
run_config=air.RunConfig(
stop=stop,
verbose=2,
checkpoint_config=air.CheckpointConfig(
checkpoint_frequency=1, checkpoint_at_end=True
),
),
)
results = tuner.fit()
print("Training completed. Restoring new Trainer for action inference.")
# Get the last checkpoint from the above training run.
checkpoint = results.get_best_result().checkpoint
# Create new Algorithm and restore its state from the last checkpoint.
algo = Algorithm.from_checkpoint(checkpoint)
# Create the env to do inference in.
env = gym.make("FrozenLake-v1")
obs, info = env.reset()
num_episodes = 0
episode_reward = 0.0
while num_episodes < args.num_episodes_during_inference:
# Compute an action (`a`).
a = algo.compute_single_action(
observation=obs,
explore=args.explore_during_inference,
policy_id="default_policy", # <- default value
)
# Send the computed action `a` to the env.
obs, reward, done, truncated, _ = env.step(a)
episode_reward += reward
        # Is the episode done (terminated or truncated)? -> Reset.
        if done or truncated:
print(f"Episode done: Total reward = {episode_reward}")
obs, info = env.reset()
num_episodes += 1
episode_reward = 0.0
algo.stop()
ray.shutdown()
|
[
"[email protected]"
] | |
1ea7ec9cd6f0f33042d9eac704a7f47a193c0f13
|
8bcf973008b1d7549f59501a1667909848ea87dd
|
/Day0617/staff_info/bin/start.py
|
ff176549a916a65c76e64836aa50c52a7c6e5635
|
[] |
no_license
|
simplesmall/Python-FullStack
|
74ffeb2119eecb7fcb21a136d01aaaf2bcc2c24c
|
210844ef6443a5543d49a20dbec2db9a9b960230
|
refs/heads/master
| 2022-12-17T00:56:40.515335 | 2019-11-15T02:07:57 | 2019-11-15T02:07:57 | 221,816,447 | 0 | 1 | null | 2022-12-13T19:22:26 | 2019-11-15T01:10:55 |
Python
|
UTF-8
|
Python
| false | false | 327 |
py
|
import sys
import os
# print(sys.path)
#获取start.py的路径
#当前文件往上翻两层 staff_info
project_path = os.path.dirname(os.path.dirname(__file__))
sys.path.append(project_path) #把staff_info添加到sys.path中
print(project_path)
from core import main
if __name__ == '__main__':
main.home()
|
[
"[email protected]"
] | |
e3ae23e183adf64fde585cc7af4664706cfcceab
|
eed9b3d099facd98b8a139681808997d60b4e19c
|
/decorator_opt_arg/decorators.py
|
4442035bdc787580a9d4d98b7258dade8ef37179
|
[] |
no_license
|
pybites/blog_code
|
1240a3393a3672681d97c369711be6c7415d8c10
|
902ebb87e5f7a407714d0e399833f0331a1b915d
|
refs/heads/master
| 2022-12-10T19:50:57.718119 | 2020-08-08T17:13:15 | 2020-08-08T17:13:15 | 76,716,190 | 49 | 47 | null | 2022-11-22T01:54:20 | 2016-12-17T09:51:12 |
HTML
|
UTF-8
|
Python
| false | false | 514 |
py
|
from functools import wraps
import time
def sleep(seconds=None):
def real_decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
print('Sleeping for {} seconds'.format(seconds))
time.sleep(seconds if seconds else 1)
return func(*args, **kwargs)
return wrapper
return real_decorator
if __name__ == '__main__':
@sleep(1) # @sleep without arg fails
def hello():
print('hello world')
for _ in range(3):
hello()
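# --- Variant sketch (illustrative, not from the original post) ---
# Also supports the bare `@sleep_flexible` form (no parentheses) by checking
# whether the decorator received the function directly; `sleep_flexible` is a
# hypothetical name, not part of the post.
def sleep_flexible(seconds=None):
    if callable(seconds):  # used bare: @sleep_flexible
        func = seconds
        @wraps(func)
        def wrapper(*args, **kwargs):
            time.sleep(1)
            return func(*args, **kwargs)
        return wrapper
    def real_decorator(func):  # used with args: @sleep_flexible(2)
        @wraps(func)
        def wrapper(*args, **kwargs):
            time.sleep(seconds if seconds else 1)
            return func(*args, **kwargs)
        return wrapper
    return real_decorator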
|
[
"[email protected]"
] | |
71ba4ee7dbdb38f9f5e41c9b92d886fda6729209
|
91c7de67e656fec2b9c32b64e1b6ae88083a0283
|
/functional_tests/test_simple_list_creation.py
|
f5aee3c61fd7a18d274cbbaf40fa57f4feb504f4
|
[] |
no_license
|
pohily/TDD
|
e0a85c60c5ee2e7388323ffb00b7fe81372431c1
|
60d2a0f9debfcc22be54d85e981aee23f8113563
|
refs/heads/master
| 2022-05-04T20:07:46.296627 | 2019-07-24T11:57:19 | 2019-07-24T11:57:19 | 189,567,223 | 0 | 0 | null | 2022-04-22T21:23:44 | 2019-05-31T09:28:16 |
JavaScript
|
UTF-8
|
Python
| false | false | 3,307 |
py
|
from .base import FunctionalTest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class NewVisitorTest(FunctionalTest):
def test_can_start_a_list_for_one_user(self):
# Edith has heard about a cool new online to-do app. She goes
# to check out its homepage
self.browser.get(self.live_server_url)
# She notices the page title and header mention to-do lists
self.assertIn('To-Do', self.browser.title)
header_text = self.browser.find_element_by_tag_name('h1').text
self.assertIn('To-Do', header_text)
# She is invited to enter a to-do item straight away
inputbox = self.get_item_input_box()
self.assertEqual(
inputbox.get_attribute('placeholder'),
'Enter a to-do item'
)
# She types "Buy peacock feathers" into a text box (Edith's hobby
# is tying fly-fishing lures)
inputbox.send_keys('Buy peacock feathers')
# When she hits enter, the page updates, and now the page lists
# "1: Buy peacock feathers" as an item in a to-do list table
inputbox.send_keys(Keys.ENTER)
self.wait_for_row_in_list_table('1: Buy peacock feathers')
# There is still a text box inviting her to add another item. She
# enters "Use peacock feathers to make a fly" (Edith is very
# methodical)
self.add_list_item('Use peacock feathers to make a fly')
# The page updates again, and now shows both items on her list
self.wait_for_row_in_list_table('2: Use peacock feathers to make a fly')
self.wait_for_row_in_list_table('1: Buy peacock feathers')
# Satisfied, she goes back to sleep
def test_multiple_users_can_start_lists_at_different_urls(self):
# Edith starts a new to-do list
self.browser.get(self.live_server_url)
self.add_list_item('Buy peacock feathers')
# She notices that her list has a unique URL
edith_list_url = self.browser.current_url
self.assertRegex(edith_list_url, '/lists/.+')
# Now a new user, Francis, comes along to the site.
## We use a new browser session to make sure that no information
## of Edith's is coming through from cookies etc
self.browser.quit()
self.browser = webdriver.Firefox()
# Francis visits the home page. There is no sign of Edith's
# list
self.browser.get(self.live_server_url)
page_text = self.browser.find_element_by_tag_name('body').text
self.assertNotIn('Buy peacock feathers', page_text)
self.assertNotIn('make a fly', page_text)
# Francis starts a new list by entering a new item. He
# is less interesting than Edith...
self.add_list_item('Buy milk')
# Francis gets his own unique URL
francis_list_url = self.browser.current_url
self.assertRegex(francis_list_url, '/lists/.+')
self.assertNotEqual(francis_list_url, edith_list_url)
# Again, there is no trace of Edith's list
page_text = self.browser.find_element_by_tag_name('body').text
self.assertNotIn('Buy peacock feathers', page_text)
self.assertIn('Buy milk', page_text)
# Satisfied, they both go back to sleep
|
[
"[email protected]"
] | |
713275915abef8843f8041d6f606da3ed88339b9
|
f77593e9e9a112e85acd3c73c056a7466d76e15e
|
/request_delivery_installation/request_delivery_installation/urls.py
|
d15cc80688686b4ea06f1692684c43314ce8d0e5
|
[] |
no_license
|
geethusuresh/reqest_installation
|
bf47c915aee1e1f7730ea858c000a6dd434a79fb
|
d047fa9f303273915651d0cbe03b7795f157f31c
|
refs/heads/master
| 2021-01-25T04:09:10.282831 | 2014-09-28T06:40:10 | 2014-09-28T06:40:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,498 |
py
|
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib import admin
admin.autodiscover()
from web.views import *
urlpatterns = patterns('',
url(r'^$', login_required(Home.as_view()), name='home'),
url(r'^accounts/login/$', Login.as_view(), name='login'),
url(r'^logout/$', Logout.as_view(), name='logout'),
url(r'^register/$', login_required(Signup.as_view()), name='register'),
    url(r'^dealer/(?P<user_id>\d+)/add/subdealer/$',login_required(AddSubDealer.as_view()), name="add_subdealer"),
url(r'^add_purchase_info/$', login_required(AddPurchanseInfo.as_view()), name='add_purchase_info'),
url(r'^fetch_brand_names/$', FetchBrandNames.as_view(), name='fetch_brand_names'),
url(r'^fetch_purchase_sales_men/$', FetchPurchaseSalesManList.as_view(), name='fetch_purchase_sales_men'),
url(r'^fetch_dealers/$', FetchDealersList.as_view(), name='fetch_dealers'),
    url(r'^purchase_info/(?P<purchase_info_id>\d+)/$', login_required(PurchaseInfoView.as_view()), name='purchase_info'),
url(r'^search_purchase_info/(?P<delivery_order_number>[\w-]+)/$', login_required(SearchPurchaseInfo.as_view()), name="search_purchase_info"),
url(r'^fetch_dealer_company_names/$', FetchFirmNames.as_view(), name='fetch_firm_names'),
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls)),
)
|
[
"[email protected]"
] | |
42e5956217bb73d7bf84ce47a3cd84c808b6c11f
|
2130aa6efd199c612b03b0cd949375dd828dd218
|
/acoustid/data/submission.py
|
b3897ac10f2f83bd8c45d4bea70e680730d28066
|
[
"MIT"
] |
permissive
|
bazo/acoustid-server
|
4774965b8a16555100c972c09582bb09ea10df3f
|
56b11f1bbd093e23970d9baae2a2655ecea34aee
|
refs/heads/master
| 2020-05-27T21:08:29.078822 | 2017-01-02T20:19:42 | 2017-01-02T20:19:42 | 83,599,159 | 1 | 0 | null | 2017-03-01T20:36:20 | 2017-03-01T20:36:20 | null |
UTF-8
|
Python
| false | false | 6,283 |
py
|
# Copyright (C) 2011 Lukas Lalinsky
# Distributed under the MIT license, see the LICENSE file for details.
import logging
from sqlalchemy import sql
from acoustid import tables as schema, const
from acoustid.data.fingerprint import lookup_fingerprint, insert_fingerprint, inc_fingerprint_submission_count, FingerprintSearcher
from acoustid.data.musicbrainz import resolve_mbid_redirect
from acoustid.data.track import insert_track, insert_mbid, insert_puid, merge_tracks, insert_track_meta, can_add_fp_to_track, can_merge_tracks, insert_track_foreignid
logger = logging.getLogger(__name__)
def insert_submission(conn, data):
"""
Insert a new submission into the database
"""
with conn.begin():
insert_stmt = schema.submission.insert().values({
'fingerprint': data['fingerprint'],
'length': data['length'],
'bitrate': data.get('bitrate'),
'mbid': data.get('mbid'),
'puid': data.get('puid'),
'source_id': data.get('source_id'),
'format_id': data.get('format_id'),
'meta_id': data.get('meta_id'),
'foreignid_id': data.get('foreignid_id'),
})
id = conn.execute(insert_stmt).inserted_primary_key[0]
logger.debug("Inserted submission %r with data %r", id, data)
return id
def import_submission(conn, submission, index=None):
"""
Import the given submission into the main fingerprint database
"""
with conn.begin():
update_stmt = schema.submission.update().where(
schema.submission.c.id == submission['id'])
conn.execute(update_stmt.values(handled=True))
mbids = []
if submission['mbid']:
mbids.append(resolve_mbid_redirect(conn, submission['mbid']))
logger.info("Importing submission %d with MBIDs %s",
submission['id'], ', '.join(mbids))
num_unique_items = len(set(submission['fingerprint']))
if num_unique_items < const.FINGERPRINT_MIN_UNIQUE_ITEMS:
logger.info("Skipping, has only %d unique items", num_unique_items)
return
        num_query_items = conn.execute("SELECT icount(acoustid_extract_query(%(fp)s))", dict(fp=submission['fingerprint'])).scalar()
if not num_query_items:
logger.info("Skipping, no data to index")
return
searcher = FingerprintSearcher(conn, index, fast=False)
searcher.min_score = const.TRACK_MERGE_THRESHOLD
matches = searcher.search(submission['fingerprint'], submission['length'])
fingerprint = {
'id': None,
'track_id': None,
'fingerprint': submission['fingerprint'],
'length': submission['length'],
'bitrate': submission['bitrate'],
'format_id': submission['format_id'],
}
if matches:
match = matches[0]
all_track_ids = set()
possible_track_ids = set()
for m in matches:
if m['track_id'] in all_track_ids:
continue
all_track_ids.add(m['track_id'])
logger.debug("Fingerprint %d with track %d is %d%% similar", m['id'], m['track_id'], m['score'] * 100)
if can_add_fp_to_track(conn, m['track_id'], submission['fingerprint'], submission['length']):
possible_track_ids.add(m['track_id'])
if not fingerprint['track_id']:
fingerprint['track_id'] = m['track_id']
if m['score'] > const.FINGERPRINT_MERGE_THRESHOLD:
fingerprint['id'] = m['id']
if len(possible_track_ids) > 1:
for group in can_merge_tracks(conn, possible_track_ids):
if fingerprint['track_id'] in group and len(group) > 1:
fingerprint['track_id'] = min(group)
group.remove(fingerprint['track_id'])
merge_tracks(conn, fingerprint['track_id'], list(group))
break
if not fingerprint['track_id']:
fingerprint['track_id'] = insert_track(conn)
if not fingerprint['id']:
fingerprint['id'] = insert_fingerprint(conn, fingerprint, submission['id'], submission['source_id'])
else:
inc_fingerprint_submission_count(conn, fingerprint['id'], submission['id'], submission['source_id'])
for mbid in mbids:
insert_mbid(conn, fingerprint['track_id'], mbid, submission['id'], submission['source_id'])
if submission['puid'] and submission['puid'] != '00000000-0000-0000-0000-000000000000':
insert_puid(conn, fingerprint['track_id'], submission['puid'], submission['id'], submission['source_id'])
if submission['meta_id']:
insert_track_meta(conn, fingerprint['track_id'], submission['meta_id'], submission['id'], submission['source_id'])
if submission['foreignid_id']:
insert_track_foreignid(conn, fingerprint['track_id'], submission['foreignid_id'], submission['id'], submission['source_id'])
return fingerprint
def import_queued_submissions(conn, index=None, limit=100, ids=None):
"""
    Import queued submissions into the main fingerprint database
"""
query = schema.submission.select(schema.submission.c.handled == False).\
order_by(schema.submission.c.mbid.nullslast(), schema.submission.c.id.desc())
if ids is not None:
query = query.where(schema.submission.c.id.in_(ids))
if limit is not None:
query = query.limit(limit)
count = 0
for submission in conn.execute(query):
import_submission(conn, submission, index=index)
count += 1
logger.debug("Imported %d submissions", count)
return count
def lookup_submission_status(db, ids):
if not ids:
return {}
source = schema.fingerprint_source.\
join(schema.fingerprint).\
join(schema.track)
query = sql.select([schema.fingerprint_source.c.submission_id, schema.track.c.gid], from_obj=source).\
where(schema.fingerprint_source.c.submission_id.in_(ids))
results = {}
for id, track_gid in db.execute(query):
results[id] = track_gid
return results
|
[
"[email protected]"
] | |
4ad97214cab242cab7be5cd4232d8eca3d8ff676
|
89d920e8de469466f45172948082284b24ee8ca6
|
/sdt/bin/sdpostpipelineutils.py
|
4ea778b4bbc53e16782ee12c4bdf0fc87ea83537
|
[] |
no_license
|
cedadev/synda
|
fb22cce909e8b4fb8e51e7ab506c337d6ec5d9d2
|
9b9fa5b9b13719e1307f093d208256e359e501af
|
refs/heads/master
| 2021-09-24T03:56:21.545769 | 2020-09-16T10:34:44 | 2020-09-16T10:34:44 | 187,797,897 | 1 | 0 | null | 2020-01-28T12:56:15 | 2019-05-21T08:45:47 |
Python
|
UTF-8
|
Python
| false | false | 1,918 |
py
|
#!/usr/bin/env python
# -*- coding: ISO-8859-1 -*-
##################################
# @program synda
# @description climate models data transfer program
#  @copyright Copyright "(c) 2009 Centre National de la Recherche Scientifique CNRS.
#             All Rights Reserved"
# @license CeCILL (https://raw.githubusercontent.com/Prodiguer/synda/master/sdt/doc/LICENSE)
##################################
"""This module contains post pipeline generic functions. """
import sdapp
import sdconst
from sdexception import SDException
def exists_attached_parameter(file_,name):
if 'attached_parameters' in file_:
if name in file_['attached_parameters']:
return True
else:
return False
else:
return False
def get_attached_parameter(file_,name,default=None):
if 'attached_parameters' in file_:
return file_['attached_parameters'].get(name,default)
else:
return default
def get_attached_parameter__global(files,name):
"""This function assumes all files have the same value for the <name> attribute."""
if len(files)>0:
        file_=files[0] # use the first file (arbitrary; all files share the same value)
return get_attached_parameter(file_,name)
else:
return None
# The two functions below provide some abstraction over the file type.
def get_functional_identifier_value(f):
name=get_functional_identifier_name(f)
if name in f:
functional_id=f[name]
else:
raise SDException('SYDUTILS-020','Incorrect identifier (%s)'%name)
return functional_id
def get_functional_identifier_name(f):
if f["type"]==sdconst.SA_TYPE_FILE:
functional_id='file_functional_id'
elif f["type"]==sdconst.SA_TYPE_DATASET:
functional_id='dataset_functional_id'
else:
raise SDException('SYDUTILS-028','Incorrect type (%s)'%f["type"])
return functional_id
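# --- Tiny illustration of the attached-parameter helpers (the file dict below
# is made up for demonstration; it is not a real synda record) ---
if __name__ == '__main__':
    f = {'attached_parameters': {'model': 'IPSL-CM5A-LR'}}
    assert exists_attached_parameter(f, 'model')
    assert get_attached_parameter(f, 'model') == 'IPSL-CM5A-LR'
    assert get_attached_parameter(f, 'missing', default='n/a') == 'n/a'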
|
[
"[email protected]"
] | |
25ef6c97fd596d1d2354d836019a500f2ecc8459
|
a1508558da875f6ea3c55840b44df74dfd8e5f54
|
/trade_free/portfolio/simple_portfolio.py
|
94769841a1f4946dcd4018c81dafdf1cb40da449
|
[
"Apache-2.0"
] |
permissive
|
NewLanded/TradeFree
|
49cea6a17b5f3b4661d1c98a81e031123f02b3e6
|
f65122f5ed01cc1272fd2f03121ff3805a1967cb
|
refs/heads/master
| 2020-07-19T21:13:01.976587 | 2020-01-09T14:02:29 | 2020-01-09T14:02:29 | 206,515,265 | 2 | 2 |
Apache-2.0
| 2020-01-09T14:02:31 | 2019-09-05T08:36:58 |
Python
|
UTF-8
|
Python
| false | false | 6,922 |
py
|
import datetime
import math
import numpy as np
from utils.constant_util import BUY, SELL
from .abs_portfolio import AbsPortfolio
from ..event import OrderEvent
class SimplePortfolio(AbsPortfolio):
"""
测试Portfolio, 向brokerage对象发送固定的交易数量, 不考虑风控或头寸
"""
def __init__(self, start_date, event_queue, bars, initial_capital):
"""
Parameters:
        bars - The DataHandler object with current market data.
        events - The Event Queue object.
        start_date - The start date (bar) of the portfolio.
        initial_capital - The starting capital in USD.
"""
self.bars = bars
self.event_queue = event_queue
self.symbol_list = self.bars.symbol_list
self.start_date_previous_day = start_date - datetime.timedelta(days=1)
self.initial_capital = initial_capital
self.all_positions = self._construct_all_positions()
self.current_positions = dict((k, v) for k, v in [(s, 0) for s in self.symbol_list])
self.all_holdings = self._construct_all_holdings()
self.current_holdings = self._construct_current_holdings()
self.bs_data = []
def _construct_all_positions(self):
"""
        Construct all_positions using start_date, to determine when the time index begins.
"""
all_positions = dict((k, v) for k, v in [(s, 0) for s in self.symbol_list])
all_positions['datetime'] = self.start_date_previous_day
return [all_positions]
def _construct_all_holdings(self):
"""
        Construct all_holdings using start_date, to determine when the time index begins.
"""
all_holdings = dict((k, v) for k, v in [(s, 0.0) for s in self.symbol_list])
all_holdings['datetime'] = self.start_date_previous_day
        all_holdings['cash'] = self.initial_capital  # cash
        all_holdings['commission'] = 0.0  # cumulative commission
        all_holdings['total'] = self.initial_capital  # total account equity, including cash and any open positions; short positions count as negative
return [all_holdings]
def _construct_current_holdings(self):
"""
        Similar to _construct_all_holdings, but only applies to the current moment.
"""
current_holdings = dict((k, v) for k, v in [(s, 0.0) for s in self.symbol_list])
current_holdings['cash'] = self.initial_capital
current_holdings['commission'] = 0.0
current_holdings['total'] = self.initial_capital
return current_holdings
def update_signal(self, event):
"""
        Receive a SignalEvent and generate an OrderEvent.
"""
# if event.type == 'SIGNAL':
order_event = self.generate_naive_order(event)
self.event_queue.put(order_event)
def generate_naive_order(self, signal):
"""
        Naively generate an OrderEvent, without considering risk controls etc.
Parameters:
signal - The SignalEvent signal information.
"""
order = None
symbol = signal.symbol
event_id = signal.event_id
direction = signal.direction
order_type = signal.order_type
mkt_quantity = signal.quantity
mkt_price = signal.price
single_date = signal.single_date
if mkt_quantity:
order = OrderEvent(event_id, symbol, order_type, mkt_quantity, mkt_price, direction, single_date)
return order
def update_fill(self, event):
"""
        Update positions and holdings using a FillEvent.
"""
# if event.type == 'FILL':
self.update_positions_from_fill(event)
self.update_holdings_from_fill(event)
self.update_bs_data_from_fill(event)
def update_positions_from_fill(self, fill):
"""
        Take a FillEvent object and update the positions.
Parameters:
fill - The FillEvent object to update the positions with.
"""
# Check whether the fill is a buy or sell
fill_dir = 0
if fill.direction == BUY:
fill_dir = 1
if fill.direction == SELL:
fill_dir = -1
# Update positions list with new quantities
self.current_positions[fill.symbol] += fill_dir * fill.quantity
def update_bs_data_from_fill(self, fill):
"""记录buy sell 数据"""
close_point = self.bars.get_latest_bars(fill.symbol)[0][5]
bs_data = {"bs_date": fill.fill_date, "direction": fill.direction, "quantity": fill.quantity, "price": close_point, "symbol": fill.symbol}
self.bs_data.append(bs_data)
def update_holdings_from_fill(self, fill):
"""
        Take a FillEvent object and update the holdings.
Parameters:
fill - The FillEvent object to update the holdings with.
"""
# Check whether the fill is a buy or sell
fill_dir = 0
if fill.direction == BUY:
fill_dir = 1
if fill.direction == SELL:
fill_dir = -1
# Update holdings list with new quantities
fill_cost = self.bars.get_latest_bars(fill.symbol)[0][5] # Close price
cost = fill_dir * fill_cost * fill.quantity
self.current_holdings[fill.symbol] += cost
self.current_holdings['commission'] += fill.commission
self.current_holdings['cash'] -= (cost + fill.commission)
self.current_holdings['total'] -= (cost + fill.commission)
def update_timeindex(self):
"""
        Append a new record to positions, using the MarketEvent from the queue.
"""
bars = {}
for symbol in self.symbol_list:
bars[symbol] = self.bars.get_latest_bars(symbol, N=1)
# Update positions
data_position = dict((k, v) for k, v in [(s, 0) for s in self.symbol_list])
data_position['datetime'] = bars[self.symbol_list[0]][0][1]
for symbol in self.symbol_list:
data_position[symbol] = self.current_positions[symbol]
# Append the current positions
self.all_positions.append(data_position)
# Update holdings
data_holding = dict((k, v) for k, v in [(s, 0) for s in self.symbol_list])
data_holding['datetime'] = bars[self.symbol_list[0]][0][1]
data_holding['cash'] = self.current_holdings['cash']
data_holding['commission'] = self.current_holdings['commission']
data_holding['total'] = self.current_holdings['cash']
for symbol in self.symbol_list:
# Approximation to the real value
            market_value = self.current_positions[symbol] * bars[symbol][0][5]  # quantity * close price as the valuation
data_holding[symbol] = market_value
data_holding[symbol + "_close"] = bars[symbol][0][5]
data_holding['total'] = data_holding['total'] + market_value if math.isnan(market_value) is False else data_holding['total']
self.all_holdings.append(data_holding)
|
[
"[email protected]"
] | |
45149d5320d27687d7ff31975d14835cd619efa7
|
5d77833445b1ef95b5ca7b9a886f98cb38a16286
|
/code/9-12 TacotronDecoderwrapper.py
|
28ddda9aacb18edb2af96dfac848ac5941305610
|
[] |
no_license
|
wangbin0227/TensorFlow_Engineering_Implementation
|
bbafa4933c3244b65f0d3a2625fd58a9f8726c34
|
cb787e359da9ac5a08d00cd2458fecb4cb5a3a31
|
refs/heads/master
| 2023-03-18T10:58:58.916184 | 2021-03-16T15:03:49 | 2021-03-16T15:03:49 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,362 |
py
|
"""
@author: Code Doctor Studio (代码医生工作室)
@WeChat official account: xiangyuejiqiren (more articles and learning materials inside)
@source: companion code for "Deep Learning of TensorFlow: Engineering Project Practice" (700+ pages)
@code support: bbs.aianaconda.com (all questions answered)
"""
import tensorflow as tf
from tensorflow.python.framework import ops, tensor_shape
from tensorflow.python.ops import array_ops, check_ops, rnn_cell_impl, tensor_array_ops
from tensorflow.python.util import nest
from tensorflow.contrib.seq2seq.python.ops import attention_wrapper
attention = __import__("9-11 attention")
LocationSensitiveAttention = attention.LocationSensitiveAttention
class TacotronDecoderwrapper(tf.nn.rnn_cell.RNNCell):
    # initialization
def __init__(self,encoder_outputs, is_training, rnn_cell, num_mels , outputs_per_step):
super(TacotronDecoderwrapper, self).__init__()
self._training = is_training
self._attention_mechanism = LocationSensitiveAttention(256, encoder_outputs)# [N, T_in, attention_depth=256]
self._cell = rnn_cell
self._frame_projection = tf.keras.layers.Dense(units=num_mels * outputs_per_step, name='projection_frame')# [N, T_out/r, M*r]
# # [N, T_out/r, r]
self._stop_projection = tf.keras.layers.Dense(units=outputs_per_step,name='projection_stop')
self._attention_layer_size = self._attention_mechanism.values.get_shape()[-1].value
        self._output_size = num_mels * outputs_per_step  # define the output size
def _batch_size_checks(self, batch_size, error_message):
return [check_ops.assert_equal(batch_size, self._attention_mechanism.batch_size,
message=error_message)]
@property
def output_size(self):
return self._output_size
#@property
    def state_size(self):  # size of the returned state (see AttentionWrapper)
return tf.contrib.seq2seq.AttentionWrapperState(
cell_state=self._cell._cell.state_size,
time=tensor_shape.TensorShape([]),
attention=self._attention_layer_size,
alignments=self._attention_mechanism.alignments_size,
alignment_history=(),#)#,
attention_state = ())
    def zero_state(self, batch_size, dtype):  # return a zero state (see AttentionWrapper)
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
cell_state = self._cell.zero_state(batch_size, dtype)
error_message = (
"When calling zero_state of TacotronDecoderCell %s: " % self._base_name +
"Non-matching batch sizes between the memory "
"(encoder output) and the requested batch size.")
with ops.control_dependencies(
self._batch_size_checks(batch_size, error_message)):
cell_state = nest.map_structure(
lambda s: array_ops.identity(s, name="checked_cell_state"),
cell_state)
return tf.contrib.seq2seq.AttentionWrapperState(
cell_state=cell_state,
time=array_ops.zeros([], dtype=tf.int32),
attention=rnn_cell_impl._zero_state_tensors(self._attention_layer_size, batch_size, dtype),
alignments=self._attention_mechanism.initial_alignments(batch_size, dtype),
alignment_history=tensor_array_ops.TensorArray(dtype=dtype, size=0,dynamic_size=True),
attention_state = tensor_array_ops.TensorArray(dtype=dtype, size=0,dynamic_size=True)
)
    def __call__(self, inputs, state):  # this step's ground-truth output y and the decoder state from the previous step jointly predict the next step
        drop_rate = 0.5 if self._training else 0.0  # set dropout
        # Preprocess the input (prenet)
        with tf.variable_scope('decoder_prenet'):  # [N, T_in, prenet_depths[-1]=128]
            for i, size in enumerate([256, 128]):
                dense = tf.keras.layers.Dense(units=size, activation=tf.nn.relu, name='dense_%d' % (i+1))(inputs)
                inputs = tf.keras.layers.Dropout(rate=drop_rate, name='dropout_%d' % (i+1))(dense, training=self._training)
        # Concatenate the attention feature
        rnn_input = tf.concat([inputs, state.attention], axis=-1)
        # Apply a dense transform, then feed into the decoder RNN
        rnn_output, next_cell_state = self._cell(tf.keras.layers.Dense(256)(rnn_input), state.cell_state)
        # Compute attention for this step
        context_vector, alignments, cumulated_alignments = attention_wrapper._compute_attention(
            self._attention_mechanism, rnn_output, state.alignments, None)  # state.alignments holds the previous cumulative attention
        # Save the alignment history (consistent with the original AttentionWrapper)
        alignment_history = state.alignment_history.write(state.time, alignments)
        # Build this step's wrapper state
        next_state = tf.contrib.seq2seq.AttentionWrapperState(time=state.time + 1,
            cell_state=next_cell_state, attention=context_vector,
            alignments=cumulated_alignments, alignment_history=alignment_history,
            attention_state=state.attention_state)
        # Compute this step's result: concat the decoder output with the attention context as the final projection input
        projections_input = tf.concat([rnn_output, context_vector], axis=-1)
        # Two dense layers predict the next output frames and the <stop_token> flag
        cell_outputs = self._frame_projection(projections_input)  # mel features for the next outputs_per_step frames
stop_tokens = self._stop_projection(projections_input)
if self._training==False:
stop_tokens = tf.nn.sigmoid(stop_tokens)
return (cell_outputs, stop_tokens), next_state
|
[
"[email protected]"
] | |
ba257c7a32b2ec4aa2b22fc7c7b92e305f9f957d
|
5b3caf64b77161748d0929d244798a8fb914d9c5
|
/Python Excel Examples/GeneralApiDemo/convertInRequest.py
|
b196e1d1ec4e23d1a9d95f987f3a2b8969ea75af
|
[] |
no_license
|
EiceblueCloud/Spire.Cloud.Excel
|
0d56864991eaf8d44c38f21af70db614b1d804b7
|
d9845d5cefd15a3ab408b2c9f80828a4767e2b82
|
refs/heads/master
| 2021-07-20T23:44:39.068568 | 2021-07-15T03:04:49 | 2021-07-15T03:04:49 | 230,225,396 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 556 |
py
|
import spirecloudexcel
from spirecloudexcel.configuration import Configuration as ExcelConfiguration
from spirecloudexcel.api.general_api import GeneralApi
appId = "your id"
appKey = "your key"
baseUrl = "https://api.e-iceblue.cn"
configuration = ExcelConfiguration(appId, appKey,baseUrl)
api = spirecloudexcel.api.general_api.GeneralApi(configuration)
format = "Pdf" #Supported formats: Xlsx/Xls/Xlsb/Ods/Pdf/Xps/Ps/Pcl
file = "D:/inputFile/charts.xlsx"
password = ""
result = api.convert_in_request(format,file=file, password=password)
|
[
"[email protected]"
] | |
d634e31486f5044b31ab168805511a33ded6ef6a
|
eacfc1c0b2acd991ec2cc7021664d8e79c9e58f6
|
/ccpnmr2.4/python/ccp/format/marvin/generalIO.py
|
21409931818e74a5fd154a4652c790008a1b86d2
|
[] |
no_license
|
edbrooksbank/ccpnmr2.4
|
cfecb0896dcf8978d796e6327f7e05a3f233a921
|
f279ca9bb2d972b1ce075dad5fcc16e6f4a9496c
|
refs/heads/master
| 2021-06-30T22:29:44.043951 | 2019-03-20T15:01:09 | 2019-03-20T15:01:09 | 176,757,815 | 0 | 1 | null | 2020-07-24T14:40:26 | 2019-03-20T14:59:23 |
HTML
|
UTF-8
|
Python
| false | false | 2,522 |
py
|
"""
======================COPYRIGHT/LICENSE START==========================
generalIO.py: General I/O information for marvin files
Copyright (C) 2007 Wim Vranken (European Bioinformatics Institute)
=======================================================================
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
A copy of this license can be found in ../../../../license/LGPL.license
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
======================COPYRIGHT/LICENSE END============================
for further information, please contact :
- CCPN website (http://www.ccpn.ac.uk/)
- PDBe website (http://www.ebi.ac.uk/pdbe/)
- contact Wim Vranken ([email protected])
=======================================================================
If you are using this software for academic purposes, we suggest
quoting the following references:
===========================REFERENCE START=============================
R. Fogh, J. Ionides, E. Ulrich, W. Boucher, W. Vranken, J.P. Linge, M.
Habeck, W. Rieping, T.N. Bhat, J. Westbrook, K. Henrick, G. Gilliland,
H. Berman, J. Thornton, M. Nilges, J. Markley and E. Laue (2002). The
CCPN project: An interim report on a data model for the NMR community
(Progress report). Nature Struct. Biol. 9, 416-418.
Wim F. Vranken, Wayne Boucher, Tim J. Stevens, Rasmus
H. Fogh, Anne Pajon, Miguel Llinas, Eldon L. Ulrich, John L. Markley, John
Ionides and Ernest D. Laue (2005). The CCPN Data Model for NMR Spectroscopy:
Development of a Software Pipeline. Proteins 59, 687 - 696.
===========================REFERENCE END===============================
"""
from ccp.format.general.formatIO import FormatFile
from ccp.format.general.Constants import defaultMolCode
#####################
# Class definitions #
#####################
class MarvinGenericFile(FormatFile):
def setGeneric(self):
self.format = 'marvin'
self.defaultMolCode = defaultMolCode
|
[
"[email protected]"
] | |
8429023f1b3c30a87447a7c557bf8a050b626b9e
|
f1cb02057956e12c352a8df4ad935d56cb2426d5
|
/LeetCode/245. Shortest Word Distance III/Solution.py
|
fe576e1094fd4f1abf5f1fd442f98d9271e0048c
|
[] |
no_license
|
nhatsmrt/AlgorithmPractice
|
191a6d816d98342d723e2ab740e9a7ac7beac4ac
|
f27ba208b97ed2d92b4c059848cc60f6b90ce75e
|
refs/heads/master
| 2023-06-10T18:28:45.876046 | 2023-05-26T07:46:42 | 2023-05-26T07:47:10 | 147,932,664 | 15 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 768 |
py
|
from typing import List
class Solution:
    def shortestWordDistance(self, words: List[str], word1: str, word2: str) -> int:
        # Map each word to the (sorted) list of indices where it occurs.
        index = {}
        for i, word in enumerate(words):
            if word not in index:
                index[word] = []
            index[word].append(i)
        ret = 10000000000
        if word1 == word2:
            # Same word: the answer is the smallest gap between consecutive occurrences.
            for i in range(len(index[word1]) - 1):
                ret = min(ret, index[word1][i + 1] - index[word1][i])
            return ret
        # Different words: two-pointer sweep over the two sorted occurrence lists.
        occ1 = index[word1]
        occ2 = index[word2]
        i = 0
        j = 0
        while i < len(occ1) and j < len(occ2):
            ret = min(ret, abs(occ1[i] - occ2[j]))
            # Advance the pointer at the smaller index to try to shrink the gap.
            if occ1[i] < occ2[j]:
                i += 1
            else:
                j += 1
        return ret
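# Quick self-check using the example from the problem statement.
if __name__ == "__main__":
    words = ["practice", "makes", "perfect", "coding", "makes"]
    s = Solution()
    assert s.shortestWordDistance(words, "makes", "coding") == 1
    assert s.shortestWordDistance(words, "makes", "makes") == 3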
|
[
"[email protected]"
] | |
8d9d0e317790133f034bcece449e9d1801f40422
|
f124cb2443577778d8708993c984eafbd1ae3ec3
|
/saleor/plugins/openid_connect/dataclasses.py
|
df281787eae5485c4eed4cc9fa9dc62b63f84957
|
[
"BSD-3-Clause"
] |
permissive
|
quangtynu/saleor
|
ac467193a7779fed93c80251828ac85d92d71d83
|
5b0e5206c5fd30d81438b6489d0441df51038a85
|
refs/heads/master
| 2023-03-07T19:41:20.361624 | 2022-10-20T13:19:25 | 2022-10-20T13:19:25 | 245,860,106 | 1 | 0 |
BSD-3-Clause
| 2023-03-06T05:46:25 | 2020-03-08T17:44:18 |
Python
|
UTF-8
|
Python
| false | false | 316 |
py
|
from dataclasses import dataclass
@dataclass
class OpenIDConnectConfig:
client_id: str
client_secret: str
enable_refresh_token: bool
json_web_key_set_url: str
authorization_url: str
logout_url: str
token_url: str
user_info_url: str
audience: str
use_scope_permissions: bool
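# Illustrative instantiation (the endpoint URLs below are placeholders, not
# real identity-provider endpoints).
if __name__ == "__main__":
    config = OpenIDConnectConfig(
        client_id="client-id",
        client_secret="secret",
        enable_refresh_token=True,
        json_web_key_set_url="https://idp.example.com/jwks",
        authorization_url="https://idp.example.com/authorize",
        logout_url="https://idp.example.com/logout",
        token_url="https://idp.example.com/token",
        user_info_url="https://idp.example.com/userinfo",
        audience="saleor",
        use_scope_permissions=False,
    )
    print(config)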
|
[
"[email protected]"
] | |
48b3d55b329489d00e4124a4623d217aa24253ca
|
9b64f0f04707a3a18968fd8f8a3ace718cd597bc
|
/huaweicloud-sdk-osm/huaweicloudsdkosm/v2/model/incident_message_v2.py
|
0bef2967a5076ff962fc33551f637afbe604a4a8
|
[
"Apache-2.0"
] |
permissive
|
jaminGH/huaweicloud-sdk-python-v3
|
eeecb3fb0f3396a475995df36d17095038615fba
|
83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b
|
refs/heads/master
| 2023-06-18T11:49:13.958677 | 2021-07-16T07:57:47 | 2021-07-16T07:57:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,941 |
py
|
# coding: utf-8
import re
import six
class IncidentMessageV2:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'type': 'int',
'replier': 'str',
'content': 'str',
'message_id': 'str',
'replier_name': 'str',
'create_time': 'datetime',
'is_first_message': 'int',
'accessory_list': 'list[SimpleAccessoryV2]'
}
attribute_map = {
'type': 'type',
'replier': 'replier',
'content': 'content',
'message_id': 'message_id',
'replier_name': 'replier_name',
'create_time': 'create_time',
'is_first_message': 'is_first_message',
'accessory_list': 'accessory_list'
}
def __init__(self, type=None, replier=None, content=None, message_id=None, replier_name=None, create_time=None, is_first_message=None, accessory_list=None):
"""IncidentMessageV2 - a model defined in huaweicloud sdk"""
self._type = None
self._replier = None
self._content = None
self._message_id = None
self._replier_name = None
self._create_time = None
self._is_first_message = None
self._accessory_list = None
self.discriminator = None
if type is not None:
self.type = type
if replier is not None:
self.replier = replier
if content is not None:
self.content = content
if message_id is not None:
self.message_id = message_id
if replier_name is not None:
self.replier_name = replier_name
if create_time is not None:
self.create_time = create_time
if is_first_message is not None:
self.is_first_message = is_first_message
if accessory_list is not None:
self.accessory_list = accessory_list
@property
def type(self):
"""Gets the type of this IncidentMessageV2.
        Type: 0 = customer message, 1 = Huawei engineer message
:return: The type of this IncidentMessageV2.
:rtype: int
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this IncidentMessageV2.
        Type: 0 = customer message, 1 = Huawei engineer message
:param type: The type of this IncidentMessageV2.
:type: int
"""
self._type = type
@property
def replier(self):
"""Gets the replier of this IncidentMessageV2.
        Replier ID
:return: The replier of this IncidentMessageV2.
:rtype: str
"""
return self._replier
@replier.setter
def replier(self, replier):
"""Sets the replier of this IncidentMessageV2.
        Replier ID
:param replier: The replier of this IncidentMessageV2.
:type: str
"""
self._replier = replier
@property
def content(self):
"""Gets the content of this IncidentMessageV2.
        Message content
:return: The content of this IncidentMessageV2.
:rtype: str
"""
return self._content
@content.setter
def content(self, content):
"""Sets the content of this IncidentMessageV2.
        Message content
:param content: The content of this IncidentMessageV2.
:type: str
"""
self._content = content
@property
def message_id(self):
"""Gets the message_id of this IncidentMessageV2.
        Message ID
:return: The message_id of this IncidentMessageV2.
:rtype: str
"""
return self._message_id
@message_id.setter
def message_id(self, message_id):
"""Sets the message_id of this IncidentMessageV2.
        Message ID
:param message_id: The message_id of this IncidentMessageV2.
:type: str
"""
self._message_id = message_id
@property
def replier_name(self):
"""Gets the replier_name of this IncidentMessageV2.
        Replier name
:return: The replier_name of this IncidentMessageV2.
:rtype: str
"""
return self._replier_name
@replier_name.setter
def replier_name(self, replier_name):
"""Sets the replier_name of this IncidentMessageV2.
        Replier name
:param replier_name: The replier_name of this IncidentMessageV2.
:type: str
"""
self._replier_name = replier_name
@property
def create_time(self):
"""Gets the create_time of this IncidentMessageV2.
        Creation time
:return: The create_time of this IncidentMessageV2.
:rtype: datetime
"""
return self._create_time
@create_time.setter
def create_time(self, create_time):
"""Sets the create_time of this IncidentMessageV2.
        Creation time
:param create_time: The create_time of this IncidentMessageV2.
:type: datetime
"""
self._create_time = create_time
@property
def is_first_message(self):
"""Gets the is_first_message of this IncidentMessageV2.
        Whether this is the first message
:return: The is_first_message of this IncidentMessageV2.
:rtype: int
"""
return self._is_first_message
@is_first_message.setter
def is_first_message(self, is_first_message):
"""Sets the is_first_message of this IncidentMessageV2.
        Whether this is the first message
:param is_first_message: The is_first_message of this IncidentMessageV2.
:type: int
"""
self._is_first_message = is_first_message
@property
def accessory_list(self):
"""Gets the accessory_list of this IncidentMessageV2.
        Attachment list
:return: The accessory_list of this IncidentMessageV2.
:rtype: list[SimpleAccessoryV2]
"""
return self._accessory_list
@accessory_list.setter
def accessory_list(self, accessory_list):
"""Sets the accessory_list of this IncidentMessageV2.
        Attachment list
:param accessory_list: The accessory_list of this IncidentMessageV2.
:type: list[SimpleAccessoryV2]
"""
self._accessory_list = accessory_list
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
import simplejson as json
return json.dumps(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, IncidentMessageV2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
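# --- Illustrative round-trip (the values below are made up, not real incident
# data) ---
if __name__ == '__main__':
    msg = IncidentMessageV2(type=0, content='hello', message_id='m-001')
    print(msg.to_dict())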
|
[
"[email protected]"
] | |
8376f3ba760e0968095243d0a6947b384dd9d9c9
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/utils/ETF/Redemption_SA/YW_ETFSS_SZSH_019.py
|
1b0fde97d8686e9b2f5c74c8d27dca8b23258a17
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,493 |
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
import time
sys.path.append("/home/yhl2/workspace/xtp_test/ETF")
from import_common import *
sys.path.append("/home/yhl2/workspace/xtp_test/ETF/etf_service")
from ETF_GetComponentShare import etf_get_all_component_stk
class YW_ETFSS_SZSH_019(xtp_test_case):
def test_YW_ETFSS_SZSH_019(self):
        # ----------- ETF redemption -----------
        title = 'Shenzhen ETF redemption -- cash substitution allowed: hold 1 unit of the ETF on T-1, redeem the ETF on T'
        # Define the expected values for the current test case.
        # Expected status (期望状态): initial, not filled, partially filled, fully filled,
        # partial-cancel reported, partially cancelled, reported pending cancel, cancelled,
        # rejected, cancel-rejected, internal cancel.
        # xtp_ID and cancel_xtpID default to 0 and need not change.
case_goal = {
'case_ID': 'ATC-204-019',
'期望状态': '全成',
'errorID': 0,
'errorMSG': '',
'是否生成报单': '是',
'是否是撤废': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title + ', case_ID=' + case_goal['case_ID'])
unit_info = {
            'ticker': '189902',  # ETF code
            'etf_unit': 1,  # number of ETF units to redeem
            'etf_unit_sell': 1,  # number of ETF units to sell
            'component_unit_sell': 1  # number of component-stock units to sell
}
        # ----------- ETF redemption -----------
        # Parameters: ticker, market, security type, security status, trading status,
        # side (B = buy, S = sell), expected status, Api
stkparm = QueryEtfQty(unit_info['ticker'], '2', '14', '2', '0',
'B', case_goal['期望状态'], Api)
        # ----------- Query component-stock positions before the ETF redemption -----------
component_stk_info = etf_get_all_component_stk(unit_info['ticker'])
        # Define the order parameter info ------------------------------------------
        # If fetching the order parameters fails, the test case fails.
if stkparm['返回结果'] is False:
rs = {
'用例测试结果': stkparm['返回结果'],
'用例错误原因': '获取下单参数失败, ' + stkparm['错误原因'],
}
etf_query_log(case_goal, rs)
self.assertEqual(rs['用例测试结果'], True)
else:
wt_reqs = {
'business_type':
Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_ETF'],
'market':
Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
'ticker':
stkparm['证券代码'],
'side':
Api.const.XTP_SIDE_TYPE['XTP_SIDE_REDEMPTION'],
'price_type':
Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
'quantity':
int(unit_info['etf_unit'] * stkparm['最小申赎单位']),
}
EtfParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
CaseParmInsertMysql(case_goal, wt_reqs)
rs = etfServiceTest(Api, case_goal, wt_reqs, component_stk_info)
etf_creation_log(case_goal, rs)
            # -------- Secondary market: sell the ETF -----------
case_goal['期望状态'] = '废单'
case_goal['errorID'] = 11010121
case_goal['errorMSG'] = 'Failed to check security quantity.'
            quantity = int(unit_info['etf_unit_sell'] *
                           stkparm['最小申赎单位'])  # quantity of ETF to sell on the secondary market
quantity_list = split_etf_quantity(quantity)
            # Query the limit-up price
limitup_px = getUpPrice(stkparm['证券代码'])
rs = {}
for etf_quantity in quantity_list:
wt_reqs_etf = {
'business_type':
Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
'market':
Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
'ticker':
stkparm['证券代码'],
'side':
Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type':
Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_CANCEL'],
'price':
limitup_px,
'quantity':
etf_quantity,
}
                ParmIni(Api, case_goal['期望状态'], wt_reqs_etf['price_type'])
                CaseParmInsertMysql(case_goal, wt_reqs_etf)
rs = serviceTest(Api, case_goal, wt_reqs_etf)
if rs['用例测试结果'] is False:
etf_sell_log(case_goal, rs)
self.assertEqual(rs['用例测试结果'], True)
return
etf_sell_log(case_goal, rs)
            # ------------ Sell the component stocks on the secondary market -----------
case_goal['期望状态'] = '全成'
case_goal['errorID'] = 0
case_goal['errorMSG'] = ''
etf_component_info = QueryEtfComponentsInfoDB(stkparm['证券代码'],wt_reqs['market'])
rs = {}
for stk_info in etf_component_info:
stk_code = stk_info[0]
components_share = QueryEtfComponentsDB(stkparm['证券代码'],
stk_code)
components_total = int(
components_share * unit_info['component_unit_sell'])
quantity = get_valid_amount(components_total)
limitup_px = getUpPrice(stk_code)
wt_reqs = {
'business_type':
Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
'market':
Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
'ticker':
stk_code,
'side':
Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type':
Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_CANCEL'],
'price':
limitup_px,
'quantity':
quantity,
}
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
rs = serviceTest(Api, case_goal, wt_reqs)
if rs['用例测试结果'] is False:
etf_components_sell_log(case_goal, rs)
self.assertEqual(rs['用例测试结果'], True)
etf_components_sell_log(case_goal, rs)
self.assertEqual(rs['用例测试结果'], True)
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
68a3e17117ffc29cf8def3bcc4810417498b7ef9
|
297c440536f04c5ff4be716b445ea28cf007c930
|
/App/migrations/0007_auto_20200403_2201.py
|
2c78246df112461b79785ba55ec3ca0a977b1975
|
[] |
no_license
|
Chukslord1/SchoolManagement
|
446ab8c643035c57d7320f48905ef471ab3e0252
|
23fd179c0078d863675b376a02193d7c1f3c52e0
|
refs/heads/master
| 2023-02-03T09:14:24.036840 | 2020-12-14T11:06:43 | 2020-12-14T11:06:43 | 247,177,159 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 412 |
py
|
# Generated by Django 3.0 on 2020-04-04 05:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('App', '0006_auto_20200403_1841'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='secret_pin',
field=models.CharField(blank=True, max_length=12, null=True),
),
]
|
[
"[email protected]"
] | |
7c344fcf6b9f60cc30778e1ef5ef3f5afc6f3ea0
|
ba22f289ad1c49fb286105aeaa9abd8548907dc5
|
/tempest/tests/lib/test_tempest_lib.py
|
d70e53dee8a7a2d9d91a0a5a5f89d4b72c3be367
|
[
"Apache-2.0"
] |
permissive
|
ssameerr/tempest
|
cf3f41b3aa07124a1bac69c3c3f2e393b52e671c
|
e413f28661c2aab3f8da8d005db1fa5c59cc6b68
|
refs/heads/master
| 2023-08-08T05:00:45.998493 | 2016-06-08T13:13:48 | 2016-06-08T13:13:48 | 60,715,004 | 0 | 0 |
Apache-2.0
| 2023-02-15T02:18:34 | 2016-06-08T17:02:15 |
Python
|
UTF-8
|
Python
| false | false | 780 |
py
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test_tempest.lib
----------------------------------
Tests for `tempest.lib` module.
"""
from tempest.tests import base
class TestTempest_lib(base.TestCase):
def test_something(self):
pass
|
[
"[email protected]"
] | |
c49cb629b81dd8bab875ff2f9d3dbd0a5ce2d44e
|
fea2eff6ed6ff05879e071d52d978b1f2f322f31
|
/TensorFlow深度学习应用实践_源代码/08/8-1.py
|
e433dd823a9c612a94b2b30fa227c819242e8df1
|
[] |
no_license
|
GetMyPower/mypython
|
71ec8db85c82e33b893c5d53ac64a007951fd8f0
|
1846148e327e7d14ebb96c9fea4b47aa61762a69
|
refs/heads/master
| 2022-03-22T08:11:56.113905 | 2019-12-20T15:00:23 | 2019-12-20T15:00:23 | 198,230,237 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 183 |
py
|
import tensorflow as tf
input1 = tf.constant(1)
print(input1)
input2 = tf.Variable(2, dtype=tf.int32)
print(input2)
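# Note: the next assignment rebinds the name input2 to the constant tensor
# input1, so the tf.Variable above is discarded and sess.run(input2) works
# without running an initializer.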
input2 = input1
sess = tf.Session()
print(sess.run(input2))
|
[
"[email protected]"
] | |
8e0cd4727a216f881c84d55625a70efbdcadb46d
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_287/ch150_2020_04_13_20_45_30_391205.py
|
d44700ac9eb31e56af69bcc4d9db551fc97ab291
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 234 |
py
|
import math
def calcula_pi(n):
if n == 1:
p = (6**(1/2))
return p
p = 0
valores = list(range(n))
valores.remove(0)
for a in (valores):
p += (6/(a**2))
p = (p**(1/2))
return p
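
# Minimal usage sketch (assumption: quick sanity check, not part of the exercise):
#   calcula_pi(1)      # -> sqrt(6) ~ 2.449
#   calcula_pi(10000)  # -> ~3.1415, slowly approaching math.pi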
|
[
"[email protected]"
] | |
d78a7f97e2bbf295b699f32b08fc0480aa10688a
|
67ae1b00411ad63726e0abb07ba82ac5b75fc32a
|
/findmusician/wsgi.py
|
e3e754c495255bd2fa5792053e4e437c249d3059
|
[] |
no_license
|
SimonKorzonek/findmusician
|
e40429bf45115de0709ef6fe92ace3c5cd195660
|
fc23e0d6b5da7d98423accef5eb82b9b6c5516bc
|
refs/heads/main
| 2023-02-15T10:12:02.070458 | 2021-01-05T23:02:05 | 2021-01-05T23:02:05 | 327,074,301 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 401 |
py
|
"""
WSGI config for findmusician project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'findmusician.settings')
application = get_wsgi_application()
|
[
"[email protected]"
] | |
c4463b466523f98a0389beff01c3891c2fefadb3
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_exercises/templates/_algorithms_challenges/leetcode/LeetcodePythonProject_with_solution/leetcode_0751_0800/LeetCode792_NumberOfMatchingSubsequences.py
|
ce718c54dd28669b15ae5ae32138582fbd1dc330
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 |
Python
|
UTF-8
|
Python
| false | false | 1,018 |
py
|
'''
Created on Apr 16, 2018
@author: tongq
'''
class Solution(object):
    def numMatchingSubseq(self, S, words):
        """
        :type S: str
        :type words: List[str]
        :rtype: int
        """
        # Bucket every word under its first letter, then sweep S once,
        # advancing each word one letter at a time.
        hashmap = {}
        for i in range(26):
            c = chr(ord('a') + i)
            hashmap[c] = []
        for word in words:
            hashmap[word[0]].append(word)
        count = 0
        for c in S:
            bucket = hashmap[c]
            size = len(bucket)
            for i in range(size):
                word = bucket.pop(0)
                if len(word) == 1:
                    count += 1
                else:
                    hashmap[word[1]].append(word[1:])
        return count

    def test(self):
        testCases = [
            [
                'abcde',
                ["a", "bb", "acd", "ace"],
            ],
        ]
        for s, words in testCases:
            result = self.numMatchingSubseq(s, words)
            print('result: %s' % result)
            print('-=' * 30 + '-')

if __name__ == '__main__':
    Solution().test()
|
[
"[email protected]"
] | |
f8e7c4835096c2301aac6f202b1a28fee2bab730
|
4c984a318ccf26e765f902669399da66497e194d
|
/pollexe/urls.py
|
5ed934d9e6f6c6c24682d62a19f5786bdf6c0416
|
[] |
no_license
|
sajalmia381/pollexe
|
914af663bad6becb4308c738a16240028f37f99b
|
3ead47fee43855aba1ee0f4c2b3f222cac6a9a68
|
refs/heads/master
| 2020-04-21T12:42:49.283843 | 2019-02-07T13:43:40 | 2019-02-07T13:43:40 | 169,572,196 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 728 |
py
|
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('', include('account.urls', namespace='account')),
path('', include('page.urls', namespace='page')),
path('', include('blog.urls', namespace='blog')),
path('', include('product.urls', namespace='product')),
# admin
path('admin/', admin.site.urls),
# Third party
path('summernote/', include('django_summernote.urls')),
path('front-edit/', include('front.urls')),
]
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"[email protected]"
] | |
3ab3e27fb739a45761ef77b83f03b45a6dab15f9
|
b00efc53bec9b05f91703db81387325fae0a771e
|
/MAI/olymp/17.02.05/a.py
|
364918dd1e8451ebddaa61670614cbf7012cf250
|
[] |
no_license
|
21zaber/MAI
|
ac88eb1dd4b8f6b9d184527a3b1824a05993a9e1
|
45f25bdd5996329fd05f3e0ec7eb1289443f17b5
|
refs/heads/master
| 2021-01-17T07:12:22.303754 | 2018-02-08T15:05:30 | 2018-02-08T15:05:30 | 54,101,933 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 832 |
py
|
n = int(input())
q = n+3
t = [[[[0 for i in range(q)] for i in range(q)] for i in range(q)] for i in range(n+1)]
# len vs max last
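# Reading of the state (inferred from the transitions below): t[l][vs][mx][lst]
# counts length-l sequences after vs "new maximum" steps, with current maximum
# mx and last element lst; the next element i ranges over 0..vs+1 and is valid
# if i <= lst (vs unchanged) or i >= mx (i becomes the new maximum, vs+1).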
t[1][0][0][0] = 1
for l in range(1, n):
for vs in range(l+1):
for mx in range(l):
for lst in range(mx+1):
c = 0
if t[l][vs][mx][lst] == 0:
continue
for i in range(vs+2):
if i <= lst:
t[l+1][vs][mx][i] += t[l][vs][mx][lst]
c+=1
elif i >= mx:
t[l+1][vs+1][i][i] += t[l][vs][mx][lst]
c+=1
#print('l: {}, vs: {}, m: {}, lst: {}, c: {}'.format(l, vs, mx, lst, c))
ans = 0
for i in t[-1]:
for j in i:
for k in j:
ans += k
print(ans)
|
[
"[email protected]"
] | |
1ecf217ac3f73bc4e4f65a2a705ed8b490973479
|
155b6c640dc427590737750fe39542a31eda2aa4
|
/api-test/easyloan/testAction/loginAction.py
|
1ffce9a91563013b011f796bed0bf0a925d88370
|
[] |
no_license
|
RomySaber/api-test
|
d4b3add00e7e5ed70a5c72bb38dc010f67bbd981
|
028c9f7fe0d321db2af7f1cb936c403194db850c
|
refs/heads/master
| 2022-10-09T18:42:43.352325 | 2020-06-11T07:00:04 | 2020-06-11T07:00:04 | 271,468,744 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,582 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time :2019-04-23 下午 2:33
@Author : 罗林
@File : loginAction.py
@desc :
"""
import requests
import json
from common.myConfig import ConfigUtils as conf
from common.myCommon.Logger import getlog
from common.myConfig import MysqlConfig as ms
from common.mydb import MysqlClent
User = 'fqhd001'
Passwd = '5e81f67ed14a5443ec6a3682513f0b9b'
mobile = '13699479886'
app_passwd = 'll123456'
DB = MysqlClent.get_conn('192.168.15.159', 3306, 'easyloan', 'easyloan', '78dk.com')
web_URL = ms.get('easyloan_web_apiURL')
app_URL = ms.get('easyloan_app_apiURL')
LOGGER = getlog(__name__)
API_TEST_HEADERS = {"Content-Type": "application/json"}
rq = requests.Session()
sign = conf.get('report', 'sign')
def test_easyloan_web_login():
url = web_URL + '/api/78dk/web/login'
querystring = json.dumps({"username": User, "password": Passwd})
response = rq.post(url, headers=API_TEST_HEADERS, data=querystring)
LOGGER.info("token:【{}】".format(response.json()["data"]["token"]))
return response.json()["data"]["token"]
def test_easyloan_app_login():
url = app_URL + '/api/78dk/clientapp/login/pwLogin'
querystring = json.dumps({"mobile": mobile, "password": app_passwd})
response = rq.post(url, headers=API_TEST_HEADERS, data=querystring)
LOGGER.info("token:【{}】".format(response.json()["data"]["token"]))
LOGGER.info(response.text)
return response.json()["data"]["token"]
def test_yygl_login():
pass
if __name__ == '__main__':
test_easyloan_app_login()
|
[
"[email protected]"
] | |
4852b83b2264cd75b2dfc36bc578fc47b1f9e399
|
cf5efed6bc1e9bd27f94663d2443c6bdd1cb472a
|
/1-pack_web_static.py
|
1688b66bfe9161d9b0827db23d9332f8638567fd
|
[] |
no_license
|
yulyzulu/AirBnB_clone_v2
|
593db702ede02ac17b6883b3e99b6e1eb36a33ee
|
1a40aec60996dc98ad9ff45f5e1224816ff6735b
|
refs/heads/master
| 2021-05-25T15:33:22.100621 | 2020-04-23T23:23:25 | 2020-04-23T23:23:25 | 253,810,650 | 0 | 0 | null | 2020-04-07T14:02:36 | 2020-04-07T14:02:35 | null |
UTF-8
|
Python
| false | false | 682 |
py
|
#!/usr/bin/python3
"""Module that execute functions"""
from fabric.api import local
from fabric.decorators import runs_once
from datetime import datetime
from os.path import getsize
@runs_once
def do_pack():
local("mkdir -p versions")
date_time = datetime.now().strftime("%Y%m%d%H%M%S")
command = local("tar -cvzf versions/web_static_{}.tgz ./web_stat\
ic".format(date_time))
if command.succeeded:
size = getsize('versions/web_static_{}.tgz'.format(date_time))
print("web_static packed: versions/web_static_{}.tgz -> {}Byt\
es".format(date_time, size))
return ('versions/web_static_{}.tgz'.format(date_time))
else:
return None
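
# Usage sketch (assumption: Fabric 1.x command line):
#   fab -f 1-pack_web_static.py do_pack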
|
[
"[email protected]"
] | |
6f92dc3b1e46aec56a6ea497917e884f922966d1
|
a23ec1e8470f87d1b3fa34b01506d6bdd63f6569
|
/algorithms/967. Numbers With Same Consecutive Differences.py
|
3750ace278b12334a762bdf37e95b48783d3f618
|
[] |
no_license
|
xiaohai0520/Algorithm
|
ae41d2137e085a30b2ac1034b8ea00e6c9de3ef1
|
96945ffadd893c1be60c3bde70e1f1cd51edd834
|
refs/heads/master
| 2023-04-14T17:41:21.918167 | 2021-04-20T13:57:09 | 2021-04-20T13:57:09 | 156,438,761 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 819 |
py
|
# DFS problem:
# each time we add one digit, we make sure it satisfies the condition.
class Solution:
def numsSameConsecDiff(self, N, K):
"""
:type N: int
:type K: int
:rtype: List[int]
"""
if N == 1:
return [i for i in range(10)]
if K == 0:
return list(map(int,[str(i)*N for i in range(1,10)]))
res = []
def dfs(path,l):
if l == N:
res.append(path)
return
cur = path % 10
if cur + K < 10:
dfs(path * 10 + cur + K, l + 1)
if cur - K >= 0:
dfs(path * 10 + cur - K, l + 1)
for i in range(1,10):
dfs(i,1)
return res
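
# Usage sketch:
#   Solution().numsSameConsecDiff(3, 7)  # -> [181, 292, 707, 818, 929]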
|
[
"[email protected]"
] | |
fd340134c630935c8dff1c7e83d8d2b1a4bd61dc
|
fcdfe976c9ed60b18def889692a17dc18a8dd6d7
|
/python/qt/close_dialog.py
|
732b8043635aa9a35802bd6867ad50d908c18473
|
[] |
no_license
|
akihikoy/ay_test
|
4907470889c9bda11cdc84e8231ef3156fda8bd7
|
a24dfb720960bfedb94be3b4d147e37616e7f39a
|
refs/heads/master
| 2023-09-02T19:24:47.832392 | 2023-08-27T06:45:20 | 2023-08-27T06:45:20 | 181,903,332 | 6 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,098 |
py
|
#!/usr/bin/python
#\file close_dialog.py
#\brief certain python script
#\author Akihiko Yamaguchi, [email protected]
#\version 0.1
#\date Apr.01, 2017
# http://stackoverflow.com/questions/14834494/pyqt-clicking-x-doesnt-trigger-closeevent
import sys
from PyQt4 import QtGui, QtCore, uic
class MainWindow(QtGui.QWidget):
def __init__(self):
QtGui.QWidget.__init__(self)
# Set window size.
self.resize(320, 240)
# Set window title
self.setWindowTitle("Hello World!")
# Add a button
btn= QtGui.QPushButton('Hello World!', self)
btn.setToolTip('Click to quit!')
btn.clicked.connect(self.close)
btn.resize(btn.sizeHint())
btn.move(100, 80)
def closeEvent(self, event):
print("event")
reply = QtGui.QMessageBox.question(self, 'Message',
"Are you sure to quit?", QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
event.accept()
else:
event.ignore()
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
win = MainWindow()
win.show()
sys.exit(app.exec_())
|
[
"[email protected]"
] | |
2e4c319c80a704585fbab79e4c5ae8329e38f201
|
ddc7e22952de6298d14b9297e765db29f327cfcb
|
/BFS/medium/minKnightMoves.py
|
ec82adee4ecf9c80d54548712c8789fa3cbcdfdb
|
[
"MIT"
] |
permissive
|
linminhtoo/algorithms
|
154a557b4acada2618aac09a8868db9f3722204f
|
884422a7c9f531e7ccaae03ba1ccbd6966b23dd3
|
refs/heads/master
| 2023-03-21T23:01:58.386497 | 2021-03-16T07:13:32 | 2021-03-16T07:13:32 | 296,247,654 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,461 |
py
|
# leetcode is premium problem
# see https://www.geeksforgeeks.org/minimum-steps-reach-target-knight/
# https://www.hackerrank.com/challenges/knightl-on-chessboard/problem <-- slightly harder version of the same problem (SCROLL DOWN)
# BFS
class Cell: # don't have to use this, can just use a tuple also (x, y, dist)
def __init__(self, x: int, y: int, dist: int):
self.x = x
self.y = y
self.dist = dist
from typing import Tuple
from collections import deque
class Solution:
def inBoard(self, x: int, y: int) -> bool:
return (0 <= x < 8) and (0 <= y < 8)
def minKnightMoves(self, knight_pos: Tuple[int, int],
target_pos: Tuple[int, int]) -> int:
dirs = [
(1, 2),
(2, 1),
(-1, -2),
(-2, -1),
(-1, 2),
(2, -1),
(1, -2),
(-2, 1)
]
queue = deque()
queue.append(Cell(knight_pos[0], knight_pos[1], 0))
visited = [[False] * 8 for _ in range(8)]
visited[knight_pos[0]][knight_pos[1]] = True
while queue:
now = queue.popleft()
if (now.x, now.y) == target_pos:
return now.dist
for i in range(8):
next_x = now.x + dirs[i][0]
next_y = now.y + dirs[i][1]
if self.inBoard(next_x, next_y):
if not visited[next_x][next_y]:
visited[next_x][next_y] = True
                        queue.append(Cell(next_x, next_y, now.dist + 1))
        return -1  # fallback; every square on a standard 8x8 board is reachable
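
# Minimal usage sketch (assumption: 0-indexed (x, y) coordinates on the 8x8 board):
#   Solution().minKnightMoves((0, 0), (4, 5))  # -> 3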
# https://www.hackerrank.com/challenges/knightl-on-chessboard/problem
class Solution_hackerrank_mine_passall:
def knightlOnAChessboard(self, n: int):
out = [[0]*(n-1) for _ in range(n-1)]
for i in range(1, n):
for j in range(1, n):
if out[j-1][i-1] != 0: # output array is symmetric
out[i-1][j-1] = out[j-1][i-1]
else:
                    out[i-1][j-1] = self.makeMove(n, i, j)
return out
@staticmethod
def inBoard(n: int, x: int, y: int) -> bool:
return (0 <= x < n) and (0 <= y < n)
@staticmethod
def makeMove(n: int, a: int, b: int) -> int:
dirs = [
(a, b),
(b, a),
(-a, b),
(b, -a),
(a, -b),
(-b, a),
(-a, -b),
(-b, -a)
]
queue = deque()
queue.append(Cell(0, 0, 0))
visited = [[False] * n for _ in range(n)]
visited[0][0] = True
while queue:
now = queue.popleft()
if (now.x, now.y) == (n-1, n-1):
return now.dist
for i in range(8):
next_x = now.x + dirs[i][0]
next_y = now.y + dirs[i][1]
                if Solution_hackerrank_mine_passall.inBoard(n, next_x, next_y):
# exploit symmetry of chess board (start from topleft, end at bottomright)
# ONLY works in this special problem! (not for the generic leetcode problem above)
# offers small speedup
if visited[next_y][next_x]:
visited[next_x][next_y] = True
if not visited[next_x][next_y]:
visited[next_x][next_y] = True
queue.append(Cell(next_x, next_y, now.dist + 1))
return -1
|
[
"[email protected]"
] | |
a9999691c3e277bd3c41bb28c97ea2216afad0fb
|
508cd804441ce076b318df056153870d2fe52e1b
|
/sphere.py
|
e43689710948ecd61748515c08b01fe57e116aba
|
[] |
no_license
|
archibate/taichi_works
|
ffe80e6df27b7bcb3ce1c4b24e23ceeb0ac4ff8a
|
9aaae1de9fe53740030c6e24a0a57fc39d71dd71
|
refs/heads/master
| 2022-11-18T19:07:37.122093 | 2020-07-17T08:45:36 | 2020-07-17T08:45:36 | 276,714,718 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,284 |
py
|
import taichi as ti
import taichi_glsl as tl
import random, math
ti.init()#kernel_profiler=True)
dt = 0.01
kMaxParticles = 1024
kResolution = 512
kKernelSize = 16 / 512
kKernelFactor = 0.5 / kKernelSize**2
kGravity = tl.vec(0.0, -0.0)
kUseImage = False
kBackgroundColor = 0x112f41
kParticleColor = 0x068587
kBoundaryColor = 0xebaca2
kParticleDisplaySize = 0.2 * kKernelSize * kResolution
particle_pos = ti.Vector(2, ti.f32, kMaxParticles)
particle_vel = ti.Vector(2, ti.f32, kMaxParticles)
property_vel = ti.Vector(2, ti.f32, kMaxParticles)
property_density = ti.var(ti.f32, kMaxParticles)
property_force = ti.Vector(2, ti.f32, kMaxParticles)
particle_pressure = ti.var(ti.f32, kMaxParticles)
n_particles = ti.var(ti.i32, ())
if kUseImage:
image = ti.Vector(3, ti.f32, (kResolution, kResolution))
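
# Truncated Gaussian smoothing kernel: smooth(r) = exp(-|r|^2 * kKernelFactor)
# for |r| < kKernelSize and 0 otherwise; grad_smooth is its analytic gradient.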
@ti.func
def smooth(distance):
ret = 0.0
r2 = distance.norm_sqr()
if r2 < kKernelSize**2:
ret = ti.exp(-r2 * kKernelFactor)
return ret
@ti.func
def grad_smooth(distance):
ret = tl.vec2(0.0)
r2 = distance.norm_sqr()
if r2 < kKernelSize**2:
ret = (-2 * kKernelFactor) * distance * ti.exp(-r2 * kKernelFactor)
return ret
@ti.func
def alloc_particle():
ret = ti.atomic_add(n_particles[None], 1)
assert ret < kMaxParticles
return ret
@ti.kernel
def add_particle_at(mx: ti.f32, my: ti.f32, vx: ti.f32, vy: ti.f32):
id = alloc_particle()
particle_pos[id] = tl.vec(mx, my)
particle_vel[id] = tl.vec(vx, vy)
@ti.func
def preupdate(rho, rho_0=1000, gamma=7.0, c_0=20.0):
b = rho_0 * c_0**2 / gamma
return b * ((rho / rho_0) ** gamma - 1.0)
@ti.func
def update_property():
for i in range(n_particles[None]):
my_pos = particle_pos[i]
property_vel[i] = particle_vel[i]
property_density[i] = 1.0
for j in range(n_particles[None]):
w = smooth(my_pos - particle_pos[j])
property_vel[i] += w * particle_vel[j]
property_density[i] += w
property_vel[i] /= property_density[i]
for i in range(n_particles[None]):
my_pos = particle_pos[i]
property_force[i] = tl.vec2(0.0)
for j in range(n_particles[None]):
dw = grad_smooth(my_pos - particle_pos[j])
ds = particle_pos[j] - particle_pos[i]
dv = particle_vel[j] - particle_vel[i]
force = dw * property_density[j] * dv.dot(ds)
property_force[i] += force
@ti.kernel
def substep():
update_property()
for i in range(n_particles[None]):
gravity = (0.5 - particle_pos[i]) * 2.0
particle_vel[i] += gravity * dt
particle_vel[i] += property_force[i] * dt
particle_vel[i] = tl.boundReflect(particle_pos[i], particle_vel[i],
kKernelSize, 1 - kKernelSize, 0)
particle_pos[i] += particle_vel[i] * dt
        particle_pressure[i] = preupdate(property_density[i])
@ti.kernel
def update_image():
for i in ti.grouped(image):
image[i] = tl.vec3(0)
for i in range(n_particles[None]):
pos = particle_pos[i]
A = ti.floor(max(0, pos - kKernelSize)) * kResolution
B = ti.ceil(min(1, pos + kKernelSize + 1)) * kResolution
for pix in ti.grouped(ti.ndrange((A.x, B.x), (A.y, B.y))):
pix_pos = pix / kResolution
w = smooth(pix_pos - particle_pos[i])
image[pix].x += w
last_mouse = tl.vec2(0.0)
gui = ti.GUI('WCSPH', kResolution, background_color=kBackgroundColor)
while gui.running:
for e in gui.get_events():
if e.key == gui.ESCAPE:
gui.running = False
elif e.key == gui.LMB:
if e.type == gui.PRESS:
last_mouse = tl.vec(*gui.get_cursor_pos())
else:
mouse = tl.vec(*gui.get_cursor_pos())
diff = (mouse - last_mouse) * 2.0
add_particle_at(mouse.x, mouse.y, diff.x, diff.y)
elif e.key == 'r':
a = random.random() * math.tau
add_particle_at(math.cos(a) * 0.4 + 0.5, math.sin(a) * 0.4 + 0.5, 0, 0)
substep()
if kUseImage:
update_image()
gui.set_image(image)
else:
gui.circles(particle_pos.to_numpy()[:n_particles[None]],
radius=kParticleDisplaySize, color=kParticleColor)
gui.show()
|
[
"[email protected]"
] | |
50be32c063b21f51fb59e29080e17d63f03faeea
|
77c2010bb9533ecbdfa46cd41c16ee5ae26e94fa
|
/library/migrations/0001_initial.py
|
d100e69ebfc03b3f1d153433b33548151de3b8ec
|
[] |
no_license
|
dimansion/portfolio-django
|
b2cbb28dff97dd03cdf795f0bc661d39bcfae83d
|
2dffe0e8579b2a426cb7aceb1ee085933b122d90
|
refs/heads/master
| 2020-05-23T08:15:38.205372 | 2017-03-05T14:44:14 | 2017-03-05T14:44:14 | 70,251,368 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,605 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-10-09 06:20
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(blank=True, null=True, upload_to='')),
('author', models.CharField(max_length=128, unique=True)),
('title', models.CharField(max_length=200)),
('synopsis', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('slug', models.SlugField()),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, unique=True)),
('slug', models.SlugField()),
],
),
migrations.AddField(
model_name='book',
name='category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='library.Category'),
),
]
|
[
"[email protected]"
] | |
ee3747640d2d81beb67e38eb7bf9195041503fd6
|
51bdac517ec342a7a38a67e2b3c521f8bd53c5f2
|
/numba/tests/pointers/test_null.py
|
fa46e26b67aa41253b5f4b2b6e874e710d7a3aaf
|
[
"BSD-2-Clause"
] |
permissive
|
cu6yu4/numba
|
66bc7ee751fdfaabab92b6f571dbff00cb4d7652
|
f64aced5a7c94a434fd2d8c678d93ff8ac3ae1fb
|
refs/heads/master
| 2020-12-25T13:45:44.629782 | 2013-01-25T20:28:12 | 2013-01-25T20:28:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 548 |
py
|
import ctypes
import numba
from numba import *
#intp = ctypes.POINTER(ctypes.c_int)
#voidp = ctypes.c_void_p
intp = int_.pointer()
voidp = void.pointer()
@autojit
def test_compare_null():
"""
>>> test_compare_null()
True
"""
return intp(Py_uintptr_t(0)) == NULL
@autojit
def test_compare_null_attribute():
"""
>>> test_compare_null_attribute()
True
"""
return voidp(Py_uintptr_t(0)) == numba.NULL
if __name__ == '__main__':
# test_compare_null()
# test_compare_null_attribute()
numba.testmod()
|
[
"[email protected]"
] | |
2cffed30653acf460e4754cf7749eaf6a5e2e45b
|
cc0cc5268223f9c80339d1bbc2e499edc828e904
|
/wallets/thrifty_wallets/manage.py
|
e4a50db512daef9656866ea7fe7ac714993b463d
|
[] |
no_license
|
deone/thrifty
|
0ba2b0445e7e9fd4cc378350de158dc6c89838b4
|
a0ee4af9447b2765f4139deb87a3c1464e7c7751
|
refs/heads/master
| 2021-01-10T12:00:00.618968 | 2015-11-01T23:36:36 | 2015-11-01T23:36:36 | 45,340,007 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 258 |
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "thrifty_wallets.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"[email protected]"
] | |
1b340ebd2248f63c39e2921394126e7da83f5247
|
8a46f370477ea9fabd36249a4f6d70226917c24b
|
/blogdown/plugin.py
|
8ad745eddeda8d95b2c65f40734315b3b18705c3
|
[
"BSD-3-Clause"
] |
permissive
|
blogdown/blogdown
|
af551991013d03e3b7b033cf45687f952eb41def
|
4a463d341a1fe7547a3de33f03d356e74a89569e
|
refs/heads/master
| 2022-06-09T11:53:03.728491 | 2022-05-17T19:26:54 | 2022-05-17T19:28:16 | 5,064,814 | 5 | 4 | null | 2016-03-14T02:44:58 | 2012-07-16T08:30:38 |
Python
|
UTF-8
|
Python
| false | false | 2,147 |
py
|
# -*- coding: utf-8 -*-
"""
blogdown.plugin
~~~~~~~~~~~~~~~
Utilities for a simple plugin system.
:copyright: (c) 2015 by Thomas Gläßle
:license: BSD, see LICENSE for more details.
"""
import os
from importlib import import_module
from pkg_resources import iter_entry_points
from runpy import run_path
__all__ = [
"EntryPointLoader",
"PathLoader",
"PackageLoader",
"ChainLoader",
]
class EntryPointLoader:
"""Load plugins from specified entrypoint group."""
def __init__(self, ep_group):
self.ep_group = ep_group
def __call__(self, name):
for ep in iter_entry_points(self.ep_group, name):
yield ep.load()
class PathLoader:
"""Load plugins from specified folder."""
def __init__(self, search_path):
self.search_path = os.path.abspath(search_path)
def __call__(self, name):
module_path = os.path.join(self.search_path, name + ".py")
if not os.path.isfile(module_path):
return
module = run_path(module_path)
try:
yield module["setup"]
except KeyError:
raise AttributeError(
"Module at {0!r} can't be used as a plugin, "
"since it has no 'setup' function.".format(module_path)
)
class PackageLoader:
"""Load plugins from specified package."""
def __init__(self, package_name):
self.package_name = package_name
def __call__(self, module_name):
try:
module = import_module(self.package_name + "." + module_name)
except ImportError:
return
try:
yield module.setup
except AttributeError:
raise AttributeError(
"{0!r} can't be used as a plugin, "
"since it has no 'setup' function.".format(module)
)
class ChainLoader:
"""Load plugins from all of the sub-loaders."""
def __init__(self, loaders):
self.loaders = loaders
def __call__(self, name):
for loader in self.loaders:
for plugin in loader(name):
yield plugin
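
# Minimal usage sketch (hypothetical plugin name and app object, not part of
# this module):
#
#     loader = ChainLoader([
#         EntryPointLoader("blogdown.modules"),
#         PathLoader("./_plugins"),
#         PackageLoader("blogdown.modules"),
#     ])
#     for setup in loader("rst"):
#         setup(app)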
|
[
"[email protected]"
] | |
c2b459c2282096b0821f5cafcca9b1d79861dd95
|
9619daf132259c31b31c9e23a15baa675ebc50c3
|
/memphis.users/memphis/users/registration.py
|
42d0886c2b83d4155d69ef9eca86b59d3b64b673
|
[] |
no_license
|
fafhrd91/memphis-dev
|
ade93c427c1efc374e0e1266382faed2f8e7cd89
|
c82aac1ad3a180ff93370b429498dbb1c2e655b8
|
refs/heads/master
| 2016-09-05T19:32:35.109441 | 2011-08-22T06:30:43 | 2011-08-22T06:30:43 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 353 |
py
|
from zope import interface
from memphis import controlpanel
from interfaces import _, ISiteRegistration
class SiteRegistration(object):
interface.implements(ISiteRegistration)
controlpanel.registerConfiglet(
'principals.registration', ISiteRegistration, SiteRegistration,
_("Site registration"), _("Site registration configuration."))
|
[
"[email protected]"
] | |
82d49a9ea24a6ef56776243ff4a21c12b816e9f6
|
eab72229ae04d1160704cbf90a08a582802a739c
|
/put_zero_den.py
|
34666a2ec393a250b458da9b91999832b8c281fe
|
[
"MIT"
] |
permissive
|
megatazm/Crowd-Counting
|
444d39b0e3d6e98995f53badf4c073829038b6b7
|
647a055baccee2c3b6b780f38930e2ffd14d1664
|
refs/heads/master
| 2022-04-01T04:49:16.409675 | 2020-01-31T21:24:02 | 2020-01-31T21:24:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 471 |
py
|
import cv2
import numpy as np
import os
import glob
from paint_den import paint
import params
path = params.input
dirs = [f for f in glob.glob(path + '/*/')]
images = []
for x in dirs:
images.append([f for f in glob.glob(x + '/*_pos.png')])
images.sort()
images = [item for sublist in images for item in sublist]
for img_path in images:
#paint(img_path, 36, 1785, 393, 75, 567, 60, 951, 1776)
paint(img_path, 0, 3234, 737, 198, 1034, 220, 1617, 3228)
|
[
"[email protected]"
] | |
5ef53c9e1394c1d2f92962a9f34217c5c9134413
|
11841e8fb1e44c69ae7e50c0b85b324c4d90abda
|
/chutu/exmapxx.py
|
5a8c550eb45031c938a4fb4f4a1d660bcf2fed3d
|
[] |
no_license
|
chenlong2019/python
|
1d7bf6fb60229221c79538234ad2f1a91bb03c50
|
fc9e239754c5715a67cb6d743109800b64d74dc8
|
refs/heads/master
| 2020-12-08T11:11:49.951752 | 2020-01-10T04:58:29 | 2020-01-10T04:59:50 | 232,968,232 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,386 |
py
|
# coding=utf-8
import arcpy
import os,glob,time
res=200
# template mxd document path; generated mxd document path
def createMxd(modelpath,mxdpath,symbologyLayer,jpgpath,string,lyrfile):
mxd=arcpy.mapping.MapDocument(modelpath)
if(os.path.exists(mxdpath)):
mxd=arcpy.mapping.MapDocument(mxdpath)
print("location as "+mxdpath)
arcpy.AddWarning("该文件已经存在")
else:
mxd.saveACopy(mxdpath)
print(mxdpath+" saveAs succefssful")
if(os.path.exists(mxdpath)):
mxd=arcpy.mapping.MapDocument(mxdpath)
print("location in "+mxdpath)
    # locate the data frame
df = arcpy.mapping.ListDataFrames(mxd, "*")[0]
    # add the base layer
#symbologyLayer = "D:\\cs\\model\\lyr\\Rectangle_#1_常熟卫图_Level_16.tif.lyr"
#"F:\\xinxiang\\fil\\20190817mydPM25.tif"
rasLayer=arcpy.mapping.Layer(lyrfile)
symbologyLayr=arcpy.mapping.Layer(symbologyLayer)
# rasLayer.symbology.
arcpy.ApplySymbologyFromLayer_management (rasLayer,symbologyLayr)
arcpy.mapping.AddLayer(df, rasLayer, "AUTO_ARRANGE")
arcpy.AddMessage(str(time.ctime())+":"+symbologyLayer+"添加成功。。。")
for legend in arcpy.mapping.ListLayoutElements(mxd, "LEGEND_ELEMENT", "Legend"):
print(legend.items)
arcpy.RefreshActiveView()
for legend in arcpy.mapping.ListLayoutElements(mxd, "LEGEND_ELEMENT", "Legend"):
print(legend.items)
mxd.save()
arcpy.mapping.ExportToJPEG(mxd, jpgpath, resolution = res)
if __name__ == '__main__':
rootpath=u'F:\\xx\\中心城区'
pathDir = os.listdir(rootpath)
try:
os.makedirs(u'F:\\xx\\AutoMap\\result\\mxd\\o3')
except:
pass
try:
os.makedirs(u'F:\\xx\\AutoMap\\result\\JpgOutput')
except:
pass
for filename in pathDir:
if filename[-4:].lower() == '.tif':
# o3
if filename[-5:-4].lower() == '3':
try:
filepath=os.path.join(rootpath,filename)
print(filename)
mxdpath=u"F:\\xx\\AutoMap\\result\\mxd\\xinxiang{}.mxd".format(filename[:-4])
modelpath=u"F:\\xx\\AutoMap\\Mxd\\xinxiang_O3.mxd"
                    # mxd template file path
                    #modelpath=arcpy.GetParameterAsText(0)
                    # output mxd file path
                    #mxdpath=arcpy.GetParameterAsText(1)
                    # tif file path
                    symbologyLayer=u'F:\\xx\\Lyr\\C_20191111modo356.lyr'
                    #filepath = "D:\\cs\\data\\pic3"
                    # shp folder path
                    #filepath=arcpy.GetParameterAsText(3)
                    # jpg output path
jpgpath=u"F:\\xx\\AutoMap\\result\\JpgOutput\\{}.jpg".format(filename[:-4])
# jpgpath=arcpy.GetParameterAsText(4)
arcpy.AddMessage('')
arcpy.AddMessage(str(time.ctime())+"输出开始!")
createMxd(modelpath,mxdpath,symbologyLayer,jpgpath,'',filepath)
print('successful')
arcpy.AddMessage(str(time.ctime())+"输出完成!")
except Exception as e:
print(e.message)
|
[
"[email protected]"
] | |
10c811755bbeff6b27cebbc77dbe356bb64edc11
|
15ed3ab4510677e6df9b11af8fd7a36fc6d826fc
|
/v1/og_mc_3/tau=0.01/eta=0.04/library/mc6.py
|
a573e2553235d58cd70aaa9530cdec9d32c14c5f
|
[] |
no_license
|
pe-ge/Computational-analysis-of-memory-capacity-in-echo-state-networks
|
929347575538de7015190d35a7c2f5f0606235f2
|
85873d8847fb2876cc8a6a2073c2d1779ea1b20b
|
refs/heads/master
| 2020-04-02T08:08:38.595974 | 2018-01-17T08:12:26 | 2018-01-17T08:12:26 | 61,425,490 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,280 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
mc6.py
Created 21.3.2015
Based on mc5.py
Goal: Measuring Memory Capacity of reservoirs.
Changes:
- removed correlation coefficient correction MC <- MC - q / iterations_coef_measure
- added input-to-output connections
"""
from numpy import random, zeros, tanh, dot, linalg, \
corrcoef, average, std, sqrt, hstack
from library.lyapunov import lyapunov_exp
import scipy.linalg
def memory_capacity(W, WI, memory_max=None,
iterations=1000, iterations_skipped=None, iterations_coef_measure=1000,
runs=1, input_dist=(-1., 1.),
use_input=False, target_later=False, calc_lyapunov=False):
"""Calculates memory capacity of a NN
[given by its input weights WI and reservoir weights W].
W = q x q matrix storing hidden reservoir weights
WI = q x 1 vector storing input weights
Returns: a non-negative real number MC
MC: memory capacity sum for histories 1..MEMORY_MAX
"""
# matrix shape checks
if len(WI.shape) != 1:
raise Exception("input matrix WI must be vector-shaped!")
q, = WI.shape
if W.shape != (q, q):
raise Exception("W and WI matrix sizes do not match")
if memory_max is None:
memory_max = q
if iterations_skipped is None:
iterations_skipped = max(memory_max, 100) + 1
iterations_measured = iterations - iterations_skipped
dist_input = lambda: random.uniform(input_dist[0], input_dist[1], iterations)
# vector initialization
X = zeros(q)
if use_input:
S = zeros([q + 1, iterations_measured])
else:
S = zeros([q, iterations_measured])
# generate random input
u = dist_input() # all input; dimension: [iterations, 1]
# run 2000 iterations and fill the matrices D and S
for it in range(iterations):
X = tanh(dot(W, X) + dot(WI, u[it]))
if it >= iterations_skipped:
# record the state of reservoir activations X into S
if use_input:
S[:, it - iterations_skipped] = hstack([X, u[it]])
else:
S[:, it - iterations_skipped] = X
# prepare matrix D of desired values (that is, shifted inputs)
assert memory_max < iterations_skipped
D = zeros([memory_max, iterations_measured])
if target_later:
# if we allow direct input-output connections, there is no point in measuring 0-delay corr. coef. (it is always 1)
for h in range(memory_max):
D[h,:] = u[iterations_skipped - (h+1) : iterations - (h+1)]
else:
for h in range(memory_max):
D[h,:] = u[iterations_skipped - h : iterations - h]
# calculate pseudoinverse S+ and with it, the matrix WO
S_PINV = scipy.linalg.pinv(S)
WO = dot(D, S_PINV)
# do a new run for an unbiased test of quality of our newly trained WO
# we skip memory_max iterations to have large enough window
MC = zeros([runs, memory_max]) # here we store memory capacity
LE = zeros(runs) # lyapunov exponent
for run in range(runs):
u = random.uniform(input_dist[0], input_dist[1], iterations_coef_measure + memory_max)
X = zeros(q)
o = zeros([memory_max, iterations_coef_measure]) # 200 x 1000
for it in range(iterations_coef_measure + memory_max):
X = tanh(dot(W, X) + dot(WI, u[it]))
if it >= memory_max:
# we calculate output nodes using WO
if use_input:
o[:, it - memory_max] = dot(WO, hstack([X, u[it]]))
else:
o[:, it - memory_max] = dot(WO, X)
# correlate outputs with inputs (shifted)
for h in range(memory_max):
k = h + 1
if target_later:
cc = corrcoef(u[memory_max - k : memory_max + iterations_coef_measure - k], o[h, : ]) [0, 1]
else:
cc = corrcoef(u[memory_max - h : memory_max + iterations_coef_measure - h], o[h, : ]) [0, 1]
MC[run, h] = cc * cc
# calculate lyapunov
if calc_lyapunov:
LE[run] = lyapunov_exp(W, WI, X)
return sum(average(MC, axis=0)), LE
def main():
print("I am a library. Please don't run me directly.")
if __name__ == '__main__':
main()
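
# Minimal usage sketch (assumption: a small random reservoir, not from any paper):
#   from numpy import random
#   q = 20
#   W = random.uniform(-0.5, 0.5, (q, q)) / q
#   WI = random.uniform(-0.1, 0.1, q)
#   mc, le = memory_capacity(W, WI, runs=1)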
|
[
"[email protected]"
] | |
16e0ae410ab9c5056f793ef00a29456e3926cbfc
|
3b9bf497cd29cea9c24462e0411fa8adbfa6ba60
|
/leetcode/Problems/116--Populating-Next-Right-Pointers-in-Each-Node-Medium.py
|
2e8b81530cf65226d4d6de3352b0c75892188c4a
|
[] |
no_license
|
niteesh2268/coding-prepation
|
918823cb7f4965bec096ec476c639a06a9dd9692
|
19be0766f6b9c298fb32754f66416f79567843c1
|
refs/heads/master
| 2023-01-02T05:30:59.662890 | 2020-10-17T13:12:34 | 2020-10-17T13:12:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 705 |
py
|
"""
# Definition for a Node.
class Node:
def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):
self.val = val
self.left = left
self.right = right
self.next = next
"""
class Solution:
def connect(self, root: 'Node') -> 'Node':
if not root:
return None
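        # assign(r1, r2): set r1.next = r2, then wire the children inside r1,
        # across the r1/r2 boundary, and inside r2's subtree.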
def assign(r1, r2):
if not r1:
return
r1.next = r2
assign(r1.left, r1.right)
if r2:
assign(r1.right, r2.left)
assign(r2.left, r2.right)
assign(r2.right, None)
assign(root.left, root.right)
return root
|
[
"[email protected]"
] | |
fb3ec15864cfb1866c1711d0586b7d7b0fff7090
|
ad13583673551857615498b9605d9dcab63bb2c3
|
/output/models/ms_data/particles/particles_ie013_xsd/__init__.py
|
e0ffd1abcba9a881fbd645379ab76771f0c5d955
|
[
"MIT"
] |
permissive
|
tefra/xsdata-w3c-tests
|
397180205a735b06170aa188f1f39451d2089815
|
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
|
refs/heads/main
| 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 |
MIT
| 2023-07-25T14:19:04 | 2020-02-10T21:59:47 |
Python
|
UTF-8
|
Python
| false | false | 169 |
py
|
from output.models.ms_data.particles.particles_ie013_xsd.particles_ie013 import (
Base,
Doc,
Testing,
)
__all__ = [
"Base",
"Doc",
"Testing",
]
|
[
"[email protected]"
] | |
0cd0e4e8ac5f482d0c574c61b50f82a0ddd477af
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-1/f30589c91d8946586faff2c994e99395239bd50b-<main>-fix.py
|
1d51b1b9cb0c17280c516c955697eab9c96e41df
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 935 |
py
|
def main():
argument_spec = purefa_argument_spec()
argument_spec.update(dict(name=dict(required=True), eradicate=dict(default='false', type='bool'), state=dict(default='present', choices=['present', 'absent']), size=dict()))
required_if = [('state', 'present', ['size'])]
module = AnsibleModule(argument_spec, required_if=required_if, supports_check_mode=True)
if (not HAS_PURESTORAGE):
module.fail_json(msg='purestorage sdk is required for this module in volume')
state = module.params['state']
array = get_system(module)
volume = get_volume(module, array)
if ((state == 'present') and (not volume)):
create_volume(module, array)
elif ((state == 'present') and volume):
update_volume(module, array)
elif ((state == 'absent') and volume):
delete_volume(module, array)
elif ((state == 'absent') and (not volume)):
module.exit_json(changed=False)
|
[
"[email protected]"
] | |
25b0faff57a134389ac668ba40d1d3421f140816
|
08ee36e0bb1c250f7f2dfda12c1a73d1984cd2bc
|
/src/mnistk/networks/conv1dtanh_24.py
|
c8888a08e65cd985171dd5b7947bf12cd3c0dedf
|
[] |
no_license
|
ahgamut/mnistk
|
58dadffad204602d425b18549e9b3d245dbf5486
|
19a661185e6d82996624fc6fcc03de7ad9213eb0
|
refs/heads/master
| 2021-11-04T07:36:07.394100 | 2021-10-27T18:37:12 | 2021-10-27T18:37:12 | 227,103,881 | 2 | 1 | null | 2020-02-19T22:07:24 | 2019-12-10T11:33:09 |
Python
|
UTF-8
|
Python
| false | false | 1,688 |
py
|
# -*- coding: utf-8 -*-
"""
conv1dtanh_24.py
:copyright: (c) 2019 by Gautham Venkatasubramanian.
:license: MIT
"""
import torch
from torch import nn
class Conv1dTanh_24(nn.Module):
def __init__(self):
nn.Module.__init__(self)
self.f0 = nn.Conv1d(in_channels=16, out_channels=22, kernel_size=(11,), stride=(1,), padding=(0,), dilation=(1,), groups=1, bias=True, padding_mode='zeros')
self.f1 = nn.Conv1d(in_channels=22, out_channels=16, kernel_size=(38,), stride=(1,), padding=(0,), dilation=(1,), groups=1, bias=False, padding_mode='zeros')
self.f2 = nn.Conv1d(in_channels=16, out_channels=27, kernel_size=(2,), stride=(1,), padding=(0,), dilation=(1,), groups=1, bias=False, padding_mode='zeros')
self.f3 = nn.Tanh()
self.f4 = nn.Conv1d(in_channels=27, out_channels=35, kernel_size=(1,), stride=(1,), padding=(0,), dilation=(1,), groups=1, bias=False, padding_mode='zeros')
self.f5 = nn.Tanh()
self.f6 = nn.Conv1d(in_channels=35, out_channels=30, kernel_size=(1,), stride=(1,), padding=(0,), dilation=(1,), groups=1, bias=False, padding_mode='zeros')
self.f7 = nn.Conv1d(in_channels=30, out_channels=10, kernel_size=(1,), stride=(1,), padding=(0,), dilation=(1,), groups=1, bias=False, padding_mode='zeros')
self.f8 = nn.LogSoftmax(dim=1)
def forward(self, *inputs):
x = inputs[0]
x = x.view(x.shape[0],16,49)
x = self.f0(x)
x = self.f1(x)
x = self.f2(x)
x = self.f3(x)
x = self.f4(x)
x = self.f5(x)
x = self.f6(x)
x = self.f7(x)
x = x.view(x.shape[0],10)
x = self.f8(x)
return x
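
# Minimal usage sketch (assumption: MNIST-style input flattened to 784 floats):
#   net = Conv1dTanh_24()
#   logp = net(torch.randn(4, 784))  # -> shape (4, 10) log-probabilities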
|
[
"[email protected]"
] |