Dataset schema (one record per source file):

| column | type |
|---|---|
| blob_id | string (length 40) |
| directory_id | string (length 40) |
| path | string (length 3-281) |
| content_id | string (length 40) |
| detected_licenses | list (length 0-57) |
| license_type | string (2 classes) |
| repo_name | string (length 6-116) |
| snapshot_id | string (length 40) |
| revision_id | string (length 40) |
| branch_name | string (313 classes) |
| visit_date | timestamp[us] |
| revision_date | timestamp[us] |
| committer_date | timestamp[us] |
| github_id | int64 (18.2k-668M, nullable) |
| star_events_count | int64 (0-102k) |
| fork_events_count | int64 (0-38.2k) |
| gha_license_id | string (17 classes) |
| gha_event_created_at | timestamp[us] |
| gha_created_at | timestamp[us] |
| gha_language | string (107 classes) |
| src_encoding | string (20 classes) |
| language | string (1 class) |
| is_vendor | bool (2 classes) |
| is_generated | bool (2 classes) |
| length_bytes | int64 (4-6.02M) |
| extension | string (78 classes) |
| content | string (length 2-6.02M) |
| authors | list (length 1) |
| author | string (length 0-175) |
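To work with records like the rows below programmatically, here is a minimal sketch using the Hugging Face datasets library; the dataset path is a hypothetical placeholder for wherever this dump is hosted.

from datasets import load_dataset  # pip install datasets

# "user/python-stack-subset" is a placeholder, not the real dataset id.
ds = load_dataset("user/python-stack-subset", split="train", streaming=True)

for row in ds.take(3):
    # Each record carries the columns listed in the schema above.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])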
78f830f6a4e511a9f2c0d2035dc92486062ffd25
|
4191b96f4274939d528932386dade39fad642737
|
/qa/rpc-tests/test_framework/util.py
|
0a27ba74f01f805bc225186c70632375feffd757
|
[
"MIT"
] |
permissive
|
furcalor/hardcode
|
e925441a6188be7fe2d882aa1e11aad532b0362e
|
a420a2c1cd8729ad7f957f953d02114d366c2ed2
|
refs/heads/master
| 2023-04-03T18:57:27.068234 | 2021-04-01T13:59:22 | 2021-04-01T13:59:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 26,747 |
py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
import os
import sys
from binascii import hexlify, unhexlify
from base64 import b64encode
from decimal import Decimal, ROUND_DOWN
import json
import http.client
import random
import shutil
import subprocess
import time
import re
import errno
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
COVERAGE_DIR = None
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
BITCOIND_PROC_WAIT_TIMEOUT = 60
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
#Set Mocktime default to OFF.
#MOCKTIME is only needed for scripts that use the
#cached version of the blockchain. If the cached
#version of the blockchain is used without MOCKTIME
#then the mempools will not sync due to IBD.
MOCKTIME = 0
def enable_mocktime():
    # For backward compatibility of the python scripts
#with previous versions of the cache, set MOCKTIME
#to Jan 1, 2014 + (201 * 10 * 60)
global MOCKTIME
MOCKTIME = 1388534400 + (201 * 10 * 60)
def disable_mocktime():
global MOCKTIME
MOCKTIME = 0
def get_mocktime():
return MOCKTIME
def enable_coverage(dirname):
"""Maintain a log of which RPC calls are made during testing."""
global COVERAGE_DIR
COVERAGE_DIR = dirname
def get_rpc_proxy(url, node_number, timeout=None):
"""
Args:
url (str): URL of the RPC server to call
        node_number (int): the node number (or id) that this proxy connects to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
        AuthServiceProxy: a convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
COVERAGE_DIR, node_number) if COVERAGE_DIR else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
assert(n <= MAX_NODES)
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same tip.
    sync_blocks needs to be called with an rpc_connections set that has at least
one node already synced to the latest, stable tip, otherwise there's a
chance it might return before all nodes are stably synced.
"""
# Use getblockcount() instead of waitforblockheight() to determine the
# initial max height because the two RPCs look at different internal global
# variables (chainActive vs latestBlock) and the former gets updated
# earlier.
maxheight = max(x.getblockcount() for x in rpc_connections)
start_time = cur_time = time.time()
while cur_time <= start_time + timeout:
tips = [r.waitforblockheight(maxheight, int(wait * 1000)) for r in rpc_connections]
if all(t["height"] == maxheight for t in tips):
if all(t["hash"] == tips[0]["hash"] for t in tips):
return
raise AssertionError("Block sync failed, mismatched block hashes:{}".format(
"".join("\n {!r}".format(tip) for tip in tips)))
cur_time = time.time()
raise AssertionError("Block sync to height {} timed out:{}".format(
maxheight, "".join("\n {!r}".format(tip) for tip in tips)))
def sync_chain(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same best block
"""
while timeout > 0:
best_hash = [x.getbestblockhash() for x in rpc_connections]
if best_hash == [best_hash[0]]*len(best_hash):
return
time.sleep(wait)
timeout -= wait
raise AssertionError("Chain sync failed: Best block hashes don't match")
def sync_mempools(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while timeout > 0:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
return
time.sleep(wait)
timeout -= wait
raise AssertionError("Mempool sync failed")
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
rpc_u, rpc_p = rpc_auth_pair(n)
with open(os.path.join(datadir, "ethros.conf"), 'w', encoding='utf8') as f:
f.write("regtest=1\n")
f.write("rpcuser=" + rpc_u + "\n")
f.write("rpcpassword=" + rpc_p + "\n")
f.write("port="+str(p2p_port(n))+"\n")
f.write("rpcport="+str(rpc_port(n))+"\n")
f.write("listenonion=0\n")
return datadir
def rpc_auth_pair(n):
return 'rpcuser💻' + str(n), 'rpcpass🔑' + str(n)
def rpc_url(i, rpchost=None):
rpc_u, rpc_p = rpc_auth_pair(i)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
def wait_for_bitcoind_start(process, url, i):
'''
Wait for bitcoind to start. This means that RPC is accessible and fully initialized.
Raise an exception if bitcoind exits during initialization.
'''
while True:
if process.poll() is not None:
raise Exception('ethrosd exited with status %i during initialization' % process.returncode)
try:
rpc = get_rpc_proxy(url, i)
blocks = rpc.getblockcount()
break # break out of loop on success
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
raise # unknown JSON RPC exception
time.sleep(0.25)
def initialize_chain(test_dir, num_nodes, cachedir):
"""
Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache
"""
assert num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(os.path.join(cachedir, 'node'+str(i))):
create_cache = True
break
if create_cache:
#find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(os.path.join(cachedir,"node"+str(i))):
shutil.rmtree(os.path.join(cachedir,"node"+str(i)))
# Create cache directories, run bitcoinds:
for i in range(MAX_NODES):
datadir=initialize_datadir(cachedir, i)
args = [ os.getenv("BITCOIND", "ethrosd"), "-server", "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print("initialize_chain: ethrosd started, waiting for RPC to come up")
wait_for_bitcoind_start(bitcoind_processes[i], rpc_url(i), i)
if os.getenv("PYTHON_DEBUG", ""):
print("initialize_chain: RPC successfully started")
rpcs = []
for i in range(MAX_NODES):
try:
rpcs.append(get_rpc_proxy(rpc_url(i), i))
        except Exception:
            sys.stderr.write("Error connecting to " + rpc_url(i) + "\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
# blocks are created with timestamps 10 minutes apart
# starting from 2010 minutes in the past
enable_mocktime()
block_time = get_mocktime() - (201 * 10 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].generate(1)
block_time += 10*60
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
disable_mocktime()
for i in range(MAX_NODES):
os.remove(log_filename(cachedir, i, "debug.log"))
os.remove(log_filename(cachedir, i, "db.log"))
os.remove(log_filename(cachedir, i, "peers.dat"))
os.remove(log_filename(cachedir, i, "fee_estimates.dat"))
for i in range(num_nodes):
from_dir = os.path.join(cachedir, "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in bitcoin.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
    match = re.match(r'(\[[0-9a-fA-F:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
"""
Start a bitcoind and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
if binary is None:
        binary = os.getenv("BITCOIND", "ethrosd")
args = [ binary, "-datadir="+datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-mocktime="+str(get_mocktime()) ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print("start_node: ethrosd started, waiting for RPC to come up")
url = rpc_url(i, rpchost)
wait_for_bitcoind_start(bitcoind_processes[i], url, i)
if os.getenv("PYTHON_DEBUG", ""):
print("start_node: RPC successfully started")
proxy = get_rpc_proxy(url, i, timeout=timewait)
if COVERAGE_DIR:
coverage.write_all_rpc_commands(COVERAGE_DIR, proxy)
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
"""
Start multiple bitcoinds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for _ in range(num_nodes) ]
if binary is None: binary = [ None for _ in range(num_nodes) ]
rpcs = []
try:
for i in range(num_nodes):
rpcs.append(start_node(i, dirname, extra_args[i], rpchost, timewait=timewait, binary=binary[i]))
except: # If one node failed to start, stop the others
stop_nodes(rpcs)
raise
return rpcs
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
try:
node.stop()
except http.client.CannotSendRequest as e:
print("WARN: Unable to stop node: " + repr(e))
return_code = bitcoind_processes[i].wait(timeout=BITCOIND_PROC_WAIT_TIMEOUT)
assert_equal(return_code, 0)
del bitcoind_processes[i]
def stop_nodes(nodes):
for i, node in enumerate(nodes):
stop_node(node, i)
assert not bitcoind_processes.values() # All connections must be gone now
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_fee_amount(fee, tx_size, fee_per_kB):
"""Assert the fee was in range"""
target_fee = tx_size * fee_per_kB / 1000
if fee < target_fee:
raise AssertionError("Fee of %s LTC too low! (Should be %s LTC)"%(str(fee), str(target_fee)))
# allow the wallet's estimation to be at most 2 bytes off
if fee > (tx_size + 2) * fee_per_kB / 1000:
raise AssertionError("Fee of %s LTC too high! (Should be %s LTC)"%(str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
if thing1 < thing2:
raise AssertionError("%s < %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError("Expected substring not found:"+e.error['message'])
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_raises_jsonrpc(code, message, fun, *args, **kwds):
"""Run an RPC and verify that a specific JSONRPC exception code and message is raised.
Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
and verifies that the error code and message are as expected. Throws AssertionError if
no JSONRPCException was returned or if the error code/message are not as expected.
Args:
code (int), optional: the error code returned by the RPC call (defined
in src/rpc/protocol.h). Set to None if checking the error code is not required.
message (string), optional: [a substring of] the error string returned by the
RPC call. Set to None if checking the error string is not required
fun (function): the function to call. This should be the name of an RPC.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
try:
fun(*args, **kwds)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if (code is not None) and (code != e.error["code"]):
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if (message is not None) and (message not in e.error['message']):
raise AssertionError("Expected substring not found:"+e.error['message'])
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find = False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find == True:
assert_equal(expected, { })
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find == True:
num_matched = num_matched+1
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0 and should_not_find != True:
raise AssertionError("No objects matched %s"%(str(to_match)))
if num_matched > 0 and should_not_find == True:
raise AssertionError("Objects were found %s"%(str(to_match)))
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
node.generate(int(0.5*count)+101)
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value/2)
outputs[addr2] = satoshi_round(send_value/2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransaction(raw_tx)["hex"]
txid = node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" #OP_RETURN OP_PUSH2 512 bytes
for i in range (512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = "81"
for k in range(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
def create_tx(node, coinbase, to_address, amount):
inputs = [{ "txid" : coinbase, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
for _ in range(num):
t = utxos.pop()
inputs=[{ "txid" : t["txid"], "vout" : t["vout"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = satoshi_round(change)
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransaction(newtx, None, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def mine_large_block(node, utxos=None):
# generate a 66k transaction,
    # and 14 of them together come close to the 1MB block limit
num = 14
txouts = gen_return_txouts()
utxos = utxos if utxos is not None else []
if len(utxos) < num:
utxos.clear()
utxos.extend(node.listunspent())
fee = 100 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
def get_bip9_status(node, key):
info = node.getblockchaininfo()
return info['bip9_softforks'][key]
|
[
"[email protected]"
] | |
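A minimal sketch of how a regression test might drive the helpers above, assuming this module is importable as test_framework.util and an ethrosd binary is on PATH; the node count and test directory are illustrative.

from test_framework import util

util.PortSeed.n = 1                      # unique per process; required by the port helpers
util.check_json_precision()
util.initialize_chain_clean("/tmp/testrun", 2)
nodes = util.start_nodes(2, "/tmp/testrun")
util.connect_nodes_bi(nodes, 0, 1)
nodes[0].generate(10)                    # mine 10 regtest blocks on node 0
util.sync_blocks(nodes)
util.assert_equal(nodes[1].getblockcount(), 10)
util.stop_nodes(nodes)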
5e17f43083d9a47da10c74843cd9354b5b2beefe
|
4b9c6991cf570484ef236e102f985249df6ab2d4
|
/templatetags/react.py
|
0a6f295488333db80319cf2d78a3356e677eb520
|
[] |
no_license
|
walkness/django-react
|
c4cfc4c1b63b050d0d2379e28300aae3e80c97c7
|
364f89565de2c9ed8b097faca56920742c512978
|
refs/heads/master
| 2021-04-09T16:09:41.245534 | 2016-08-17T15:41:49 | 2016-08-17T15:41:49 | 64,865,193 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 856 |
py
|
from django import template
from django.utils.safestring import mark_safe
from webpack_loader.templatetags.webpack_loader import _get_bundle
register = template.Library()
def render_as_tags(bundle, defer):
tags = []
for chunk in bundle:
url = chunk['url']
if chunk['name'].endswith('.js'):
tags.append((
'<script{1} type="text/javascript" src="{0}"></script>'
).format(url, ' defer' if defer else '',))
elif chunk['name'].endswith('.css'):
tags.append((
'<link type="text/css" href="{0}" rel="stylesheet"/>'
).format(url))
return mark_safe('\n'.join(tags))
@register.simple_tag
def render_bundle(bundle_name, extension=None, config='DEFAULT', defer=False):
return render_as_tags(_get_bundle(bundle_name, extension, config), defer)
|
[
"[email protected]"
] | |
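A self-contained sketch of the tag-building logic in render_as_tags, with plain strings standing in for Django's mark_safe and a hand-built chunk list standing in for webpack-loader's _get_bundle output.

# Stand-alone illustration; the chunk dicts mimic webpack-loader's bundle format.
bundle = [
    {'name': 'main.js', 'url': '/static/bundles/main.js'},
    {'name': 'main.css', 'url': '/static/bundles/main.css'},
]
tags = []
for chunk in bundle:
    if chunk['name'].endswith('.js'):
        tags.append('<script defer type="text/javascript" src="{0}"></script>'.format(chunk['url']))
    elif chunk['name'].endswith('.css'):
        tags.append('<link type="text/css" href="{0}" rel="stylesheet"/>'.format(chunk['url']))
print('\n'.join(tags))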
d9928d82395d5c0f5f21546f330a57c49586eccf
|
a3db112bcb20da7c59477d47e5040b65ab862bc5
|
/api/Test/ddt.py
|
1408cf77b86cf587b5e1bb4db59de0d815bb069a
|
[] |
no_license
|
wangyongsheng920811/jenkins
|
20a9824b86ea6d73d1cde5f95cdb03c795434da0
|
5ad4313362ceb7c740a1435d42d0772070b64641
|
refs/heads/master
| 2022-11-29T13:48:42.389463 | 2020-08-11T15:05:10 | 2020-08-11T15:05:10 | 286,773,308 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,920 |
py
|
# -*- coding: utf-8 -*-
# This file is a part of DDT (https://github.com/txels/ddt)
# Copyright 2012-2015 Carles Barrobés and DDT contributors
# For the exact contribution history, see the git revision log.
# DDT is licensed under the MIT License, included in
# https://github.com/txels/ddt/blob/master/LICENSE.md
import inspect
import json
import os
import re
import codecs
from functools import wraps
try:
import yaml
except ImportError: # pragma: no cover
_have_yaml = False
else:
_have_yaml = True
__version__ = '1.1.2'
# These attributes will not conflict with any real python attribute
# They are added to the decorated test method and processed later
# by the `ddt` class decorator.
DATA_ATTR = '%values' # store the data the test must run with
FILE_ATTR = '%file_path' # store the path to JSON file
UNPACK_ATTR = '%unpack' # remember that we have to unpack values
index_len = 5 # default max length of case index
try:
trivial_types = (type(None), bool, int, float, basestring)
except NameError:
trivial_types = (type(None), bool, int, float, str)
def is_trivial(value):
if isinstance(value, trivial_types):
return True
elif isinstance(value, (list, tuple)):
return all(map(is_trivial, value))
return False
def unpack(func):
"""
Method decorator to add unpack feature.
"""
setattr(func, UNPACK_ATTR, True)
return func
def data(*values):
"""
Method decorator to add to your test methods.
Should be added to methods of instances of ``unittest.TestCase``.
"""
global index_len
index_len = len(str(len(values)))
return idata(values)
def idata(iterable):
"""
Method decorator to add to your test methods.
Should be added to methods of instances of ``unittest.TestCase``.
"""
def wrapper(func):
setattr(func, DATA_ATTR, iterable)
return func
return wrapper
def file_data(value):
"""
Method decorator to add to your test methods.
Should be added to methods of instances of ``unittest.TestCase``.
``value`` should be a path relative to the directory of the file
containing the decorated ``unittest.TestCase``. The file
should contain JSON encoded data, that can either be a list or a
dict.
In case of a list, each value in the list will correspond to one
test case, and the value will be concatenated to the test method
name.
In case of a dict, keys will be used as suffixes to the name of the
test case, and values will be fed as test data.
"""
def wrapper(func):
setattr(func, FILE_ATTR, value)
return func
return wrapper
# Custom test case title
def mk_test_name(name, value, index=0):
"""
Generate a new name for a test case.
It will take the original test name and append an ordinal index and a
string representation of the value, and convert the result into a valid
python identifier by replacing extraneous characters with ``_``.
We avoid doing str(value) if dealing with non-trivial values.
The problem is possible different names with different runs, e.g.
different order of dictionary keys (see PYTHONHASHSEED) or dealing
with mock objects.
Trivial scalar values are passed as is.
A "trivial" value is a plain scalar, or a tuple or list consisting
only of trivial values.
"""
# Add zeros before index to keep order
# index = "{0:0{1}}".format(index + 1, index_len)
index = "{0:0{1}}".format(index + 1, 4)#将序列号强制改成3位
try:
test_name = name+'_'+str(index)+':\t'+str(value[0])
return test_name
except:
if not is_trivial(value):
return "{0}_{1}".format(name, index)
try:
value = str(value)
except UnicodeEncodeError:
# fallback for python2
value = value.encode('ascii', 'backslashreplace')
test_name = "{0}_{1}_{2}".format(name, index, value)
return re.sub(r'\W|^(?=\d)', '_', test_name)
def feed_data(func, new_name, *args, **kwargs):
"""
This internal method decorator feeds the test data item to the test.
"""
@wraps(func)
def wrapper(self):
return func(self, *args, **kwargs)
wrapper.__name__ = new_name
wrapper.__wrapped__ = func
# Try to call format on the docstring
if func.__doc__:
try:
wrapper.__doc__ = func.__doc__.format(*args, **kwargs)
except (IndexError, KeyError):
            # Maybe the user has added some of the formatting strings
            # unintentionally in the docstring. Do not raise an exception as it
            # could be that they are not aware of the formatting feature.
pass
return wrapper
def add_test(cls, test_name, func, *args, **kwargs):
"""
Add a test case to this class.
The test will be based on an existing function but will give it a new
name.
"""
setattr(cls, test_name, feed_data(func, test_name, *args, **kwargs))
def process_file_data(cls, name, func, file_attr):
"""
Process the parameter in the `file_data` decorator.
"""
cls_path = os.path.abspath(inspect.getsourcefile(cls))
data_file_path = os.path.join(os.path.dirname(cls_path), file_attr)
def create_error_func(message): # pylint: disable-msg=W0613
def func(*args):
raise ValueError(message % file_attr)
return func
# If file does not exist, provide an error function instead
if not os.path.exists(data_file_path):
test_name = mk_test_name(name, "error")
add_test(cls, test_name, create_error_func("%s does not exist"), None)
return
_is_yaml_file = data_file_path.endswith((".yml", ".yaml"))
# Don't have YAML but want to use YAML file.
if _is_yaml_file and not _have_yaml:
test_name = mk_test_name(name, "error")
add_test(
cls,
test_name,
create_error_func("%s is a YAML file, please install PyYAML"),
None
)
return
with codecs.open(data_file_path, 'r', 'utf-8') as f:
# Load the data from YAML or JSON
if _is_yaml_file:
data = yaml.safe_load(f)
else:
data = json.load(f)
_add_tests_from_data(cls, name, func, data)
def _add_tests_from_data(cls, name, func, data):
"""
Add tests from data loaded from the data file into the class
"""
for i, elem in enumerate(data):
if isinstance(data, dict):
key, value = elem, data[elem]
test_name = mk_test_name(name, key, i)
elif isinstance(data, list):
value = elem
test_name = mk_test_name(name, value, i)
if isinstance(value, dict):
add_test(cls, test_name, func, **value)
else:
add_test(cls, test_name, func, value)
def ddt(cls):
"""
Class decorator for subclasses of ``unittest.TestCase``.
Apply this decorator to the test case class, and then
decorate test methods with ``@data``.
For each method decorated with ``@data``, this will effectively create as
many methods as data items are passed as parameters to ``@data``.
The names of the test methods follow the pattern
``original_test_name_{ordinal}_{data}``. ``ordinal`` is the position of the
data argument, starting with 1.
For data we use a string representation of the data value converted into a
valid python identifier. If ``data.__name__`` exists, we use that instead.
For each method decorated with ``@file_data('test_data.json')``, the
decorator will try to load the test_data.json file located relative
to the python file containing the method that is decorated. It will,
for each ``test_name`` key create as many methods in the list of values
from the ``data`` key.
"""
for name, func in list(cls.__dict__.items()):
if hasattr(func, DATA_ATTR):
for i, v in enumerate(getattr(func, DATA_ATTR)):
test_name = mk_test_name(name, getattr(v, "__name__", v), i)
if hasattr(func, UNPACK_ATTR):
if isinstance(v, tuple) or isinstance(v, list):
add_test(cls, test_name, func, *v)
else:
# unpack dictionary
add_test(cls, test_name, func, **v)
else:
add_test(cls, test_name, func, v)
delattr(cls, name)
elif hasattr(func, FILE_ATTR):
file_attr = getattr(func, FILE_ATTR)
process_file_data(cls, name, func, file_attr)
delattr(cls, name)
return cls
|
[
"[email protected]"
] | |
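A minimal sketch of the decorators above in use, assuming the module is importable as ddt.

import unittest
from ddt import ddt, data, unpack  # assumes the module above is on the path

@ddt
class AdditionTest(unittest.TestCase):
    @data((1, 2, 3), (2, 3, 5))
    @unpack  # each tuple is splatted into the test's positional arguments
    def test_add(self, a, b, expected):
        self.assertEqual(a + b, expected)

if __name__ == '__main__':
    unittest.main()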
0cb57b09b565be270fb0b66f2fa3a221babd734e
|
faaca77efe25d55335a95b878c8686f7bdb395f9
|
/server_tests/C8H10N4O2_test_116.py
|
56cad3684012816a44fa34f274f81365ee86d795
|
[] |
no_license
|
JayZisch/CompilerProject
|
fafde7dc6950a3a9e82703b4fe17a4087b2288b8
|
9e017305218799d4e9843896e108ab5478f18474
|
refs/heads/master
| 2020-06-02T19:57:07.311202 | 2011-12-15T02:40:38 | 2011-12-15T02:40:38 | 2,885,277 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 68 |
py
|
class C:
x = 1
y = x
x = 2
print y
print C.y
C.x = 10
print C.y
|
[
"[email protected]"
] | |
8a6fa6896581a869d9bd68c1edf00f636e662dc3
|
0dddcf0dbc8519270e754a4569faff845e991f36
|
/pybrowser/downloader.py
|
97d8fabf47ae3892dfb198fb13bf4476e6d64eac
|
[
"MIT"
] |
permissive
|
abranjith/pybrowser
|
bc5b08933513eadf688e19f93f0890e50a512dd8
|
8a4d435c7071e64e881f2c274fabd5cd7805ea34
|
refs/heads/master
| 2022-12-13T04:33:04.665674 | 2019-04-28T16:08:53 | 2019-04-28T16:08:53 | 168,921,806 | 22 | 2 |
MIT
| 2022-12-08T04:57:29 | 2019-02-03T07:44:21 |
Python
|
UTF-8
|
Python
| false | false | 14,626 |
py
|
import sys
import os
import zipfile
try:
import tarfile
except ImportError:
pass
try:
from urllib import request
except ImportError:
import urllib as request
from requests import Session as HTTPSession
from selenium import webdriver
from selenium.webdriver.chrome.options import Options as ChromeOptions
from .constants import CONSTANTS, CHROME_CONSTANTS, IE_CONSTANTS, FIREFOX_CONSTANTS
from .exceptions import InvalidArgumentError, OperationFailedException
from .common_utils import (get_user_home_dir, is_valid_url, copy_file, hash_, rm_files,
guess_filename_from_url, add_to_path, find_patterns_in_str, os_name,
make_dir, file_exists)
from .htmlm import HTML
from .log_adapter import get_logger
from .decorators import task_runner
#TODO: abstract away all usage of anything dealing with file systems. eg: os, platform, ntpath etc
#TODO: user provided hash comparsion and security
def download_driver(driver_name, version=None, download_filename=None, add_to_ospath=True,
overwrite_existing=True):
if driver_name == CONSTANTS.CHROME_DRIVER:
webdriver_downloader.download_chrome(version=version, download_filename=download_filename,
add_to_ospath=add_to_ospath, overwrite_existing=overwrite_existing)
elif driver_name == CONSTANTS.IE_DRIVER:
webdriver_downloader.download_ie(version=version, download_filename=download_filename,
add_to_ospath=add_to_ospath, overwrite_existing=overwrite_existing)
elif driver_name == CONSTANTS.FIREFOX_DRIVER:
webdriver_downloader.download_firefox(version=version, download_filename=download_filename,
add_to_ospath=add_to_ospath, overwrite_existing=overwrite_existing)
else:
get_logger().error(f"Unable to download {driver_name} driver at this point")
def download_url(url, to_dir=None, download_filename=None, overwrite_existing=True, asynch=True,
unzip=False, del_zipfile=False, add_to_ospath=False, callback=None):
d = Downloader(from_url=url, to_dir=to_dir, download_filename=download_filename,
overwrite_existing=overwrite_existing, asynch=asynch, unzip=unzip,
del_zipfile=del_zipfile, add_to_ospath=add_to_ospath, callback=callback)
d_files = d.download()
return d_files
class Downloader(object):
@staticmethod
def any_file_exists(files):
if not isinstance(files, list):
files = [files]
        for f in files:
            if f and file_exists(f):
                return True
return False
def __init__(self, from_url=None, to_dir=None, download_filename=None, unzip_filename=None,
overwrite_existing=True, asynch=False, unzip=True, del_zipfile=True, add_to_ospath=False, callback=None):
if not is_valid_url(from_url):
get_logger().error(f"{__class__.__name__}: from_url is mandatory")
raise InvalidArgumentError("from_url is mandatory and should be a valid url")
self.from_url = from_url
self.to_dir = to_dir or get_user_home_dir()
self.overwrite_existing = overwrite_existing
self.download_ok = False
self.download_fullfilename = None
if not download_filename:
download_filename = guess_filename_from_url(from_url)
if download_filename:
self.download_fullfilename = os.path.join(self.to_dir, download_filename)
        self.unzip_fullfilename = None
        if unzip_filename:
            self.unzip_fullfilename = os.path.join(self.to_dir, unzip_filename)
self.unzip = unzip
self.del_zipfile = del_zipfile
self.add_to_ospath = add_to_ospath
self.filehash = None
self.downloaded_files = None
self.asynch = asynch
self.callback = callback
def _downloadhook(self, blocknum, blocksize, totalsize):
readsofar = blocknum * blocksize
if totalsize > 0:
percent = 100.0 if readsofar > totalsize else (readsofar * 100.0 / totalsize)
s = f"\rdownloading...[{percent:.0f}%]"
sys.stdout.write(s)
if readsofar >= totalsize: # near the end
sys.stdout.write("\n")
self.download_ok = True
else: # total size is unknown
sys.stderr.write("read %d\n" % (readsofar,))
def _can_proceed(self):
if not self.overwrite_existing:
#print("self.unzip_fullfilename - " + self.unzip_fullfilename)
if Downloader.any_file_exists([self.unzip_fullfilename, self.download_fullfilename]):
return False
return True
def _unzip(self):
f = self.download_fullfilename
if not f:
return
extracted_names = []
extracted = False
if zipfile.is_zipfile(f):
with zipfile.ZipFile(f) as zf:
zf.extractall(path=self.to_dir)
extracted_names = zf.namelist()
extracted = True
elif f.endswith("tar.gz"):
with tarfile.open(f, "r:gz") as tar:
tar.extractall(path=self.to_dir)
extracted_names = tar.getnames()
extracted = True
elif f.endswith("tar"):
with tarfile.open(f, "r:") as tar:
tar.extractall(path=self.to_dir)
extracted_names = tar.getnames()
extracted = True
if extracted:
self.downloaded_files = [os.path.join(self.to_dir, fl) for fl in extracted_names]
if self.del_zipfile:
rm_files(f)
def _add_to_path(self):
if not self.to_dir:
return
add_to_path(self.to_dir)
#decorate download function for ability to run in background
@task_runner
def download(self):
if self._can_proceed():
filename, headers = None, None
try:
hook = None
if not self.asynch:
hook = self._downloadhook
filename, headers = request.urlretrieve(self.from_url, filename=self.download_fullfilename,
reporthook=hook)
self.download_ok = True
except Exception as e:
raise OperationFailedException(f"Download from {self.from_url} failed. Details - \n{str(e)}")
self.filehash = hash_(filename)
if (not self.download_fullfilename) and filename:
bn = os.path.basename(filename)
copy_file(filename, self.to_dir, overwrite=self.overwrite_existing)
rm_files(filename)
self.download_fullfilename = os.path.join(self.to_dir, bn)
if self.download_ok:
self.downloaded_files = [self.download_fullfilename]
if self.unzip:
#print("unzipping")
self._unzip()
else:
raise OperationFailedException(f"Download from {self.from_url} failed")
if self.add_to_ospath:
self._add_to_path()
if self.callback and callable(self.callback):
self.callback(self.downloaded_files)
return self.downloaded_files
class WebdriverDownloader(Downloader):
_OSMAP_CHROME = {'windows':"win32", 'mac':"mac64", 'linux':"linux64"}
_OSMAP_FIREFOX = {'windows':"win64", 'mac':"macos", 'linux':"linux64"}
_ZIPEXTMAP_FIREFOX = {'windows':"zip", 'mac':"tar.gz", 'linux':"tar.gz"}
WEBDRIVERNAMES = {CONSTANTS.CHROME_DRIVER : "chromedriver",
CONSTANTS.IE_DRIVER : "IEDriverServer",
CONSTANTS.FIREFOX_DRIVER : "geckodriver" }
def __init__(self, url=None, to_dir=None, overwrite_existing=True, download_filename=None,
unzip_filename=None, asynch=False, unzip=True, add_to_ospath=False):
self.to_dir = to_dir or self.default_download_directory()
super(WebdriverDownloader, self).__init__(from_url=url, to_dir=self.to_dir, download_filename=download_filename,
unzip_filename=unzip_filename, overwrite_existing=overwrite_existing,
asynch=asynch, unzip=unzip, add_to_ospath=add_to_ospath)
@staticmethod
def default_download_directory():
dir_name = CONSTANTS.DOWNLOAD_DIR_NAME
start_dir = CONSTANTS.DIR_PATH or get_user_home_dir()
#home_dir = get_user_home_dir()
default_dir = os.path.join(start_dir, CONSTANTS.DIR_NAME, dir_name)
make_dir(default_dir)
return default_dir
@staticmethod
def latest_chrome_version(use_default=True):
#TODO: this needs to change
#get from chromium website
        LATEST_VERSION_PATTERN = r'Latest Release:.*ChromeDriver ([\d.]+)'
plain_text = ""
with HTTPSession() as session:
h = session.get(CHROME_CONSTANTS.HOME_URL)
r = HTML(h.text)
r.render()
plain_text = str(r.text).encode('ascii', errors='ignore').decode()
v = find_patterns_in_str(LATEST_VERSION_PATTERN, plain_text, first=True)
if (not v) and use_default:
v = CHROME_CONSTANTS.DEFAULT_VERSION
if not v:
message = """
Unable to pull latest available Chromedriver version. Check,
1. Your internet connection
2. If internet is fine, contact implementor. Perhaps requires logic change
"""
raise OperationFailedException(message)
return str(v).strip()
@staticmethod
def ie_download_url(version, filename):
home_url = IE_CONSTANTS.DOWNLOAD_URL.format(version, filename)
return home_url
@classmethod
def download_chrome(cls, to_dir=None, version=None, download_filename=None, overwrite_existing=False,
asynch=False, unzip=True, add_to_ospath=True):
unzip_filename = WebdriverDownloader.WEBDRIVERNAMES[CONSTANTS.CHROME_DRIVER]
#TODO: de-duplication
start_dir = to_dir or WebdriverDownloader.default_download_directory()
f_dir = os.path.join(start_dir, unzip_filename)
if (not overwrite_existing) and file_exists(f_dir):
if add_to_ospath:
add_to_path(start_dir)
return
download_url = CHROME_CONSTANTS.DOWNLOAD_URL
if download_url:
url = download_url
else:
#determine download_url
if version:
version = str(version)
version = version or CHROME_CONSTANTS.VERSION or WebdriverDownloader.latest_chrome_version()
filename = CHROME_CONSTANTS.FILENAME_TEMPLATE.format(WebdriverDownloader._OSMAP_CHROME[os_name()])
if not download_filename:
download_filename = filename
url = CHROME_CONSTANTS.DOWNLOAD_URL_TEMPLATE.format(version, filename)
wd = WebdriverDownloader(url=url, to_dir=to_dir, download_filename=download_filename,
unzip_filename=unzip_filename, overwrite_existing=overwrite_existing,
asynch=asynch, unzip=unzip, add_to_ospath=add_to_ospath)
wd.download()
#TODO: automatically determine version
@classmethod
def download_ie(cls, to_dir=None, version=None, download_filename=None, overwrite_existing=False,
asynch=False, unzip=True, add_to_ospath=True):
unzip_filename = WebdriverDownloader.WEBDRIVERNAMES[CONSTANTS.IE_DRIVER]
#TODO: de-duplication
start_dir = to_dir or WebdriverDownloader.default_download_directory()
f_dir = os.path.join(start_dir, unzip_filename)
if (not overwrite_existing) and file_exists(f_dir):
if add_to_ospath:
add_to_path(start_dir)
return
download_url = IE_CONSTANTS.DOWNLOAD_URL
if download_url:
url = download_url
else:
#determine download_url
version = version or IE_CONSTANTS.VERSION
version = str(version)
filename = IE_CONSTANTS.FILENAME_TEMPLATE.format(version)
if not download_filename:
download_filename = filename
url = IE_CONSTANTS.DOWNLOAD_URL_TEMPLATE.format(version, filename)
wd = WebdriverDownloader(url=url, to_dir=to_dir, download_filename=download_filename,
unzip_filename=unzip_filename, overwrite_existing=overwrite_existing,
asynch=asynch, unzip=unzip, add_to_ospath=add_to_ospath)
wd.download()
#TODO: logic to get latest version from Website
@classmethod
def download_firefox(cls, to_dir=None, version=None, download_filename=None, overwrite_existing=False,
asynch=False, unzip=True, add_to_ospath=True):
unzip_filename = WebdriverDownloader.WEBDRIVERNAMES[CONSTANTS.FIREFOX_DRIVER]
#TODO: de-duplication
start_dir = to_dir or WebdriverDownloader.default_download_directory()
f_dir = os.path.join(start_dir, unzip_filename)
if (not overwrite_existing) and file_exists(f_dir):
if add_to_ospath:
add_to_path(start_dir)
return
download_url = FIREFOX_CONSTANTS.DOWNLOAD_URL
if download_url:
url = download_url
else:
#determine download_url
version = version or FIREFOX_CONSTANTS.VERSION or FIREFOX_CONSTANTS.DEFAULT_VERSION
version = str(version)
ospart = WebdriverDownloader._OSMAP_FIREFOX[os_name()]
extpart = WebdriverDownloader._ZIPEXTMAP_FIREFOX[os_name()]
filename = FIREFOX_CONSTANTS.FILENAME_TEMPLATE.format(version, ospart, extpart)
if not download_filename:
download_filename = filename
url = FIREFOX_CONSTANTS.DOWNLOAD_URL_TEMPLATE.format(version, filename)
wd = WebdriverDownloader(url=url, to_dir=to_dir, download_filename=download_filename,
unzip_filename=unzip_filename, overwrite_existing=overwrite_existing,
asynch=asynch, unzip=unzip, add_to_ospath=add_to_ospath)
wd.download()
webdriver_downloader = WebdriverDownloader
|
[
"[email protected]"
] | |
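A sketch of the module's public entry points, assuming the pybrowser package is installed; the URL and paths are illustrative.

from pybrowser.downloader import download_url, download_driver
from pybrowser.constants import CONSTANTS

# Fetch and unpack an archive, blocking until done:
files = download_url("https://example.com/archive.zip", to_dir="/tmp/dl",
                     asynch=False, unzip=True, del_zipfile=True)
print(files)

# Fetch the matching chromedriver and put its directory on PATH:
download_driver(CONSTANTS.CHROME_DRIVER, add_to_ospath=True)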
a91cd6a30720f35b923f7d66fc4dd8f19c5c66be
|
90b16221850962629ec974cd760eb8511187a4c8
|
/estados.py
|
8b6ecd4d1b8a1dae15ddb200738d62194c04e4d5
|
[
"MIT"
] |
permissive
|
hi-hi-ray/space-invaders
|
d4763e3007e8fda03f3bba93468be5f6e8eaa333
|
8df3bc2b8bf2093d26486dfd17f9acc2caaa0ceb
|
refs/heads/master
| 2020-03-28T01:28:03.409867 | 2018-09-26T04:56:19 | 2018-09-26T04:56:19 | 147,507,209 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 376 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Space Invaders
# Created by Raysa Dutra
from estados_orm import StateOrm
class State(object):
def __init__(self):
self.dao = StateOrm()
def save_state(self, player_position, player_life, enemy_type):
self.dao.create(player_position=player_position, player_life=player_life, enemy_type=enemy_type)
|
[
"[email protected]"
] | |
1b0e51b919441b6d97ed1baefb9ae33a0afd199e
|
b8fb60543cfd1c432640397727dcbb64c0c71b17
|
/blog/migrations/0046_auto_20201115_1826.py
|
bf32dca3cdc6f288b46bc4ccb5a7848e6276663b
|
[] |
no_license
|
thambub/eatnowtrack
|
57e8cc98897de8904adf91cd0328c4abda61d632
|
998eceed88ccdc8f1de9b2823bd0a175d1a25190
|
refs/heads/master
| 2023-01-28T17:09:30.821820 | 2020-12-03T23:47:14 | 2020-12-03T23:47:14 | 318,330,861 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 674 |
py
|
# Generated by Django 3.0.8 on 2020-11-15 23:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0045_auto_20201115_1516'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='post_connected',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Post'),
),
migrations.AlterField(
model_name='preference',
name='post',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Post'),
),
]
|
[
"[email protected]"
] | |
6fb599d6df3dd66e74d595abedac11f9dba1dbca
|
5b68f33352ac79df3dd69e580a482f9284980b5f
|
/cellcycleclassification/__init__.py
|
dc59cad5e1a6ff548bfd5f76d0e5fc71ce6f4ee4
|
[] |
no_license
|
em812/CellCycleClassification
|
709a454f0563ef46c4d6c719090301183f6c4424
|
0d782112f1114e7a10b27a94e2a7912b9e4a72f3
|
refs/heads/master
| 2023-02-16T12:02:24.216249 | 2021-01-11T21:48:01 | 2021-01-11T21:48:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 332 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 11 18:56:02 2021
@author: lferiani
"""
from pathlib import Path
base_path = Path(__file__).parent
BINARY_MODEL_PATH = base_path / 'trained_models' / 'v_06_60_best.pth'
MULTICLASS_MODEL_PATH = (
base_path / 'trained_models' / 'v_12_63_20201218_213041.pth')
|
[
"[email protected]"
] | |
f03251f2e2c93487fb9538d28c53e60da6493523
|
772f8f0a197b736cba22627485ccbdb65ed45e4b
|
/day09/mygui3.py
|
85903ce1a15afbb06fa75763d482edd7e38d2f79
|
[] |
no_license
|
zhpg/python1805
|
ddc69cd1b3bda8bef1cb0c2913d456ea2c29a391
|
3d98c8ebc106fd0aab633a4c99ae6591013e4438
|
refs/heads/master
| 2020-03-26T11:26:59.378511 | 2018-08-05T09:25:21 | 2018-08-05T09:25:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 610 |
py
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import tkinter
from functools import partial

def say_hi(world):
    def welcome():
        label.config(text='Hello %s' % world)
    return welcome

root = tkinter.Tk()
label = tkinter.Label(text='Hello world', font=('Helvetica', 15))
b1 = tkinter.Button(root, bg='red', fg='white', text='button1', command=say_hi('sss'))
MyButton = partial(tkinter.Button, root, bg='red', fg='white')  # partial function: preset common Button options
b2 = MyButton(text='button2', command=say_hi('chine'))
b3 = MyButton(text='quit', command=root.quit)  # pass the method itself, not its result
label.pack()  # lay out the widgets with the pack geometry manager
b1.pack()
b2.pack()
b3.pack()
root.mainloop()  # enter the Tk event loop
|
[
"[email protected]"
] | |
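For reference, a self-contained example of the functools.partial pattern the script above reaches for: pass the callable plus preset arguments, and call the result later.

from functools import partial

def greet(greeting, name):
    return '%s, %s!' % (greeting, name)

hello = partial(greet, 'Hello')  # wrap the callable itself, not the result of calling it
print(hello('world'))            # -> Hello, world!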
d9c287cab24e6c7f442c5f92b060f296c5897baf
|
efa947c7bc90a0dfbce2ada2fe0986a4fa3baff0
|
/todos/urls.py
|
a9d19064c4ddd51a387b814b1451ac0d0585f07c
|
[] |
no_license
|
Lakshmeesh-H/pythontodo
|
064a7cd6d621d780238d69cc061c8eff7b8c74bb
|
3d344dd5ade3d79e36d0619369419b8562ce5cc3
|
refs/heads/master
| 2020-04-26T07:47:12.580144 | 2019-03-02T07:57:21 | 2019-03-02T07:57:21 | 173,403,977 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 116 |
py
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index')
]
|
[
"[email protected]"
] | |
98252c27be41a24bc7fc6115b4d0a60b67779206
|
9b75aba6053794beb3ba50ba86fbc41909f54fa3
|
/welp/api/client.py
|
ca83bd4271156e7ba96fd3abb5b8dca36acd915d
|
[
"MIT"
] |
permissive
|
azye/welp
|
a16a07391d3db6773038eb7c16fc7e27bd79a355
|
af18a94673c8e04fdf10f8222d15b67af924f9e8
|
refs/heads/master
| 2023-05-09T13:15:38.346926 | 2021-05-30T04:27:33 | 2021-05-30T04:27:33 | 237,330,064 | 0 | 0 |
MIT
| 2021-03-06T10:50:20 | 2020-01-31T00:03:34 |
Python
|
UTF-8
|
Python
| false | false | 271 |
py
|
import requests
from .google.geolocation import Geolocation
from .yelp.business import BusinessSearch
class Client:
def __init__(self):
self.session = requests.session()
self.yelp = BusinessSearch(self)
self.geolocation = Geolocation(self)
|
[
"[email protected]"
] | |
781ffa6094e1e065a1662ff414e97c2d8e72f5f6
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_clambake.py
|
a60e6c1d2e25f9b27e08fb830685e405c05f668b
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 359 |
py
|
# class header
class _CLAMBAKE():
def __init__(self,):
self.name = "CLAMBAKE"
self.definitions = [u'an event in which seafood is cooked and eaten outside, usually near the sea']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"[email protected]"
] | |
93431b4260ae9bcc50dc2babafb602fe5f3a56f8
|
f3598888ce889075d006de9559aa67499ca0d708
|
/Common/CenterToLeft.py
|
d1cf789f60c0bb67849262f0612c7c308bf8032d
|
[] |
no_license
|
JinYanming/jym_cmot_semi_mask
|
6f1ceafa344d2831cdc91e1af0515b417b3939d6
|
be5fc9694f802ab0fb2eaeb11c7eca10ee0e72b3
|
refs/heads/master
| 2022-02-20T05:56:36.418283 | 2019-09-18T18:23:40 | 2019-09-18T18:23:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 722 |
py
|
# Generated with SMOP 0.41
from libsmop import *
# /workspace/MOT/cmot-v1/Common/CenterToLeft.m
@function
def CenterToLeft(x=None,y=None,height=None,width=None,*args,**kwargs):
varargin = CenterToLeft.varargin
nargin = CenterToLeft.nargin
## Copyright (C) 2014 Seung-Hwan Bae
## All rights reserved.
# (x,y): Center position
h_height=height / 2
# /workspace/MOT/cmot-v1/Common/CenterToLeft.m:7
h_width=width / 2
# /workspace/MOT/cmot-v1/Common/CenterToLeft.m:8
L_x=x - round(h_width)
# /workspace/MOT/cmot-v1/Common/CenterToLeft.m:10
L_y=y - round(h_height)
# /workspace/MOT/cmot-v1/Common/CenterToLeft.m:11
return L_x,L_y
if __name__ == '__main__':
pass
|
[
"[email protected]"
] | |
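The SMOP output above mechanically mirrors the MATLAB source; an idiomatic hand translation of the same center-to-top-left conversion might look like this (note that Python 3's round() uses banker's rounding, unlike MATLAB's round-half-away-from-zero).

def center_to_left(x, y, height, width):
    """Convert a box's center (x, y) to its top-left corner."""
    l_x = x - round(width / 2)
    l_y = y - round(height / 2)
    return l_x, l_y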
085d1484b6ee362c5c3585056107cee77dbe61f9
|
2a6640a155c3b5d64b57c2403d1eaa573a24cef4
|
/sales/models.py
|
f68ef1890fcff41b94d3ad9c709dc408ac1194be
|
[] |
no_license
|
mortex/Mortex-System
|
db29813cc0e82c0c5fc840161b1703ffadecacba
|
f24972012a05dd4b5700b121c31b5bd5298b106c
|
refs/heads/master
| 2020-05-06T13:52:51.243130 | 2011-02-20T20:48:08 | 2011-02-20T20:48:08 | 1,390,293 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,893 |
py
|
from django.db import models
class ShirtStyles(models.Model):
ShirtStyleNumber = models.CharField('Style Number', max_length=20)
ShirtStyleDescription = models.CharField('Description', max_length=200)
def __unicode__(self):
return self.ShirtStyleNumber + ' ' + self.ShirtStyleDescription
class StyleColorCategories(models.Model):
StyleColorCategoryName = models.CharField('Color Category', max_length=20)
class ShirtStyleSKUs(models.Model):
ShirtStyle = models.ForeignKey(ShirtStyles)
StyleColorCategory = models.ForeignKey(StyleColorCategories)
ShirtStyleSize = models.CharField('Size', max_length=2)
FabricRollYield = models.IntegerField('Fabric Roll Yield')
KnitSize = models.FloatField('Knit Size')
SizePrice = models.FloatField('Size Price')
Active = models.BooleanField()
def __unicode__(self):
return self.ShirtStyleSize
class Customers(models.Model):
CustomerName = models.CharField('Customer Name', max_length=40)
def __unicode__(self):
return self.CustomerName
class ShirtOrders(models.Model):
Customer = models.ForeignKey(Customers)
PONumber = models.CharField('PO#', max_length=20)
Complete = models.BooleanField()
def __unicode__(self):
return self.PONumber
class ShirtOrderSKUs(models.Model):
ShirtOrder = models.ForeignKey(ShirtOrders)
ShirtStyleSKU = models.ForeignKey(ShirtStyleSKUs)
OrderQuantity = models.IntegerField('Quantity')
class Shipments(models.Model):
DateShipped = models.DateTimeField('Date Shipped')
class ShirtSKUInventory(models.Model):
CutOrder = models.CharField('Cut Order', max_length=20)
Pieces = models.IntegerField()
Add = models.BooleanField()
class ShipmentSKUs(models.Model):
Shipment = models.ForeignKey(Shipments)
ShirtOrderSKU = models.ForeignKey(ShirtOrderSKUs)
CutOrder = models.CharField('Cut Order', max_length=20)
|
[
"[email protected]"
] | |
ac6c740e1eb8aae6b9ed374a1d30c0b69a3fcaca
|
7a2369f3bbb12ca187a92b7607300b3010ad01d6
|
/heavywaterapp/settings.py
|
216322a8ad1cea6af92cbebbe9d2a3fbc38384dc
|
[] |
no_license
|
DanielJKelly/doc-clf
|
68f3e53b4c59413a73267130ea776b3c9826f1ba
|
d2eba4a97221b0885cd8d9556946730832f795d2
|
refs/heads/master
| 2020-03-14T17:33:58.889579 | 2018-05-07T16:54:27 | 2018-05-07T16:54:27 | 131,723,083 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,278 |
py
|
"""
Django settings for heavywaterapp project.
Generated by 'django-admin startproject' using Django 2.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
SECRET_KEY = os.environ.get('SECRET_KEY')
ALLOWED_HOSTS = [
'django-env4.p3r3dh3xti.us-east-1.elasticbeanstalk.com',
'localhost',
'127.0.0.1'
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'docclf'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'heavywaterapp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['docclf/templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'heavywaterapp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'US/Eastern'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'static'
|
[
"[email protected]"
] | |
f43eac6a2df0d3e193a9bd001c6fd16852221592
|
898289e0a9e6638f1a85ceeb7efab6e031306355
|
/sharkcop-server/utils/Helper.py
|
0e9e3959bbd6406c36a9c4a51d3cc6c311de9f84
|
[] |
no_license
|
CaoHoangTung/sharkcop
|
7c6beaf43765b6b2ae5b3d7a30d3842a2ff29726
|
7322601caf3a3780e38dda13960949f9b37e1931
|
refs/heads/master
| 2021-08-22T17:45:53.495133 | 2020-10-02T01:57:11 | 2020-10-02T01:57:11 | 227,616,144 | 8 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,945 |
py
|
import threading
from utils.Checker import Checker
# `extract` below is never imported in this file; it is assumed to be
# tldextract.extract, which splits a URL into (subdomain, domain, suffix).
from tldextract import extract
class Helper():
    def url_is_internal(url, compare):
        # True when url and compare share the same registered domain, or when
        # url is a relative link (not starting with "http" and not an anchor)
        return ".".join(extract(url)) == ".".join(extract(compare)) or (url[0:4] != "http" and url[0] != "#")
def embed_url(url):
features_size = 30
threads = [None]*features_size
arr_threads_result = []
arr = []
try:
            # feature checkers in index order 0..29; each one runs in its own thread
            checkers = [
                Checker.having_IP_Address, Checker.URL_Length,
                Checker.Shortining_Service, Checker.having_At_Symbol,
                Checker.double_slash_redirecting, Checker.Prefix_Suffix,
                Checker.having_Sub_Domain, Checker.SSLfinal_State,
                Checker.Domain_registeration_length, Checker.Favicon,
                Checker.port, Checker.HTTPS_token, Checker.Request_URL,
                Checker.URL_of_Anchor, Checker.Links_in_tags, Checker.SFH,
                Checker.Submitting_to_email, Checker.Abnormal_URL,
                Checker.Redirect, Checker.on_mouseover, Checker.RightClick,
                Checker.popUpWidnow, Checker.Iframe, Checker.age_of_domain,
                Checker.DNSRecord, Checker.web_traffic, Checker.Page_Rank,
                Checker.Google_Index, Checker.Links_pointing_to_page,
                Checker.Statistical_report,
            ]
            for i, checker in enumerate(checkers):
                # bind checker and i via default arguments so each thread keeps
                # its own function and index (avoids the late-binding pitfall)
                threads[i] = threading.Thread(
                    target=lambda fn=checker, idx=i: arr_threads_result.append((fn(url), idx)))
for i in range(features_size):
threads[i].start()
for i in range(features_size):
threads[i].join()
arr_threads_result.sort(key=lambda tup: tup[1])
for elem in arr_threads_result:
arr.append(elem[0])
return arr
except Exception as e:
return e
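# Usage sketch (hypothetical URL; most Checker features hit the network, so
# real results depend on connectivity):
#   features = Helper.embed_url("https://example.com")
#   print(len(features))  # expected: 30 values, ordered by feature index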
|
[
"[email protected]"
] | |
cb2c71aa663f2ac3fe8a4c27dbcd52b9d4deb57d
|
ac33fa7e87fd771c2169a254b301665271a9dbb1
|
/util/preprocessing_yesno.py
|
82b15f6ee26c596ec48f23a8b97f88f7997febed
|
[
"Apache-2.0"
] |
permissive
|
mimbres/FFTNet
|
c9444ed974040daf3909d30353f5bb1530e2b19d
|
3a6bfb4731bab2e0a59fc3a1ddb55f19f84aeba2
|
refs/heads/master
| 2021-07-06T02:50:09.106668 | 2020-07-21T19:50:37 | 2020-07-21T19:50:37 | 132,438,209 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,724 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
preprocessing_yesno.py
Created on Thu May 3 00:15:48 2018
This code saves:
- apply zero padding to the first 48,000 samples
- [mu-law encoded audio] to <out_filedir>/enc
- [mfcc] to <out_filedir>/mfcc
- NOT IMPLEMENTED YET ([f0] to <out_filedir>/f0 *)
@author: sungkyun
"""
import argparse
import numpy as np
import pandas as pd # required for generating .csv files
import librosa # required for audio pre-processing, loading mp3 (sudo apt-get install libav-tools)
import glob, os # required for obtaining test file ID
from util.utils import mu_law_encode
#%% Argument Parser
parser = argparse.ArgumentParser(description='Audio Preprocessing for yesno dataset')
parser.add_argument('-sr', '--sr', type=int, default=16000, metavar='N',
help='target sampling rate, default 16000')
parser.add_argument('-zp', '--zero_pad', type=int, default=48000, metavar='N',
help='number of zero-padding samples to prepend, default 48000')
parser.add_argument('-i', '--input_filedir', type=str, default='data/waves_yesno/', metavar='N',
help='input source dataset directory, default=data/waves_yesno/')
parser.add_argument('-o', '--out_filedir', type=str, default='data/processed_yesno/', metavar='N',
help='output file directory(root of .wav subdirectories and .csv file), default=data/processed_yesno/')
args = parser.parse_args()
input_file_dir = args.input_filedir
output_file_dir = args.out_filedir
#%% Function def.
def displayFeat(x_spec=np.ndarray):
import matplotlib.pyplot as plt
import librosa.display
plt.figure(figsize=(10, 4))
librosa.display.specshow(x_spec, x_axis='time')
return 0
#%% Preprocessing --> save <u-enc> <mfcc> as .npy
input_file_paths = sorted(glob.glob(input_file_dir + '*.wav'))
file_ids = [path.split('/')[-1][:-4] for path in input_file_paths]
# Load audio -> mono -> resample -> mfcc -> save
if not os.path.exists(output_file_dir):
os.makedirs(output_file_dir)
if not os.path.exists(output_file_dir + 'mulaw/'):
os.makedirs(output_file_dir + 'mulaw/')
if not os.path.exists(output_file_dir + 'mfcc/'):
os.makedirs(output_file_dir + 'mfcc/')
total_input_files = len(input_file_paths)
for i in range(total_input_files):
x_raw, sr = librosa.load(input_file_paths[i], sr=args.sr, mono=True) # Normalize?
x_raw = np.pad(x_raw, (args.zero_pad,0), mode='constant') # padding first 48,000 samples with zeros
#x_spec = librosa.feature.melspectrogram(y=x_raw, sr=sr, power=2.0, n_fft = 400, hop_length=160, n_mels=128)
x_spec = librosa.feature.melspectrogram(y=x_raw, sr=sr, power=2.0, n_fft = 400, hop_length=1, n_mels=128)
x_mfcc = librosa.feature.mfcc(S=librosa.power_to_db(x_spec), sr=args.sr, n_mfcc=25)
# displayFeat(x_spec); displayFeat(x_mfcc)
if x_mfcc.shape[1] > len(x_raw):
x_mfcc = x_mfcc[:,0:len(x_raw)]
elif x_mfcc.shape[1] < len(x_raw):
x_raw = x_raw[0:x_mfcc.shape[1]]
x_mulaw = mu_law_encode(x_raw)
# Save mulaw
save_file_path_mulaw = output_file_dir + 'mulaw/' + file_ids[i] + '.npy'
np.save(save_file_path_mulaw, x_mulaw.astype('uint8'))
# Save mfcc
save_file_path_mfcc = output_file_dir + 'mfcc/' + file_ids[i] + '.npy'
np.save(save_file_path_mfcc, x_mfcc)
print('Preprocessing: {} files completed.'.format(total_input_files))
#%% Train/test split --> generate .csv
# Train/test split : 54 files for train, 6 files for test
test_id_sel = [5,11,22,38,43,55]
train_id_sel = list(set(range(60)).difference(set(test_id_sel)))
# Prepare pandas dataframes
df_test = pd.DataFrame(columns=('file_id', 'mulaw_filepath', 'mfcc_filepath'))
df_train = pd.DataFrame(columns=('file_id', 'mulaw_filepath', 'mfcc_filepath'))
for idx in test_id_sel:
save_file_path_mulaw = output_file_dir + 'mulaw/' + file_ids[idx] + '.npy'
save_file_path_mfcc = output_file_dir + 'mfcc/' + file_ids[idx] + '.npy'
df_test.loc[len(df_test)] = [file_ids[idx], save_file_path_mulaw, save_file_path_mfcc] # add a new row into DataFrame
for idx in train_id_sel:
save_file_path_mulaw = output_file_dir + 'mulaw/' + file_ids[idx] + '.npy'
save_file_path_mfcc = output_file_dir + 'mfcc/' + file_ids[idx] + '.npy'
df_train.loc[len(df_train)] = [file_ids[idx], save_file_path_mulaw, save_file_path_mfcc] # add a new row into DataFrame
# Save .csv
df_test.to_csv(output_file_dir + 'test.csv', encoding='utf-8')
df_train.to_csv(output_file_dir + 'train.csv', encoding='utf-8')
print('Preprocessing: generated test.csv and train.csv files in {}.'.format(output_file_dir))
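# Example invocation (all values shown are the argparse defaults above):
#   python preprocessing_yesno.py -sr 16000 -zp 48000 -i data/waves_yesno/ -o data/processed_yesno/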
|
[
"[email protected]"
] | |
a3b6ab4f0b46969ac9e288517ab593a9c27966fb
|
f77a842ac3f196d1fdb6eb0c2dcbafbbdb130ac0
|
/tools/linux/Linux_SecurityAVB/avb-challenge-verify.py
|
7b6e856174eb20bed2e63fee27e22b89ae90192c
|
[] |
no_license
|
mrigendrachaubey/nanopi
|
c742fe61497c6fb01dd9723d20e474fb1c46ab91
|
d8e917c6440c672b05b0663b794c4bafc799df1d
|
refs/heads/master
| 2021-07-15T03:08:38.613135 | 2021-05-25T09:51:10 | 2021-05-25T09:51:10 | 247,435,551 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,125 |
py
|
#!/usr/bin/env python
"this is a test module for getting unlock challenge"
import sys
import os
from hashlib import sha256
def challenge_verify():
    if len(sys.argv) != 3:
        print "Usage: avb-challenge-verify.py [challenge_file] [product_id_file]"
        return
    if (sys.argv[1] == "-h") or (sys.argv[1] == "--h"):
        print "Usage: avb-challenge-verify.py [challenge_file] [product_id_file]"
        return
    # initialize the handles so the finally block cannot hit a NameError
    # when an earlier open() fails
    challenge_file = product_id_file = challenge_random_file = None
    try:
challenge_file = open(sys.argv[1], 'rb')
product_id_file = open(sys.argv[2], 'rb')
challenge_random_file = open('unlock_challenge.bin', 'wb')
challenge_data = challenge_file.read(52)
product_id_data = product_id_file.read(16)
product_id_hash = sha256(product_id_data).digest()
print("The challege version is %d" %ord(challenge_data[0]))
if (product_id_hash != challenge_data[4:36]) :
print("Product id verify error!")
return
challenge_random_file.write(challenge_data[36:52])
print("Success!")
finally:
if challenge_file:
challenge_file.close()
if product_id_file:
product_id_file.close()
if challenge_random_file:
challenge_random_file.close()
if __name__ == '__main__':
challenge_verify()
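# Example run (hypothetical input files):
#   python avb-challenge-verify.py challenge.bin product_id.bin
# On success, the 16-byte challenge random is written to unlock_challenge.bin.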
|
[
"[email protected]"
] | |
9c0f12c71633297f2138baedc5d8a36a3f503ace
|
7337a861cd6fc6d445bebcc0b35738c25c0e2056
|
/Luck_check.py
|
542b453cd2797da88a89e394b98bcaec3641fb54
|
[] |
no_license
|
szyymek/Python
|
556ba510709a3a1095dd19648f949422cb69ba96
|
0a6b25eeb39dcec5fef090f09a037353638f3b99
|
refs/heads/master
| 2020-07-24T01:20:53.418696 | 2019-10-04T08:05:06 | 2019-10-04T08:05:06 | 207,760,122 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 289 |
py
|
def luck_check(string):
    # a "lucky" ticket: the digit sum of the left half equals the digit sum
    # of the right half; for odd lengths the middle digit is dropped first
srodek=len(string)
if srodek%2==1:
string=string[:int(srodek/2)]+string[(int(srodek/2))+1:]
lewa=prawa=0
for x in string[:int(srodek/2)]:
lewa+=int(x)
for y in string[int(srodek/2):]:
prawa+=int(y)
return prawa==lewa
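# Example: "683179" -> 6+8+3 == 1+7+9 -> True
# print(luck_check("683179"))  # True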
|
[
"[email protected]"
] | |
6e0a89dab8282ce28c62edeed53f22fefad4d2e7
|
6dc8db56c090c814840010d215da4d9f518eb7b3
|
/rt-rank/script/get_nfeat.py
|
30ac703502cab975b5f3396f7d5d370110d342a9
|
[] |
no_license
|
fancyspeed/solution_of_kaggle_merck
|
571e4d008141bf6f161ce76af0bf2439dddfb3e9
|
f1da95bd69529395b8b370831c854f16ba1d90a3
|
refs/heads/master
| 2016-09-05T15:03:59.762071 | 2014-07-23T16:35:07 | 2014-07-23T16:35:07 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 449 |
py
|
def get_head_train(line):
arr = line.strip().split(',')[2:]
#head = [ele.split('_')[1] for ele in arr]
head = [str(i+1) for i in range(len(arr))]
return head
def trans_train(p_in):
n_cur = 0
for line in open(p_in):
n_cur += 1
if n_cur == 1:
head = get_head_train(line)
print head[-1]
if __name__ == '__main__':
import sys
if len(sys.argv) < 2:
print '<usage> in'
exit(-1)
trans_train(sys.argv[1])
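# Usage sketch (hypothetical CSV): for a first line "id,target,f1,f2,f3" the
# columns after the first two are numbered 1..3, so this prints "3":
#   python get_nfeat.py train.csv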
|
[
"[email protected]"
] | |
fa8d5992af22569fce3cc34d5d811404061b7321
|
fa1002dd32e2332396d0b359094050825e42c343
|
/emotion-program-pi-version.py
|
0cea550ccba505b69dfebe8c3071874a0e5092a7
|
[] |
no_license
|
vanstorm9/Emotion-Recognition-DOF
|
ced912158e45636b53469b3dc0645bb4c5ab69f8
|
0d6e395bf950388a37065cb9ccf1bba44171c35f
|
refs/heads/master
| 2021-01-10T02:14:11.917489 | 2018-05-26T04:17:40 | 2018-05-26T04:17:40 | 45,325,932 | 28 | 15 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,001 |
py
|
# Program implemented in the Raspberry Pi (with camera module)
#from matplotlib import pyplot as plt
#from sklearn.naive_bayes import GaussianNB
import numpy as np
import math
import cv2
import os
import os.path
import io
from time import time
import picamera
import smtplib
#camera = picamera.PiCamera()
from time import sleep
#import pyttsx
# Libraries to preform machine learning
import sys
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.decomposition import PCA, RandomizedPCA
# from mpl_toolkits.mplot3d import Axes3D
from sklearn.externals import joblib
from sklearn import cross_validation
from sklearn.linear_model import Ridge
from sklearn.learning_curve import validation_curve, learning_curve
def emotion_to_text(pred):
smtpUser= "(ENTER YOUR EMAIL ADDRESS)"
smtpPass= "(ENTER YOUR EMAIL ACCOUNT'S PASSWORD)"
toAdd = "[email protected]"
fromAdd = smtpUser
if pred == "Neutral":
subject = "How are you doing?"
body = "Hey! Just checking in, I was just wondering how you are doing today. \n \n - Rapiro"
elif pred == "Angry":
subject = "Are you okay? You look mad"
body = "I noticed that you are a bit red. Did something annoy or aggrivate you? /n -Rapiro"
elif pred == "Shocked":
subject = "Did something scare or surprised you?"
body = "What's wrong, you look like you have seen a ghost. . . \n Rapiro"
else:
subject = "You seem happy today"
body = "Hey there! I am very happy that you are happy ^_^ \n \n -Rapiro"
header = "To: " + toAdd + "\n" + "From: " + fromAdd + "\n" + "Subject: " + subject
#print header + "\n" + body
s = smtplib.SMTP('smtp.gmail.com', 587)
s.ehlo()
s.starttls()
s.ehlo()
s.login(smtpUser, smtpPass)
s.sendmail(fromAdd, toAdd, header + "\n" + body)
s.quit()
# Cannot use due to memory error
def pca_calc(main):
n_components = 90000
print '----------------------'
print main.shape
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(main)
main = pca.transform(main)
print main.shape
return main
def motor_emotion_response(pred):
if pred == 'Smiling':
print 'Activating command. . .'
os.system("./rapirocommands 6")
sleep(5)
os.system("./rapirocommands 0")
print 'Command finished'
elif pred == 'Neutral':
print 'Activating neutral command. . .'
os.system("./hellobash")
sleep(5)
os.system("./rapirocommands 5")
sleep(5)
os.system("./rapirocommands 0")
print 'End command'
elif pred == 'Angry':
print 'Activating angry command. . .'
os.system("./rapirocommands 4")
sleep(2)
os.system("./rapirocommands 0")
print 'Command ended'
elif pred == 'Shocked':
print 'Activating shocked command'
os.system("./rapiro-commands 2")
sleep(2)
os.system("./rapiro-commands 0")
print 'Command ended'
def draw_flow(im,flow,step=16):
h,w = im.shape[:2]
y,x = np.mgrid[step/2:h:step,step/2:w:step].reshape(2,-1)
fx,fy = flow[y,x].T
# create line endpoints
lines = np.vstack([x,y,x+fx,y+fy]).T.reshape(-1,2,2)
lines = np.int32(lines)
# create image and draw
vis = cv2.cvtColor(im,cv2.COLOR_GRAY2BGR)
for (x1,y1),(x2,y2) in lines:
cv2.line(vis,(x1,y1),(x2,y2),(0,255,0),1)
cv2.circle(vis,(x1,y1),1,(0,255,0), -1)
return vis
def catch_first_frame():
ret, frame_f = capf.read()
prev_gray = cv2.cvtColor(frame_f,cv2.COLOR_BGR2GRAY)
prev_gray = cv2.resize(prev_gray, (0,0), fx=0.27, fy=0.27)
face = face_classifier.detectMultiScale(prev_gray, 1.2, 4)
if len(face) == 0:
print 'No face was detected'
print prev_gray.shape
exit()
else:
print 'Face detected'
for (x,y,w,h) in face:
prev_gray = prev_gray[y: y+h, x: x+w]
capf.release()
return (x,y,w,h, prev_gray)
def sensitive_override_check(prob_s, pred):
if pred == 'Neutral':
override_arr = [prob_s[0,3], prob_s[0,2], prob_s[0,0]]
max_comp = max(override_arr)
max_ind = [i for i, j in enumerate(override_arr) if j == max_comp][0]
qualified_override = False
if max_comp > 30:
qualified_override = True
if qualified_override:
if max_ind == 0:
pred = 'Smiling'
elif max_ind == 1:
pred = 'Shocked'
else:
pred = 'Angry'
#print 'Sensitive Override triggered. . .'
return pred
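# Example (hypothetical probabilities; the class column order is
# [Angry, Neutral, Shocked, Smiling], matching the printout near the end):
#   prob_s = [[10.0, 55.0, 31.0, 4.0]] -> 'Neutral' is overridden to 'Shocked',
#   since the strongest non-neutral score (31.0) clears the 30-point threshold.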
def emotion_to_speech(pred):
engine = pyttsx.init()
rate = engine.getProperty('rate')
engine.setProperty('rate', rate)
if pred == 'Neutral':
speech = 'Hello, you seem fine today'
elif pred == 'Smiling':
speech = 'You seem happy. I am very happy that you are happy!'
elif pred == 'Shocked':
speech = 'What is wrong? You look like you seen a ghost.'
elif pred == 'Angry':
speech = 'Why are you angry? Did something annoy or frustrate you?'
print speech
engine.say(speech)
engine.runAndWait()
motor_emotion_response("Smiling")
slash = '/'
folder_trans = np.array([])
target = np.array([])
label_trans = np.array([])
folder = ''
choice = ''
face_classifier = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_default.xml')
#print 'Load datasets [l] from file or create a new one [n]'
loading = 'l'
if loading == 'l':
#print 'Press [p] to predict test dataset, or else press any key'
predict_start = 'n'
else:
predict_start = 'p'
if loading=='l':
# load dataset matrix from npy file
t0 = time()
t1 = time()
if predict_start == 'p':
print 'Loading the main matrix. . .'
main = np.load('optical-main-mini.npy')
        diff = time() - t1
print 'Loaded main matrix in ', diff, 's of size ', main.shape
t2 = time()
print 'Loading the target vector. . .'
target = np.load('optical-target-mini.npy')
diff = time() - t2
print 'Loaded target in ', diff, 's of size ', target.shape
print 'Finished'
total_time = time() - t0
print total_time, 's'
t0 = time()
if loading == 'l':
print 'Now loading trained model. . .'
model = joblib.load('Optical-Model-Mini/optical-model-mini.pkl')
t1 = time()
print 'Loading time: ', round(time()-t0, 3), 's'
else:
features_train, features_test, labels_train, labels_test = cross_validation.train_test_split(main, target, test_size = 0.2)
print 'Now training. . .'
model = SVC(probability=True)
'''
#model = SVC(kernel='poly')
#model = GaussianNB()
'''
model.fit(features_train, labels_train)
print 'training time: ', round(time()-t0, 3), 's'
print 'Saving model. . .'
t1 = time()
joblib.dump(model, 'Optical-Model-Mini/optical-model-mini.pkl')
t3 = time()
print 'model saving time: ', round(time()-t0, 3), 's'
print 'Now predicting. . .'
if predict_start == 'p':
if loading == 'l':
features_train, features_test, labels_train, labels_test = cross_validation.train_test_split(main, target, test_size = 0.2)
# Determine amount of time to train
t1 = time()
pred = model.predict(features_test)
print 'predicting time: ', round(time()-t1, 3), 's'
accuracy = accuracy_score(labels_test, pred)
print 'Confusion Matrix: '
print confusion_matrix(labels_test, pred)
    # Accuracy typically lands in the 0.9333, 0.9667, 1.0 range
print accuracy
# ---------------------------------
while True:
# Test with another video
while True:
print 'Press [n] to go into normal mode or [s] to go into sensitive mode'
sensitive_out = raw_input()
if sensitive_out == 'n' or sensitive_out == 's':
break
# Manually setting x, y, w, h values in order make more consistant test
# and training videos
x = 63
y = 35
w = 64
h = 64
#prev_gray = frame_f.copy()
#prev_gray = cv2.cvtColor(prev_gray, cv2.COLOR_BGR2GRAY)
# Start video to record the user
#cap to record user for 15 frames
cap = cv2.VideoCapture(0)
# Name of the video file
path = 'test.h264'
# Starting video
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(path,fourcc, 20.0, (640,480))
print 'Press any key to start recording'
go = raw_input()
# New recording feature for the Raspberry Pi
with picamera.PiCamera() as camera:
print 'Starting recording. . .'
camera.vflip = True
camera.start_recording(path)
print 'Before sleep'
sleep(5)
print 'After sleep'
print 'Stopping the camera from recording. . .'
camera.stop_recording()
print 'Finished recording'
    # Cap3 grabs the first frame to use as the optical-flow reference
cap3 = cv2.VideoCapture(path)
ret, prev_gray = cap3.read()
prev_gray = cv2.cvtColor(prev_gray,cv2.COLOR_BGR2GRAY)
prev_gray = cv2.resize(prev_gray, (0,0), fx=0.27, fy=0.27)
prev_gray = prev_gray[y: y+h, x: x+w]
cap3.release()
#face = face_classifier.detectMultiScale(prev_gray, 1.2, 4)
j = 0
# To analyze the recording and make an emotion prediction
cap4 = cv2.VideoCapture(path)
max_frame = 36
while True:
print 'j: ', j
ret, frame = cap4.read()
        if frame is None:
print 'Frame failure, trying again. . .'
cap4.release()
cap4 = cv2.VideoCapture(path)
continue
if j > max_frame + 1:
cap4.release()
break
frame = cv2.resize(frame, (0,0), fx=0.35, fy=0.35)
frame = frame[y: y+h, x: x+w]
#cv2.imshow('To test with', frame)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prev_gray,gray,None, 0.5, 3, 15, 3, 5, 1.2, 0)
# Working with the flow matrix
flow_mat = flow.flatten()
if j == 1:
sub_main = flow_mat
elif j != 0:
sub_main = np.concatenate((sub_main, flow_mat))
prev_gray = gray
# To show us visually each video
#cv2.imshow('Optical flow',draw_flow(gray,flow))
if cv2.waitKey(1) & 0xFF == ord('q'):
break
j = j + 1
cap4.release()
#cv2.destroyAllWindows()
print 'Now predicting. . .'
### Sliding window ###
k_start = 0
k_end = 15 * flow_mat.shape[0]
max_frame = 36 * flow_mat.shape[0]
while k_end < max_frame:
        count = float(k_end) / max_frame
count = np.around(count, decimals=2)
print count, '%'
model.predict(sub_main[k_start:k_end])
prob = model.predict_proba(sub_main[k_start:k_end])
prob_s = np.around(prob, decimals=5)
prob_s = prob_s* 100
# Determine amount of time to predict
t1 = time()
pred = model.predict(sub_main[k_start:k_end])
if sensitive_out == 's':
pred = sensitive_override_check(prob_s, pred)
if pred != 'Neutral':
break
k_start = k_start + (7 * flow_mat.shape[0])
k_end = k_end + (7 * flow_mat.shape[0])
######################
print 'predicting time: ', round(time()-t1, 3), 's'
print ''
print 'Prediction: '
print pred
print 'Probability: '
print 'Neutral: ', prob_s[0,1]
print 'Smiling: ', prob_s[0,3]
print 'Shocked: ', prob_s[0,2]
print 'Angry: ', prob_s[0,0]
print 'Start hello 2'
os.system("./hellobash")
print 'End hello 2'
emotion_to_text(pred)
print 'Starting robot motion response'
motor_emotion_response(pred)
print 'Motion ended'
|
[
"[email protected]"
] | |
a2f9e589693f4eda5cea8869d53759b116acfc76
|
b0e299f6ab0139b831d0ed86cc6da0c3eb80b50d
|
/hello/public/deploy/chal.py
|
3cdcfdcec0ecf9a88f3f75665084382c0d2855d2
|
[] |
no_license
|
kendricktan/paradigm-ctf
|
96768eb6a3ee76867b873e96e2f623796803361c
|
21ba8273f858d1af24d0abdb841bb019e8fa0965
|
refs/heads/main
| 2023-06-26T07:50:39.179665 | 2021-07-31T06:27:11 | 2021-07-31T06:27:11 | 387,947,845 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 191 |
py
|
import eth_sandbox
from web3 import Web3
eth_sandbox.run_launcher([
eth_sandbox.new_launch_instance_action(deploy_value=Web3.toWei(0, 'ether')),
eth_sandbox.new_get_flag_action()
])
|
[
"[email protected]"
] | |
9e4c1b0c6e3548f8390780416d6b8b139e913430
|
a07b644c3edec618d388207a15473072391496bb
|
/authentication/admin.py
|
b4122a2c3e0acf906408eb5865b577f60c778555
|
[] |
no_license
|
sahadeb1234/Ecommerce-Site-using-Django-Framework
|
f16b4c28756eea087859d099cb68504b1839c950
|
9ee09c323ed99c185a88ae3327907f27c53b3dbe
|
refs/heads/master
| 2023-08-11T11:33:54.353876 | 2021-09-27T15:45:51 | 2021-09-27T15:45:51 | 410,944,658 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 274 |
py
|
from django.contrib import admin
from .models import PreRegistration
# Register your models here.
@admin.register(PreRegistration)
class PreRegistrationAdmin(admin.ModelAdmin):
    list_display = ['first_name', 'last_name']
|
[
"[email protected]"
] | |
562c9bb35d998b8ce6627c717bf74278490a6cd0
|
d96c552ff8debe98d6bd8d1ebf376f999ec4bcd1
|
/src/UpdateWeather.py
|
0b33f4692a2d8978e8a19ff8e74ff9f9f43e83b5
|
[] |
no_license
|
shema102/AddressableLedClock
|
535cfc207d7380e92b3a57bfcbf7cfbcc169858d
|
e22703544c2254034187a68dbc0e8678b56cc3f0
|
refs/heads/master
| 2021-02-15T21:32:12.902520 | 2020-04-02T17:03:31 | 2020-04-02T17:03:31 | 244,934,071 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 699 |
py
|
import math
import urequests
apiKey = "&appid=a988988f9b8811b3d5b23b888af9a7ca"
url = "api.openweathermap.org/data/2.5/weather?q="
city = "Kyiv"
url = "https://" + url + city + "&units=metric" + apiKey
response = urequests.get(url)
parsed = response.json()
if parsed["cod"] == 200:
weather = {
"temperature": math.ceil(parsed["main"]["temp"]),
"pressure": math.ceil(parsed["main"]["pressure"] * 0.75006375541921),
"humidity": parsed["main"]["humidity"],
"clouds": parsed["clouds"]["all"],
"wind_speed": parsed["wind"]["speed"],
"wind_direction": parsed["wind"]["deg"],
}
else:
print("Error {}".format(parsed["cod"]))
response.close()
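# On a successful (assumed) response, `weather` ends up shaped like:
#   {'temperature': 21, 'pressure': 742, 'humidity': 52,
#    'clouds': 40, 'wind_speed': 4.1, 'wind_direction': 250}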
|
[
"[email protected]"
] | |
c9a9d2dda80846c0c2c7b067316cfabaf6aed24b
|
321b4ed83b6874eeb512027eaa0b17b0daf3c289
|
/1266/1266.minimum-time-visiting-all-points.289659185.Accepted.leetcode.python3.py
|
76eb95f002965918c8ee1bffff4858d8a5a97364
|
[] |
no_license
|
huangyingw/submissions
|
7a610613bdb03f1223cdec5f6ccc4391149ca618
|
bfac1238ecef8b03e54842b852f6fec111abedfa
|
refs/heads/master
| 2023-07-25T09:56:46.814504 | 2023-07-16T07:38:36 | 2023-07-16T07:38:36 | 143,352,065 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 277 |
py
|
class Solution(object):
def minTimeToVisitAllPoints(self, points):
x1, y1 = points[0]
time = 0
for x2, y2 in points[1:]:
dx, dy = abs(x1 - x2), abs(y1 - y2)
time += max(dx, dy)
x1, y1 = x2, y2
return time
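# Each step moves one unit in x, y, or both, so a segment costs the Chebyshev
# distance max(|dx|, |dy|). Example: [[1,1],[3,4],[-1,0]] -> 3 + 4 = 7
#   Solution().minTimeToVisitAllPoints([[1, 1], [3, 4], [-1, 0]])  # -> 7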
|
[
"[email protected]"
] | |
7b4457f2ee91759bffa3db4f8f238845a1d7c0af
|
49a983da9b890afba6983a35536945fb8e862a76
|
/bestgroup/bestgroup/wsgi.py
|
549c220ab1fb16bdcdf73adb8f5c07c9eda1a411
|
[] |
no_license
|
Mumalo/BestGroups
|
e78d6b4967169f88bdec6f74c8881c7d200886cd
|
a2e4d1c7d60813e9a52554291f66f07b66fdbbea
|
refs/heads/master
| 2021-01-19T20:43:51.854174 | 2017-04-18T21:34:39 | 2017-04-18T21:34:39 | 88,540,777 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 495 |
py
|
"""
WSGI config for bestgroup project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from mezzanine.utils.conf import real_project_name
os.environ.setdefault("DJANGO_SETTINGS_MODULE",
"%s.settings" % real_project_name("bestgroup"))
application = get_wsgi_application()
|
[
"[email protected]"
] | |
d8c6eb7e638620f0db30fcee4607c3f27da7d23c
|
501e9924cb19e95c32e2d168e73ea44e7c9c440c
|
/readfiles.py
|
9c0300bb83848b8231570bcef6216b1d447617f6
|
[] |
no_license
|
Miguelmargar/file-io
|
cc2790b109187dbeec87788c662aaf52d8e96c02
|
f1c6f6ccfefbc572cac83a6ddc21ba2e902ac0c1
|
refs/heads/master
| 2020-03-17T17:23:49.493484 | 2018-05-21T12:22:23 | 2018-05-21T12:22:23 | 133,786,461 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,607 |
py
|
#to read a file's data
# ONE WAY -----------------------------------------
f = open("data.txt", "r") # opens file to read it with "r"
lines = f.readlines() # stores the file info in lines variable therefore in memory
f.close() # closes the file but it is still stored in memory
print(lines)
# OTHER WAY ---------------------------------------
f = open("data.txt", "r") #opens file
lines = f.read().split("\n") # read the whole file into one string, then split on newlines so each line becomes a separate list item
f.close() #closes file but it is still stored in memory
print(lines)
# find most common word in a text file-------------
import re # imports the regular-expressions module from Python's standard library (another standard module would be "random")
import collections
text = open("1155-0.txt").read().lower() # open the file, read it into a single string, and lower-case it
words = re.findall(r"\w+", text) # find every word in the text, returned as a list of strings
long_words = []
for word in words: # keep only the words longer than 5 characters
if len(word) > 5:
long_words.append(word)
most_common = collections.Counter(long_words).most_common(10) # count the long words and keep the 10 most frequent
print(most_common)
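# ANOTHER WAY (sketch, same data.txt as above) ------
# the usual modern idiom: a "with" block closes the file for you,
# even if an exception happens while reading
with open("data.txt", "r") as f:
    lines = f.read().splitlines()
print(lines)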
|
[
"[email protected]"
] | |
9b44fa322343a38acbbcec9a94de34bb8d2c3102
|
3c559d076d6a2533dc19f724ca0d1d9fed942a9a
|
/voice/figures_fpa/parse_new.py
|
eb98cbd05b1055d49523937cfd5453794d041656
|
[] |
no_license
|
YoungofNUAA/Deep-Reinforcement-Learning-for-5G-Networks
|
7e13fa6cfbf2d5f0f85829dc09189cb8cd08c3e7
|
1e8fb6f0407814b7251e443ef1626f6408d85df1
|
refs/heads/master
| 2020-09-11T12:31:22.499797 | 2019-11-11T13:46:47 | 2019-11-11T13:46:47 | 222,065,219 | 1 | 0 | null | 2019-11-16T07:47:47 | 2019-11-16T07:47:47 | null |
UTF-8
|
Python
| false | false | 1,192 |
py
|
#!/usr/local/bin/python3
# Note, if any output has NAN in it, we drop the entire episode from the calculation.
import glob
import re
import numpy as np
import pandas as pd
files = glob.glob('measurements*.txt')
f1 = open('ue_1_sinr.txt', 'a')
f2 = open('ue_2_sinr.txt', 'a')
f3 = open('ue_1_power.txt', 'a')
f4 = open('ue_2_power.txt', 'a')
episodes = []
pattern = '_([0-9]+)_'
for filename in files:
episode = re.findall(pattern, filename)
episodes.append(episode[0])
episodes = np.array(episodes).astype(int)
pd.DataFrame(episodes).to_csv("convergence.txt", index=False, header=False)
pattern = re.compile(r'[\[\]_ \':a-z]+') # get rid of [], colons, and words.
for file in files:
    # a with-block closes each measurements file as soon as it is read
    with open(file, 'r') as f:
        lines = f.read()
    sinr1 = lines.split(':')[1]
    sinr2 = lines.split(':')[2]
    tx1 = lines.split(':')[3]
    tx2 = lines.split(':')[4]
    # Clean up sinr1, 2 by replacing pattern with ''
    f1.write('{},'.format(re.sub(pattern, '', sinr1)))
    f2.write('{},'.format(re.sub(pattern, '', sinr2)))
    f3.write('{},'.format(re.sub(pattern, '', tx1)))
    f4.write('{},'.format(re.sub(pattern, '', tx2)))
f1.close()
f2.close()
f3.close()
f4.close()
|
[
"[email protected]"
] | |
28b8669c94017d20014d6686d8a7e309277cd2be
|
47fecfd05f2ef2881437b9f4716a56f3cbfd2443
|
/unique_in_order_7.6.18.py
|
1a4ed2556a8f0b6ac6da205b9dc392af10d25673
|
[] |
no_license
|
royceduong/codewars
|
0c7d3ff0ef382b8b8eabd59807b5d495dc49d035
|
488410017873355d2dad3b125f9185a6c17a79e7
|
refs/heads/master
| 2020-03-23T00:45:49.941106 | 2018-07-21T06:16:11 | 2018-07-21T06:16:11 | 140,884,203 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,575 |
py
|
# def unique_in_order(iterable):
# array = []
# for v in iterable:
# array.append(v)
# print(array)
# return array
# def unique_in_order(iterable):
# a = list(iterable)
# i = 0
# b = []
# n = len(a)
# while i < n:
# j = 1
# while j < n:
# if j != n-1:
# if a[i] == a[j]:
# j += 1
# else:
# b.append(a[i])
# i = j
# j += 1
# else:
# if a[i] == a[j]:
# b.append(a[i])
# else:
# b.append(a[i])
# b.append(a[j])
# j += 1
# i = n
# print(b)
# def unique_in_order(iterable):
# a = list(iterable)
# n = len(a)
# i = 0
# b = []
# while i < n:
# j = i + 1
# while j < n and a[i] == a[j]:
# j += 1
# b.append(a[i])
# i = j
# print(b)
# return b
# unique_in_order('AAAABBBCCDAABBB') == ['A', 'B', 'C', 'D', 'A', 'B']
# unique_in_order('ABBCcAD') == ['A', 'B', 'C', 'c', 'A', 'D']
# unique_in_order([1,2,2,3,3]) == [1,2,3]
def unique_in_order(iterable):
a = list(iterable)
i = 0
b = []
n = len(a)
while i < n:
j = i + 1
while j < n and a[i] == a[j]:
j += 1
b.append(a[i])
i = j
print(b)
return b
unique_in_order('AAAABBBCCDAABBB')
unique_in_order('ABBCcAD')
unique_in_order([1,2,2,3,3])
|
[
"[email protected]"
] | |
6b3c1d860fb9496abc013b7c05295ca4b5aec5a6
|
b5989239ab2088123317734124305c8c3ba13de1
|
/pets.py
|
b9c849099f0b37c8c6fc7a9005b1524aac331616
|
[] |
no_license
|
iayoung85/2ndsandbox
|
d5771c24ace20290642c7c5a2870f4925d8e7b1a
|
f94f21c7e9b2576ede6d88ad025b99e7ae806bb4
|
refs/heads/master
| 2023-07-13T03:47:21.659957 | 2021-08-25T21:16:39 | 2021-08-25T21:16:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 226 |
py
|
color=input("What's your favorite color? ")
animal=input("What's your favorite animal? ")
number=input("What's your favorite number? ")
print("Ok, I guess your ideal pet would be a "+color,animal+" with "+str(number)+" legs!")
|
[
"[email protected]"
] | |
2209b38614cce66f22db5824dd24b4616f625049
|
2a2b2fdab63be529165ea1032eb532be9f2a1834
|
/process_seq.py
|
66f652977e64504f4727fb62e97d45ca7f3298fd
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
lucymaybio/FastSeq-pipeline
|
da64a6e47d9bb960e3841d1d5d7f423aaef1629b
|
2d9db00e76996c9619a75583bc547be535d692b1
|
refs/heads/master
| 2020-04-21T19:29:44.844101 | 2019-02-08T06:35:54 | 2019-02-08T06:35:54 | 169,808,114 | 0 | 0 |
MIT
| 2019-04-05T16:45:51 | 2019-02-08T22:37:49 |
Python
|
UTF-8
|
Python
| false | false | 13,278 |
py
|
#!/usr/bin/python3.6
"""
Sequence processing pipeline used to analyze packaged viral genomes
"""
from argparse import ArgumentParser
from csv import DictReader, DictWriter
import logging
import os
from pathlib import Path
from subprocess import run
import sys
# Argument parsing setup
parser = ArgumentParser(description='Process sequencing files '
'and collect stats')
parser.add_argument('base_dir', type=str,
help='Base of where processing takes place. All paths '
'in the csv are assumed to be relative to this path '
'and results will be placed in "Output" directory '
'within this path.')
parser.add_argument('csv_file', type=str,
help='CSV file detailing samples, and where the relevant '
'files for those samples can be found, all paths '
'are relative to the base_dir.')
# Logging setup
log = logging.getLogger("fastseq")
log.addHandler(logging.StreamHandler(sys.stdout))
logfile_formatter = logging.Formatter('%(asctime)s - %(name)s - '
'%(levelname)s - %(message)s')
logfile_handler = logging.FileHandler('fastseq.log')
logfile_handler.setFormatter(logfile_formatter)
log.addHandler(logfile_handler)
log.setLevel(logging.INFO)
args = parser.parse_args()
BASE_DIR = Path(args.base_dir)
CSV_PATH = Path(args.csv_file)
STATS_OUTPUT_PATH = BASE_DIR / "Output" / "final_stats.tsv.txt"
# ----------------------------------
# Configuration Variable Definitions
# ----------------------------------
# For simplicity, all program configurations + paths are treated as global
# These are designed to work with the corresponding docker image, can be
# tweaked to work in different contexts.
# Tool Paths
TRIMMOMATIC = "/tools/trimmomatic/trimmomatic-0.38.jar"
BWA = "/tools/bwa/bwa"
SAMTOOLS = "/tools/samtools/bin/samtools"
BCFTOOLS = "/tools/samtools/bin/bcftools"
PICARD = "/tools/picard/picard.jar"
# Configuration for Trimmomatic
LEAD_SCORE = 3
TRAIL_SCORE = 3
MIN_LEN = 50
WINDOW_SIZE = 4
WINDOW_QUALITY = 20
# Configuration for bcftools
VCF_QUAL = 20
VCF_DP = 10
# Configuration for Picard
PICARD_COVERAGE_CAP = 100000
PICARD_FAST_ALG = True
PICARD_SAMPLE_SIZE = 5000
# ----------------------------------
# Function Definitions
# ----------------------------------
def configure_paths(sample, fwd_read, rev_read, adapter_pth, ref_pth):
"""
Create all derived paths based on fwd/rev read, adapter, reference
Also sets up an output directory with sample name to output files
Args:
sample (str): Sample name
fwd_read (str): Path to forward read rel. to docker base in .fastq.gz
rev_read (str): Path to forward read rel. to docker base in .fastq.gz
adapter_pth (str): Path to adapter rel. to docker base in .fasta
see trimmomatic documentation for what to name the sequences in the
.fasta file
ref_pth (str): Path to reference rel. to docker base in .fasta
Returns:
dict: A dictionary with keys of type str, values of type Path,
See function for what keys map to what.
"""
sample_base = BASE_DIR / "Output" / sample
os.makedirs(sample_base)
return {
"output_base": sample_base,
"fwd_read": BASE_DIR / fwd_read,
"rev_read": BASE_DIR / rev_read,
"adapter_pth": BASE_DIR / adapter_pth,
"ref_pth": BASE_DIR / ref_pth,
# Derived Sample Paths
"fwd_trimmed": BASE_DIR / f"{fwd_read}.trimmed.fastq",
"rev_trimmed": BASE_DIR / f"{rev_read}.trimmed.fastq",
"fwd_unpaired": BASE_DIR / f"{fwd_read}.unpaired.fastq",
"rev_unpaired": BASE_DIR / f"{rev_read}.unpaired.fastq",
"sam_file": sample_base / f"{sample}.sam",
"bam_file": sample_base / f"{sample}.bam",
"mpileup_file": sample_base / f"{sample}.mpileup",
"vcf_file": sample_base / f"{sample}.vcf",
"vcf_stats_file": sample_base / f"{sample}.vcf.stats.txt",
"wgs_metrics_file": sample_base / f"{sample}.picard_wgs.txt",
"size_metrics_file": sample_base / f"{sample}.picard_size.txt",
"size_histogram_file": sample_base / f"{sample}.picard_size_hist.pdf"
}
def trimmomatic(sample, paths):
"""
Simple wrapper for applying trimmomatic, trims adapters and cleans
sequence ends. Uses phred33 quality threshold.
Args:
sample (str): Name of sample
paths (dict): Paths collection
Returns: None
"""
log.info(f"Starting trimmomatic for {sample}...")
run(["java", "-jar", TRIMMOMATIC, "PE", "-phred33",
paths["fwd_read"], paths["rev_read"], # Input Files
# Output Files
paths["fwd_trimmed"], paths["fwd_unpaired"],
paths["rev_trimmed"], paths["rev_unpaired"],
f"ILLUMINACLIP:{paths['adapter_pth']}:4:20:10",
f"LEADING:{LEAD_SCORE}",
f"TRAILING:{TRAIL_SCORE}",
f"SLIDINGWINDOW:{WINDOW_SIZE}:{WINDOW_QUALITY}",
f"MINLEN:{MIN_LEN}"])
log.info(f"...end trimmomatic for {sample}.")
def bwa(sample, paths):
"""
Simple wrapper for applying BWA. First indexes then applys mem algorithm
Args:
sample (str): Name of sample
paths (dict): Paths collection
Returns: None
"""
# index reference
log.info(f"Starting BWA Index for {sample}...")
run([BWA, "index", paths["ref_pth"]])
log.info(f"...end BWA Index for {sample}.")
# mem algorithm to align reads + generate .sam file
log.info(f"Starting BWA mem for {sample}...")
with open(paths["sam_file"], "w") as f:
run([BWA, "mem",
paths["ref_pth"], paths["fwd_trimmed"], paths["rev_trimmed"]],
stdout=f) # output to SAMPLE_SAM_PTH for samtools
log.info(f"...end BWA mem for {sample}.")
def samtools(sample, paths):
"""
Wrapper for applying samtools/bcftools.
First converts BAM file to SAM format, then generates a read pileup.
Finally creates a VCF file and filters it (though filtering may not be
working properly).
Args:
sample (str): Name of sample
paths (dict): Paths collection
Returns: None
"""
# convert .sam to .bam
log.info(f"Starting samtools indexing for {sample}...")
with open(paths["bam_file"], "w") as f:
run([SAMTOOLS, "sort", paths["sam_file"]],
stdout=f) # output to SAMPLE_BAM_PTH
run([SAMTOOLS, "index", paths["bam_file"]])
log.info(f"...end samtools indexing for {sample}.")
# generate read pileup
log.info(f"Starting mpileup for {sample}...")
with open(paths["mpileup_file"], "w") as f:
run([BCFTOOLS, "mpileup", "-f",
paths["ref_pth"], paths["bam_file"]],
stdout=f) # output to SAMPLE_MPILEUP_PTH
log.info(f"...end mpileup for {sample}.")
# generate variant calling file (.vcf) for calling SNPs and indels
log.info(f"Starting VCF generation for {sample}...")
with open(paths["vcf_file"], "w") as f:
run([BCFTOOLS, "call", "-c", paths["mpileup_file"]],
stdout=f) # output to SAMPLE_VCF_PTH
log.info(f"...end VCF generation for {sample}.")
# filter .vcf file by quality thresholds
log.info(f"Starting VCF filter for {sample}...")
run([BCFTOOLS, "filter", "-i",
f"QUAL>{VCF_QUAL} && DP>{VCF_DP}",
paths["vcf_file"]])
log.info(f"...end VCF filter for {sample}.")
def generate_stats(sample, paths):
"""
Wrapper to compute stats from bcf tools and from picard.
Gets VCF stats from BCF tools then collects WGS and Size metrics using
Picard.
Args:
sample (str): Name of sample
paths (dict): Paths collection
Returns: None
"""
# BCFTOOLS stats
log.info(f"Starting VCF stats for {sample}...")
with open(paths["vcf_stats_file"], "w") as f:
run([BCFTOOLS, "stats", paths["vcf_file"]],
stdout=f) # output to SAMPLE_VCF_STATS_PTH
log.info(f"...end VCF stats for {sample}.")
# Picard CollectWgsMetrics (library alignment stats)
log.info(f"Starting picard WGS stats for {sample}...")
run(["java", "-Xmx2048m", "-jar", PICARD, "CollectWgsMetrics",
f"COVERAGE_CAP={PICARD_COVERAGE_CAP}",
f"USE_FAST_ALGORITHM={PICARD_FAST_ALG}",
f"SAMPLE_SIZE={PICARD_SAMPLE_SIZE}",
f"I={paths['bam_file']}", # Input file
f"R={paths['ref_pth']}", # Reference file
f"O={paths['wgs_metrics_file']}"]) # Output file
log.info(f"...end picard WGS stats for {sample}.")
# Picard CollectInsertSizeMetrics (fragment size stats)
log.info(f"Starting picard size stats for {sample}...")
run(["java", "-Xmx2048m", "-jar", PICARD, "CollectInsertSizeMetrics",
f"I={paths['bam_file']}",
f"H={paths['size_histogram_file']}",
f"O={paths['size_metrics_file']}"])
log.info(f"...end picard size stats for {sample}.")
def extract_bcf_stats(path):
"""
Extract relevant information from BCF Stats file for a single sample
Specifically extract SNPs, MNPs, indels, "others", multiallelic sites,
and multiallelic SNPsites.
No effort is made to convert strings to numbers for the stat values.
Args:
path (str): path to the BCF stats file
Returns:
dict: keys as stat names and the values as stat values
"""
# Not ideal to hardcode here nor below, but gets the job done
stats_of_interest = {"number of SNPs:",
"number of MNPs:",
"number of indels:",
"number of others:",
"number of multiallelic sites:",
"number of multiallelic SNP sites:"}
stats = {}
with open(path) as statsf:
for line in statsf:
if line.startswith("SN"):
parts = line.strip().split("\t")
stat = parts[-2]
num = parts[-1]
if stat in stats_of_interest:
stats[stat.strip(":")] = num
return stats
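# For reference, the "SN" rows this parser consumes are tab-separated, e.g.
# (value made up):  SN <TAB> 0 <TAB> number of SNPs: <TAB> 42
# which yields stats["number of SNPs"] == "42".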
def extract_picard_stats(path):
"""
Extract relevant information from picard wgs or size stats file.
This is assumed to be for a single sample and that there will only be
two lines in the "METRICS CLASS" section, which is the only section we'll
extract.
No effort is made to convert strings to numbers for the stat values.
Args:
path (str): path to the picard wgs stats file
Returns:
dict: keys as stat names and the values as stat values
"""
with open(path) as statsf:
split_lines = []
keep_line = False
for line in statsf:
if keep_line:
split_lines.append(line.strip().split("\t"))
# if we see metrics label, set flag to start collecting data
if line.startswith("## METRICS CLASS"):
keep_line = True
# stop at first empty line, though in practice we expect this
# to happen after exactly 2 lines read
if keep_line and not line.strip():
break
# expecting only 2 lines, header row and values row
stats = dict(zip(split_lines[0], split_lines[1]))
return stats
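# For reference, the section this parser consumes looks like (numbers made up):
#   ## METRICS CLASS        picard.analysis.CollectWgsMetrics$WgsMetrics
#   GENOME_TERRITORY<TAB>MEAN_COVERAGE<TAB>...
#   2821361<TAB>95.3<TAB>...
# i.e. one header row and one value row, zipped into a dict.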
# ----------------------------------
# Main Code Execution
# ----------------------------------
with open(CSV_PATH) as csvfile:
reader = DictReader(csvfile)
final_stats = []
for entry in reader:
sample_name = entry["Sample"]
fwd_pth = entry["Forward Read Path"]
rev_pth = entry["Reverse Read Path"]
ad_pth = entry["Adapter Path"]
rf_pth = entry["Reference Path"]
path_dict = configure_paths(sample_name, fwd_pth, rev_pth,
ad_pth, rf_pth)
# 1. Trimmomatic (trim adapters and filter by quality threshold) PE
# (paired end algorithm) with -phred33 (quality threshold)
trimmomatic(sample_name, path_dict)
# 2. BWA (align to reference)
bwa(sample_name, path_dict)
# 3. SAMTOOLS/BCFTOOLS (call SNPS/indels)
samtools(sample_name, path_dict)
# 4. Generate statistics
generate_stats(sample_name, path_dict)
# 5. Extract statistics and collate into a single row
vcf_st = extract_bcf_stats(path_dict["vcf_stats_file"])
picard_wgs_st = extract_picard_stats(path_dict["wgs_metrics_file"])
picard_size_st = extract_picard_stats(path_dict["size_metrics_file"])
# Assuming no overlap in stat names
vcf_st.update(picard_wgs_st)
vcf_st.update(picard_size_st)
vcf_st["Sample Name"] = sample_name
final_stats.append(vcf_st)
log.info(f"Starting writing final stats...")
with open(STATS_OUTPUT_PATH, "w") as statsout:
# Assumes all stat entries will have exactly the same headers
writer = DictWriter(statsout, final_stats[0].keys(), delimiter="\t")
writer.writeheader()
writer.writerows(final_stats)
log.info(f"...end writing stats.")
|
[
"[email protected]"
] | |
d13ca45229ac72fcabdb232a12ede0fc2c022130
|
30e52ca4bb782396c94b56a67cb5a3d3d1da89fb
|
/pages/main_page.py
|
9058fc68efb336c5b7bd7d335040accc705c0abf
|
[] |
no_license
|
Daniil0555/final-project-stepik
|
0efcfd07076fbc60a1018de81183383e18c650fa
|
be727bd60dc023f7eed5ef2053d36b3c77f6c473
|
refs/heads/main
| 2022-12-27T12:43:12.795613 | 2020-10-12T17:19:18 | 2020-10-12T17:19:18 | 302,731,302 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 593 |
py
|
from .base_page import BasePage
from .locators import MainPageLocators
class MainPage(BasePage):
def __init__(self, *args, **kwargs):
super(MainPage, self).__init__(*args, **kwargs)
def go_to_login_page(self):
link = self.browser.find_element(*MainPageLocators.LOGIN_LINK)
link.click()
def should_be_login_link(self):
assert self.is_element_present(*MainPageLocators.LOGIN_LINK), "Login link is not presented"
def go_to_see_basket(self):
button = self.browser.find_element(*MainPageLocators.LINK_SEE_BASKET)
button.click()
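# Usage sketch in a test (hypothetical wiring; assumes BasePage takes a
# webdriver instance and a URL):
#   page = MainPage(browser, "http://selenium1py.pythonanywhere.com/")
#   page.should_be_login_link()
#   page.go_to_login_page()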
|
[
"[email protected]"
] | |
5e41747f40d11f76f99a1e32c1a1325790c104b9
|
1fc092e26377798f33dbc39b4e469df5a72a672f
|
/Fibonacci.py
|
f2eabbfa27ea7da5a9acc24212bef6ee8407e2ae
|
[
"MIT"
] |
permissive
|
lelouch0125/Fibonacci-Numbers
|
8646192249016190fcc4495abb516bf677a345c7
|
3649c5d991b887e0b5bafb6dc39f9b8c752c1e56
|
refs/heads/master
| 2022-11-14T17:49:51.560414 | 2020-07-08T17:38:29 | 2020-07-08T17:38:29 | 278,146,456 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 347 |
py
|
#Program to print Fibonacci numbers
n1=0
n2=1
i=0
n=int(input("Enter the number of terms "))
if n<=0:
print("Enter positive numbers")
elif n==1:
print("Fibonacci series:\n")
print(n1)
else:
print("Fibonacci series:\n")
while i<n:
print(n1)
s=n1+n2
n1=n2
n2=s
i=i+1
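# Example run (hypothetical input): entering 5 at the prompt prints the first
# five Fibonacci numbers, one per line: 0 1 1 2 3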
|
[
"[email protected]"
] | |
e969823ef0cead302c9668f1da26a031f717fbd4
|
58f649415d23c4ff4d86226564de699493f63d61
|
/Day1.py
|
68256325512b0284297b817b703208eded450f75
|
[] |
no_license
|
Aselian-Gull/Advent2017
|
d34a9fb49a7ca8582da5104d312b861ed4fd65ae
|
6bd6f4ea7f6a36335be6921e3e8f7b8dfad0cad3
|
refs/heads/master
| 2021-09-01T05:41:10.329756 | 2017-12-25T05:42:18 | 2017-12-25T05:42:47 | 113,138,615 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 762 |
py
|
#import math
#import hashlib
print("Good evening. Advent of Code 2017, Day 1.")
f = open("/home/smacgil/Development/Advent/input2017_1.txt", "r")
inputfile = f.read()
f.close()
totalone = 0
totalhalf = 0
maxval = len(inputfile) - 1 # EOL is a thing.
halfval = int(maxval / 2) # Convenience.
for digit in range(0, maxval):
nextone = (digit + 1) % maxval
nexthalf = (digit + halfval) % maxval
# Compare chars, and convert if adding.
if(inputfile[digit] == inputfile[nextone]):
totalone = totalone + int(inputfile[digit])
if(inputfile[digit] == inputfile[nexthalf]):
totalhalf = totalhalf + int(inputfile[digit])
print("I am not a human. Proof one: %d" % totalone)
print(" Proof half: %d" % totalhalf)
|
[
"[email protected]"
] | |
73782f3ba66ecf7f99c21522cdbbf9118fadd0e6
|
32e2ba212d39e022bea40f12cdd6b3c138a62ac0
|
/mizani/tests/test_breaks.py
|
b1e61d60f512ce503f985284c50ce6a24b8c473b
|
[
"BSD-3-Clause"
] |
permissive
|
vals/mizani
|
148dd985d25796c25346a3fac106c1c5c7f40d05
|
6b288fe6061e36add001cc5f8ffb147154e7ca62
|
refs/heads/master
| 2020-09-11T09:59:07.672839 | 2017-06-16T08:03:30 | 2017-06-16T08:03:30 | 94,454,967 | 1 | 0 | null | 2017-06-15T15:47:21 | 2017-06-15T15:47:21 | null |
UTF-8
|
Python
| false | false | 5,432 |
py
|
from __future__ import division
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
import numpy.testing as npt
import pytest
from mizani.breaks import (mpl_breaks, log_breaks, minor_breaks,
trans_minor_breaks, date_breaks,
timedelta_breaks, extended_breaks)
from mizani.transforms import trans
def test_mpl_breaks():
x = np.arange(100)
limits = min(x), max(x)
for nbins in (5, 7, 10, 13, 31):
breaks = mpl_breaks(nbins=nbins)
assert len(breaks(limits)) <= nbins+1
limits = float('-inf'), float('inf')
breaks = mpl_breaks(n=5)
assert len(breaks(limits)) == 0
# Zero range discrete
limits = [1, 1]
assert len(breaks(limits)) == 1
assert breaks(limits)[0] == limits[1]
# Zero range continuous
limits = [np.pi, np.pi]
assert len(breaks(limits)) == 1
assert breaks(limits)[0] == limits[1]
def test_log_breaks():
x = [2, 20, 2000]
limits = min(x), max(x)
breaks = log_breaks()(limits)
npt.assert_array_equal(breaks, [1, 10, 100, 1000, 10000])
breaks = log_breaks(3)(limits)
npt.assert_array_equal(breaks, [1, 100, 10000])
breaks = log_breaks()((10000, 10000))
npt.assert_array_equal(breaks, [10000])
breaks = log_breaks()((float('-inf'), float('inf')))
assert len(breaks) == 0
def test_minor_breaks():
# equidistant breaks
major = [1, 2, 3, 4]
limits = [0, 5]
breaks = minor_breaks()(major, limits)
npt.assert_array_equal(breaks, [.5, 1.5, 2.5, 3.5, 4.5])
minor = minor_breaks(3)(major, [2, 3])
npt.assert_array_equal(minor, [2.25, 2.5, 2.75])
# non-equidistant breaks
major = [1, 2, 4, 8]
limits = [0, 10]
minor = minor_breaks()(major, limits)
npt.assert_array_equal(minor, [1.5, 3, 6])
# single major break
minor = minor_breaks()([2], limits)
assert len(minor) == 0
def test_trans_minor_breaks():
class identity_trans(trans):
minor_breaks = trans_minor_breaks()
class square_trans(trans):
transform = staticmethod(np.square)
inverse = staticmethod(np.sqrt)
minor_breaks = trans_minor_breaks()
class weird_trans(trans):
dataspace_is_numerical = False
minor_breaks = trans_minor_breaks()
major = [1, 2, 3, 4]
limits = [0, 5]
regular_minors = trans.minor_breaks(major, limits)
npt.assert_allclose(
regular_minors,
identity_trans.minor_breaks(major, limits))
# Transform the input major breaks and check against
# the inverse of the output minor breaks
squared_input_minors = square_trans.minor_breaks(
np.square(major), np.square(limits))
npt.assert_allclose(regular_minors,
np.sqrt(squared_input_minors))
t = weird_trans()
with pytest.raises(TypeError):
t.minor_breaks(major)
def test_date_breaks():
# cpython
x = [datetime(year, 1, 1) for year in [2010, 2026, 2015]]
limits = min(x), max(x)
breaks = date_breaks('5 Years')
years = [d.year for d in breaks(limits)]
npt.assert_array_equal(
years, [2010, 2015, 2020, 2025, 2030])
breaks = date_breaks('10 Years')
years = [d.year for d in breaks(limits)]
npt.assert_array_equal(years, [2010, 2020, 2030])
# numpy
x = [np.datetime64(i*10, 'D') for i in range(1, 10)]
breaks = date_breaks('10 Years')
limits = min(x), max(x)
with pytest.raises(AttributeError):
breaks(limits)
# NaT
limits = np.datetime64('NaT'), datetime(2017, 1, 1)
breaks = date_breaks('10 Years')
assert len(breaks(limits)) == 0
def test_timedelta_breaks():
breaks = timedelta_breaks()
# cpython
x = [timedelta(days=i*365) for i in range(25)]
limits = min(x), max(x)
major = breaks(limits)
    years = [val.total_seconds()/(365*24*60*60) for val in major]
npt.assert_array_equal(
years, [0, 5, 10, 15, 20, 25])
x = [timedelta(microseconds=i) for i in range(25)]
limits = min(x), max(x)
major = breaks(limits)
mseconds = [val.total_seconds()*10**6 for val in major]
npt.assert_array_equal(
mseconds, [0, 5, 10, 15, 20, 25])
# pandas
x = [pd.Timedelta(seconds=i*60) for i in range(10)]
limits = min(x), max(x)
major = breaks(limits)
minutes = [val.total_seconds()/60 for val in major]
npt.assert_allclose(
minutes, [0, 2, 4, 6, 8])
# numpy
x = [np.timedelta64(i*10, unit='D') for i in range(1, 10)]
limits = min(x), max(x)
with pytest.raises(ValueError):
breaks(limits)
# NaT
limits = pd.NaT, pd.Timedelta(seconds=9*60)
assert len(breaks(limits)) == 0
def test_extended_breaks():
x = np.arange(100)
limits = min(x), max(x)
for n in (5, 7, 10, 13, 31):
breaks = extended_breaks(n=n)
assert len(breaks(limits)) <= n+1
# Reverse limits
breaks = extended_breaks(n=7)
npt.assert_array_equal(breaks((0, 6)), breaks((6, 0)))
# Infinite limits
limits = float('-inf'), float('inf')
breaks = extended_breaks(n=5)
assert len(breaks(limits)) == 0
# Zero range discrete
limits = [1, 1]
assert len(breaks(limits)) == 1
assert breaks(limits)[0] == limits[1]
# Zero range continuous
limits = [np.pi, np.pi]
assert len(breaks(limits)) == 1
assert breaks(limits)[0] == limits[1]
|
[
"[email protected]"
] | |
9361aa700700b980f8e9441d56dee3d2730b8b05
|
8c09764a1258696c51ef9ef9c1c3bdc1b931b00d
|
/pylons__shootout/shootout/tests/test_views.py
|
a2226095f946dd0ef87a8ab37d4b07a6f4f23cf8
|
[] |
no_license
|
mindreframer/python-pyramid-stuff
|
2178c4922adca15b7905bb0916c8f035ca495564
|
4b9034bdde63ac8dd799ae2050506edd164a96b7
|
refs/heads/master
| 2021-01-18T16:09:52.835796 | 2013-04-09T20:09:44 | 2013-04-09T20:09:44 | 9,325,528 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 14,887 |
py
|
import unittest
from pyramid import testing
def init_db():
from shootout.models import DBSession
from shootout.models import Base
from sqlalchemy import create_engine
engine = create_engine('sqlite://')
DBSession.configure(bind=engine)
Base.metadata.bind = engine
Base.metadata.create_all(engine)
session = DBSession()
return session
def register_templates(config):
config.testing_add_renderer('templates/login.pt')
config.testing_add_renderer('templates/toolbar.pt')
config.testing_add_renderer('templates/cloud.pt')
config.testing_add_renderer('templates/latest.pt')
class ViewTests(unittest.TestCase):
def setUp(self):
self.session = init_db()
self.config = testing.setUp()
def tearDown(self):
import transaction
from shootout.models import DBSession
transaction.abort()
DBSession.remove()
testing.tearDown()
def _addUser(self, username=u'username'):
from shootout.models import User
user = User(username=username, password=u'password', name=u'name',
email=u'email')
self.session.add(user)
self.session.flush()
return user
def _addIdea(self, target=None, user=None):
from shootout.models import Idea
if not user:
user = self._addUser()
idea = Idea(target=target, author=user, title=u'title',
text=u'text')
self.session.add(idea)
self.session.flush()
return idea
def test_main_view(self):
from shootout.views import main_view
self.config.testing_securitypolicy(u'username')
self.config.include(register_templates)
request = testing.DummyRequest()
result = main_view(request)
self.assertEqual(result['username'], u'username')
self.assertEqual(len(result['toplists']), 4)
def test_idea_add_nosubmit_idea(self):
from shootout.views import idea_add
self.config.testing_securitypolicy(u'username')
self.config.include(register_templates)
request = testing.DummyRequest()
result = idea_add(request)
self.assertEqual(result['target'], None)
self.assertEqual(result['kind'], 'idea')
def test_idea_add_nosubmit_comment(self):
from shootout.views import idea_add
self.config.testing_securitypolicy(u'username')
self.config.include(register_templates)
idea = self._addIdea()
request = testing.DummyRequest(params={'target': idea.idea_id})
result = idea_add(request)
self.assertEqual(result['target'], idea)
self.assertEqual(result['kind'], 'comment')
def test_idea_add_not_existing_target(self):
from shootout.views import idea_add
self.config.testing_securitypolicy(u'username')
self.config.include(register_templates)
request = testing.DummyRequest(params={'target': 100})
result = idea_add(request)
self.assertEqual(result.code, 404)
def test_idea_add_submit_schema_fail_empty_params(self):
from shootout.views import idea_add
self.config.testing_securitypolicy(u'username')
self.config.include(register_templates)
self.config.include('shootout.addroutes')
request = testing.DummyRequest(post={'form.submitted': 'Shoot'})
result = idea_add(request)
self.assertEqual(
result['form'].form.errors,
{
'text': u'Missing value',
'tags': u'Missing value',
'title': u'Missing value'
}
)
def test_idea_add_submit_schema_succeed(self):
from shootout.views import idea_add
from shootout.models import Idea
self.config.testing_securitypolicy(u'username')
self.config.include('shootout.addroutes')
request = testing.DummyRequest(
post={
'form.submitted': u'Shoot',
'tags': u'abc def, bar',
'text': u'My idea is cool',
'title': u'My idea',
}
)
user = self._addUser(u'username')
result = idea_add(request)
self.assertEqual(result.location, 'http://example.com/ideas/1')
ideas = self.session.query(Idea).all()
self.assertEqual(len(ideas), 1)
idea = ideas[0]
self.assertEqual(idea.idea_id, 1)
self.assertEqual(idea.text, u'My idea is cool')
self.assertEqual(idea.title, u'My idea')
self.assertEqual(idea.author, user)
self.assertEqual(len(idea.tags), 3)
self.assertEqual(idea.tags[0].name, u'abc')
self.assertEqual(idea.tags[1].name, u'bar')
self.assertEqual(idea.tags[2].name, u'def')
def test_comment_add_submit_schema_succeed(self):
from shootout.views import idea_add
from shootout.models import Idea
idea = self._addIdea()
self.config.testing_securitypolicy(u'commentator')
self.config.include('shootout.addroutes')
request = testing.DummyRequest(
params={
'form.submitted': u'Shoot',
'tags': u'abc def, bar',
'text': u'My comment is cool',
'title': u'My comment',
'target': unicode(idea.idea_id),
}
)
request.method = 'POST'
user = self._addUser(u'commentator')
result = idea_add(request)
self.assertEqual(result.location, 'http://example.com/ideas/2')
ideas = self.session.query(Idea).all()
self.assertEqual(len(ideas), 2)
comment = ideas[1]
self.assertEqual(comment.idea_id, 2)
self.assertEqual(comment.target_id, 1)
self.assertEqual(comment.text, u'My comment is cool')
self.assertEqual(comment.title, u'My comment')
self.assertEqual(comment.author, user)
self.assertEqual(len(comment.tags), 3)
self.assertEqual(comment.tags[0].name, u'abc')
self.assertEqual(comment.tags[1].name, u'bar')
self.assertEqual(comment.tags[2].name, u'def')
def test_vote_on_own_idea(self):
from shootout.views import idea_vote
from shootout.models import User
self.config.include('shootout.addroutes')
idea = self._addIdea()
self.session.query(User).one()
self.assertEqual(idea.user_voted(u'username'), False)
self.config.testing_securitypolicy(u'username')
post_data = {
'form.vote_hit': u'Hit',
'target': 1,
}
request = testing.DummyRequest(post=post_data)
idea_vote(request)
self.assertEqual(idea.hits, 0)
self.assertEqual(idea.misses, 0)
self.assertEqual(idea.hit_percentage, 0)
self.assertEqual(idea.total_votes, 0)
self.assertEqual(idea.vote_differential, 0)
self.assertEqual(idea.author.hits, 0)
self.assertEqual(len(idea.voted_users.all()), 0)
self.assertEqual(idea.user_voted(u'username'), False)
def test_positive_idea_voting(self):
from shootout.views import idea_vote
self.config.include('shootout.addroutes')
user = self._addUser()
idea = self._addIdea(user=user)
voter = self._addUser(u'votername')
self.assertEqual(idea.user_voted(u'votername'), False)
self.config.testing_securitypolicy(u'votername')
post_data = {
'form.vote_hit': u'Hit',
'target': 1,
}
request = testing.DummyRequest(post=post_data)
idea_vote(request)
self.assertEqual(idea.hits, 1)
self.assertEqual(idea.misses, 0)
self.assertEqual(idea.hit_percentage, 100)
self.assertEqual(idea.total_votes, 1)
self.assertEqual(idea.vote_differential, 1)
self.assertEqual(idea.author.hits, 1)
self.assertEqual(len(idea.voted_users.all()), 1)
self.assertEqual(idea.voted_users.one(), voter)
self.assertTrue(idea.user_voted(u'votername'))
def test_negative_idea_voting(self):
from shootout.views import idea_vote
self.config.include('shootout.addroutes')
user = self._addUser()
idea = self._addIdea(user=user)
voter = self._addUser(u'votername')
self.assertEqual(idea.user_voted(u'votername'), False)
self.config.testing_securitypolicy(u'votername')
post_data = {
'form.vote_miss': u'Miss',
'target': 1,
}
request = testing.DummyRequest(post=post_data)
idea_vote(request)
self.assertEqual(idea.hits, 0)
self.assertEqual(idea.misses, 1)
self.assertEqual(idea.hit_percentage, 0)
self.assertEqual(idea.total_votes, 1)
self.assertEqual(idea.vote_differential, -1)
self.assertEqual(idea.author.hits, 0)
self.assertEqual(len(idea.voted_users.all()), 1)
self.assertEqual(idea.voted_users.one(), voter)
self.assertTrue(idea.user_voted(u'votername'))
def test_registration_nosubmit(self):
from shootout.views import user_add
self.config.include(register_templates)
request = testing.DummyRequest()
result = user_add(request)
self.assertTrue('form' in result)
def test_registration_submit_empty(self):
from shootout.views import user_add
self.config.include(register_templates)
request = testing.DummyRequest()
result = user_add(request)
self.assertTrue('form' in result)
request = testing.DummyRequest(post={'form.submitted': 'Shoot'})
result = user_add(request)
self.assertEqual(
result['form'].form.errors,
{
'username': u'Missing value',
'confirm_password': u'Missing value',
'password': u'Missing value',
'email': u'Missing value',
'name': u'Missing value'
}
)
def test_registration_submit_schema_succeed(self):
from shootout.views import user_add
from shootout.models import User
self.config.include('shootout.addroutes')
request = testing.DummyRequest(
post={
'form.submitted': u'Register',
'username': u'username',
'password': u'secret',
'confirm_password': u'secret',
'email': u'[email protected]',
'name': u'John Doe',
}
)
user_add(request)
users = self.session.query(User).all()
self.assertEqual(len(users), 1)
user = users[0]
self.assertEqual(user.username, u'username')
self.assertEqual(user.name, u'John Doe')
self.assertEqual(user.email, u'[email protected]')
self.assertEqual(user.hits, 0)
self.assertEqual(user.misses, 0)
self.assertEqual(user.delivered_hits, 0)
self.assertEqual(user.delivered_misses, 0)
self.assertEqual(user.ideas, [])
self.assertEqual(user.voted_ideas, [])
def test_user_view(self):
from shootout.views import user_view
self.config.testing_securitypolicy(u'username')
self.config.include('shootout.addroutes')
self.config.include(register_templates)
request = testing.DummyRequest()
request.matchdict = {'username': u'username'}
self._addUser()
result = user_view(request)
self.assertEqual(result['user'].username, u'username')
self.assertEqual(result['user'].user_id, 1)
def test_idea_view(self):
from shootout.views import idea_view
self.config.testing_securitypolicy(u'username')
self.config.include('shootout.addroutes')
self.config.include(register_templates)
self._addIdea()
request = testing.DummyRequest()
request.matchdict = {'idea_id': 1}
result = idea_view(request)
self.assertEqual(result['idea'].title, u'title')
self.assertEqual(result['idea'].idea_id, 1)
self.assertEqual(result['viewer_username'], u'username')
def test_tag_view(self):
from shootout.views import tag_view
from shootout.models import Tag
self.config.testing_securitypolicy(u'username')
self.config.include('shootout.addroutes')
self.config.include(register_templates)
user = self._addUser()
tag1 = Tag(u'bar')
tag2 = Tag(u'foo')
self.session.add_all([tag1, tag2])
idea1 = self._addIdea(user=user)
idea1.tags.append(tag1)
idea2 = self._addIdea(user=user)
idea2.tags.append(tag1)
idea3 = self._addIdea(user=user)
idea3.tags.append(tag2)
self.session.flush()
request = testing.DummyRequest()
request.matchdict = {'tag_name': u'bar'}
result = tag_view(request)
ideas = result['ideas'].all()
self.assertEqual(ideas[0].idea_id, idea1.idea_id)
self.assertEqual(ideas[1].idea_id, idea2.idea_id)
self.assertEqual(result['tag'], u'bar')
request = testing.DummyRequest()
request.matchdict = {'tag_name': u'foo'}
result = tag_view(request)
self.assertEqual(result['ideas'].one().idea_id, idea3.idea_id)
self.assertEqual(result['tag'], u'foo')
def test_about_view(self):
from shootout.views import about_view
self.config.include(register_templates)
request = testing.DummyRequest()
about_view(request)
def test_login_view_submit_fail(self):
from shootout.views import login_view
self.config.include('shootout.addroutes')
self._addUser()
request = testing.DummyRequest(
post={
'submit': u'Login',
'login': u'username',
'password': u'wrongpassword',
}
)
login_view(request)
messages = request.session.peek_flash()
self.assertEqual(messages, [u'Failed to login.'])
def test_login_view_submit_success(self):
from shootout.views import login_view
self.config.include('shootout.addroutes')
self._addUser()
request = testing.DummyRequest(
post={
'submit': u'Login',
'login': u'username',
'password': u'password',
}
)
login_view(request)
messages = request.session.peek_flash()
self.assertEqual(messages, [u'Logged in successfully.'])
def test_logout_view(self):
from shootout.views import logout_view
self.config.include('shootout.addroutes')
request = testing.DummyRequest()
logout_view(request)
messages = request.session.peek_flash()
self.assertEqual(messages, [u'Logged out successfully.'])
|
[
"[email protected]"
] | |
9205e79d281ae4f2c7b9d5297ae15e2a7e9864b7
|
ff88948139d3de275388d8747cafec9dba1f136d
|
/calameo-downloader.py
|
6017d7ee0ec49c5003e5f5952a28a3ba2146b59f
|
[] |
no_license
|
quatrejuin/Calameo-SVG-Downloader
|
94d8c1893bd8e27ba888066fa6ce9bda7bf8e450
|
ae2cce295af267d6c542724a8c57149d074e5c9a
|
refs/heads/master
| 2022-12-14T13:10:42.695967 | 2020-09-16T20:05:48 | 2020-09-16T20:05:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,166 |
py
|
# --------------------------------------------------------------------------------------------------
# Calameoassets Downloader -
# Copyright (c) 2020. Dr Watthanasak Jeamwatthanachai - All Rights Reserved -
# --------------------------------------------------------------------------------------------------
import time
from pathlib import Path
import requests
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
BASE_DIR = Path(__file__).parent.absolute()
driver = webdriver.Chrome(f"{BASE_DIR}/data/drivers/chromedriver")
driver.implicitly_wait(5)
wait = WebDriverWait(driver, 5)
calameoassets_url = 'https://p.calameoassets.com/'
header_curl = {
'user-agent': driver.execute_script("return navigator.userAgent;")
}
driver.get('https://global.oup.com/education/support-learning-anywhere/key-resources-online/?region=uk')
book_tables = driver.find_elements(By.XPATH, '//div[@class="content_block full_width"]/div/table/tbody')
print('''
************************************
Collect list of books on the website
************************************
''')
books_list = []
for table in book_tables:
tr = table.find_elements(By.TAG_NAME, 'tr')
books = tr[-1].find_elements(By.TAG_NAME, 'a')
for book in books:
url = book.get_attribute('href')
name = book.text
books_list.append({'name': name, 'url': url})
print(f'> {name} - {url}')
# In case you want to download only particular books, you can define books_list manually
# books_list = [
# {'name': 'Book A', 'url': 'https://www.calameo.com/read/00077772151d39c98fbab?authid=5OmdpYZALnKk®ion=uk'},
# {'name': 'Book B', 'url': 'https://www.calameo.com/read/00077772164411330cf35?authid=K0Yqvcafhmlu®ion=uk'},
# ]
print('''
************************************
Download all books
************************************
''')
for book in books_list:
print(f'> Go to {book["url"]}')
driver.get(book['url'])
iframe = driver.find_element_by_tag_name("iframe")
driver.switch_to.frame(iframe)
imgs = []
counter = 0
while len(imgs) == 0:
imgs = driver.find_elements(By.XPATH, '//img[@class="page"]')
time.sleep(1)
counter += 1
if counter > 20:
raise Exception("Book ID is unreachable")
imgs = driver.find_elements(By.XPATH, '//img[@class="page"]')
book_id = imgs[0].get_attribute('src').replace(calameoassets_url, '').split('/')[0]
print(f'\t* Book ID: {book_id}')
Path(f'{BASE_DIR}/books/{book["name"]}').mkdir(parents=True, exist_ok=True)
for page in range(1, 9999):
filename = f'p{page}.svgz'
url = f'{calameoassets_url}{book_id}/{filename}'
response = requests.get(url, allow_redirects=True, headers=header_curl)
if response.status_code != 200:
break
print(f'\t* {url}', end='\t...\t')
open(f'{BASE_DIR}/books/{book["name"]}/{filename}', 'wb').write(response.content)
print('saved')
driver.close()
driver.quit()
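# Note: an .svgz file is simply a gzip-compressed SVG, so each saved page can
# be decompressed with gunzip or opened by any viewer that understands svgz.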
|
[
"[email protected]"
] | |
d3f3963754a68023200fa1fc0ba465a86508387f
|
8324e8db16cdd8797b57b5a514c173614c6e94cc
|
/Praktikum-Protek-05/Latihan 2/latihan06.2.py
|
bdd44d95e3739361f6fc3bd6a4f9592f8fef2b6a
|
[] |
no_license
|
itsme-dazainaki/Praktikum-Protek-05
|
f19681837b031199183fcdc50931bedabe045ac1
|
21b77fb85059709f27955b39033171045b737ae0
|
refs/heads/main
| 2023-01-05T07:23:52.268240 | 2020-10-31T15:03:30 | 2020-10-31T15:03:30 | 308,905,022 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 839 |
py
|
# intro
print("Hi.. my name is Mr. Lappie, I have picked a random integer between 0 - 100. Please take a guess !!!")
# declare a variable to hold the score
skor = 0
# check the guesses inside a loop
while True:
    skor += 1
    bil = int(input("Your guess : "))  # show the prompt and read the input
    if (bil < 0) or (bil > 100):
        print("error")
        exit()  # stop the program on out-of-range input
    elif (bil < 10):
        print("Hehehe...your guess is too small")
    elif (bil > 10):
        print("Hehehe...your guess is too big")
    elif (bil == 10):
        print("Yee...your guess is correct :)")
        break  # stop the loop
# the formula below prints the score based on the number of guesses
print(" ")
print("Your score : ", 100-(skor-1)*2)
|
[
"[email protected]"
] | |
325f50720e6a0ef2fe6a1ebfeb94d424c5f73687
|
bb065d599a2d39f4b5df520ac9e8ce2ee7adc7c7
|
/src/py/var_int_to_var_ubin.py
|
f67f88b1d6f591ffeed0dc96e70f40cc3dbdeb8b
|
[] |
no_license
|
arq5x/gqt
|
ad60441e4abcb2eb3585595dea22e851f274d664
|
2d6dbde100c255526134dfee1767459b482d0ddc
|
refs/heads/master
| 2021-01-18T09:28:27.972850 | 2014-09-23T13:29:21 | 2014-09-23T13:29:21 | 24,371,487 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,779 |
py
|
#!/usr/bin/env python
import sys
import numpy as np
import array
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-v",
"--var_int_file",
dest="var_int_file",
help="Variant int file")
parser.add_option("-b",
"--ubin_file",
dest="ubin_file",
help="Uncompressed variant binary output file")
parser.add_option("-p",
"--print",
action="store_true", default=False,
dest="print_to_screen",
help="Print ints to screen")
(options, args) = parser.parse_args()
if not options.var_int_file:
parser.error('Variant int file not given')
if not options.print_to_screen and not options.ubin_file:
    parser.error('Uncompressed variant binary output file not given')
f = open(options.var_int_file,'r')
if options.ubin_file:
f_out = open(options.ubin_file, 'wb')
tot_vars = -1
if options.ubin_file:
data = array.array('I')
data.append(1)
for l in f:
output_int = 0
num_vars = 0
A = l.rstrip().split(' ')
if tot_vars == -1:
tot_vars = len(A)
if options.print_to_screen:
print tot_vars
else:
data.append(tot_vars)
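    # Pack 16 two-bit values into each 32-bit unsigned int, most significant
    # bits first: the k-th value of a word occupies bits (31 - 2*k)..(30 - 2*k).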
for a in A:
output_int |= int(a) << (30 - num_vars * 2)
num_vars += 1
if num_vars == 16:
if options.print_to_screen:
print output_int,
else:
data.append(output_int)
output_int = 0
num_vars = 0
if num_vars > 0:
if options.print_to_screen:
print output_int,
else:
data.append(output_int)
if options.print_to_screen:
print
else:
data.tofile(f_out)
data = array.array('I')
f.close()
if not options.print_to_screen:
f_out.close()
|
[
"[email protected]"
] | |
042cbf52143196b868afdd9abf034bc2a4ed1dd5
|
a3a3183bc3ae9d3d4bad2f4923e8297bce0ff7d3
|
/final/Python-3.10.0/Lib/ensurepip/__init__.py
|
f28ab11ed400828f39a629a46d2708f9de616bad
|
[] |
no_license
|
Nuitka/Nuitka-references
|
4b78831e6947f1c4b32ef034435a88ecfd27f701
|
f20d1b5728ec00cf8a5b23d650101c288b2594e9
|
refs/heads/main
| 2023-08-06T19:12:11.795836 | 2023-08-03T14:54:16 | 2023-08-03T14:55:22 | 169,884,560 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,487 |
py
|
import collections
import os
import os.path
import subprocess
import sys
import sysconfig
import tempfile
from importlib import resources
__all__ = ["version", "bootstrap"]
_PACKAGE_NAMES = ('setuptools', 'pip')
_SETUPTOOLS_VERSION = "57.4.0"
_PIP_VERSION = "21.2.3"
_PROJECTS = [
("setuptools", _SETUPTOOLS_VERSION, "py3"),
("pip", _PIP_VERSION, "py3"),
]
# Packages bundled in ensurepip._bundled have wheel_name set.
# Packages from WHEEL_PKG_DIR have wheel_path set.
_Package = collections.namedtuple('Package',
('version', 'wheel_name', 'wheel_path'))
# Directory of system wheel packages. Some Linux distribution packaging
# policies recommend against bundling dependencies. For example, Fedora
# installs wheel packages in the /usr/share/python-wheels/ directory and
# doesn't install the ensurepip._bundled package.
_WHEEL_PKG_DIR = sysconfig.get_config_var('WHEEL_PKG_DIR')
def _find_packages(path):
packages = {}
try:
filenames = os.listdir(path)
except OSError:
# Ignore: path doesn't exist or permission error
filenames = ()
# Make the code deterministic if a directory contains multiple wheel files
# of the same package, but don't attempt to implement correct version
# comparison since this case should not happen.
filenames = sorted(filenames)
for filename in filenames:
# filename is like 'pip-20.2.3-py2.py3-none-any.whl'
if not filename.endswith(".whl"):
continue
for name in _PACKAGE_NAMES:
prefix = name + '-'
if filename.startswith(prefix):
break
else:
continue
# Extract '20.2.2' from 'pip-20.2.2-py2.py3-none-any.whl'
version = filename.removeprefix(prefix).partition('-')[0]
wheel_path = os.path.join(path, filename)
packages[name] = _Package(version, None, wheel_path)
return packages
def _get_packages():
global _PACKAGES, _WHEEL_PKG_DIR
if _PACKAGES is not None:
return _PACKAGES
packages = {}
for name, version, py_tag in _PROJECTS:
wheel_name = f"{name}-{version}-{py_tag}-none-any.whl"
packages[name] = _Package(version, wheel_name, None)
if _WHEEL_PKG_DIR:
dir_packages = _find_packages(_WHEEL_PKG_DIR)
        # only use the wheel package directory if all packages are found there
if all(name in dir_packages for name in _PACKAGE_NAMES):
packages = dir_packages
_PACKAGES = packages
return packages
_PACKAGES = None
def _run_pip(args, additional_paths=None):
    # Run the bootstrapping in a subprocess to avoid leaking any state that happens
    # after pip has executed. In particular, this avoids the case where pip holds
    # onto the files in *additional_paths*, preventing us from removing them at the
    # end of the invocation.
code = f"""
import runpy
import sys
sys.path = {additional_paths or []} + sys.path
sys.argv[1:] = {args}
runpy.run_module("pip", run_name="__main__", alter_sys=True)
"""
return subprocess.run([sys.executable, '-W', 'ignore::DeprecationWarning',
"-c", code], check=True).returncode
def version():
"""
Returns a string specifying the bundled version of pip.
"""
return _get_packages()['pip'].version
def _disable_pip_configuration_settings():
# We deliberately ignore all pip environment variables
# when invoking pip
# See http://bugs.python.org/issue19734 for details
keys_to_remove = [k for k in os.environ if k.startswith("PIP_")]
for k in keys_to_remove:
del os.environ[k]
# We also ignore the settings in the default pip configuration file
# See http://bugs.python.org/issue20053 for details
os.environ['PIP_CONFIG_FILE'] = os.devnull
def bootstrap(*, root=None, upgrade=False, user=False,
altinstall=False, default_pip=False,
verbosity=0):
"""
Bootstrap pip into the current Python installation (or the given root
directory).
Note that calling this function will alter both sys.path and os.environ.
"""
# Discard the return value
_bootstrap(root=root, upgrade=upgrade, user=user,
altinstall=altinstall, default_pip=default_pip,
verbosity=verbosity)
def _bootstrap(*, root=None, upgrade=False, user=False,
altinstall=False, default_pip=False,
verbosity=0):
"""
Bootstrap pip into the current Python installation (or the given root
directory). Returns pip command status code.
Note that calling this function will alter both sys.path and os.environ.
"""
if altinstall and default_pip:
raise ValueError("Cannot use altinstall and default_pip together")
sys.audit("ensurepip.bootstrap", root)
_disable_pip_configuration_settings()
# By default, installing pip and setuptools installs all of the
# following scripts (X.Y == running Python version):
#
# pip, pipX, pipX.Y, easy_install, easy_install-X.Y
#
# pip 1.5+ allows ensurepip to request that some of those be left out
if altinstall:
# omit pip, pipX and easy_install
os.environ["ENSUREPIP_OPTIONS"] = "altinstall"
elif not default_pip:
# omit pip and easy_install
os.environ["ENSUREPIP_OPTIONS"] = "install"
with tempfile.TemporaryDirectory() as tmpdir:
# Put our bundled wheels into a temporary directory and construct the
        # additional paths that need to be added to sys.path
additional_paths = []
for name, package in _get_packages().items():
if package.wheel_name:
# Use bundled wheel package
from ensurepip import _bundled
wheel_name = package.wheel_name
whl = resources.read_binary(_bundled, wheel_name)
else:
# Use the wheel package directory
with open(package.wheel_path, "rb") as fp:
whl = fp.read()
wheel_name = os.path.basename(package.wheel_path)
filename = os.path.join(tmpdir, wheel_name)
with open(filename, "wb") as fp:
fp.write(whl)
additional_paths.append(filename)
# Construct the arguments to be passed to the pip command
args = ["install", "--no-cache-dir", "--no-index", "--find-links", tmpdir]
if root:
args += ["--root", root]
if upgrade:
args += ["--upgrade"]
if user:
args += ["--user"]
if verbosity:
args += ["-" + "v" * verbosity]
return _run_pip([*args, *_PACKAGE_NAMES], additional_paths)
def _uninstall_helper(*, verbosity=0):
"""Helper to support a clean default uninstall process on Windows
Note that calling this function may alter os.environ.
"""
# Nothing to do if pip was never installed, or has been removed
try:
import pip
except ImportError:
return
# If the installed pip version doesn't match the available one,
# leave it alone
available_version = version()
if pip.__version__ != available_version:
print(f"ensurepip will only uninstall a matching version "
f"({pip.__version__!r} installed, "
f"{available_version!r} available)",
file=sys.stderr)
return
_disable_pip_configuration_settings()
# Construct the arguments to be passed to the pip command
args = ["uninstall", "-y", "--disable-pip-version-check"]
if verbosity:
args += ["-" + "v" * verbosity]
return _run_pip([*args, *reversed(_PACKAGE_NAMES)])
def _main(argv=None):
import argparse
parser = argparse.ArgumentParser(prog="python -m ensurepip")
parser.add_argument(
"--version",
action="version",
version="pip {}".format(version()),
help="Show the version of pip that is bundled with this Python.",
)
parser.add_argument(
"-v", "--verbose",
action="count",
default=0,
dest="verbosity",
help=("Give more output. Option is additive, and can be used up to 3 "
"times."),
)
parser.add_argument(
"-U", "--upgrade",
action="store_true",
default=False,
help="Upgrade pip and dependencies, even if already installed.",
)
parser.add_argument(
"--user",
action="store_true",
default=False,
help="Install using the user scheme.",
)
parser.add_argument(
"--root",
default=None,
help="Install everything relative to this alternate root directory.",
)
parser.add_argument(
"--altinstall",
action="store_true",
default=False,
help=("Make an alternate install, installing only the X.Y versioned "
"scripts (Default: pipX, pipX.Y, easy_install-X.Y)."),
)
parser.add_argument(
"--default-pip",
action="store_true",
default=False,
help=("Make a default pip install, installing the unqualified pip "
"and easy_install in addition to the versioned scripts."),
)
args = parser.parse_args(argv)
return _bootstrap(
root=args.root,
upgrade=args.upgrade,
user=args.user,
verbosity=args.verbosity,
altinstall=args.altinstall,
default_pip=args.default_pip,
)
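
# A typical invocation (assuming a normal CPython installation) is:
#
#     python -m ensurepip --upgrade
#
# which bootstraps the bundled pip and setuptools wheels into the running
# interpreter's environment.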
|
[
"[email protected]"
] | |
62c0360071a15ade3e6a6b3f38a577416759847b
|
7160e632d88bf49492616f8152c91cb9f1d40d8d
|
/testcases/statistical_form2/test_case_166_statistical_form_alarm_detail.py
|
53c29adc336cc3d9a149c60941a9e7a5f1d2954e
|
[] |
no_license
|
huangqiming123/tuqiangol_test1
|
ad5ddf22ce61b5b6daad55f684be5da160a64e59
|
75722812260590480320910c4ad6f6c1251a2def
|
refs/heads/master
| 2021-03-30T23:29:08.478494 | 2018-03-12T03:45:11 | 2018-03-12T03:45:11 | 124,832,890 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,802 |
py
|
import unittest
from time import sleep
from automate_driver.automate_driver import AutomateDriver
from pages.alarm_info.alarm_info_page import AlarmInfoPage
from pages.base.base_page import BasePage
from pages.base.lon_in_base import LogInBase
from pages.statistical_form.statistical_form_page import StatisticalFormPage
from pages.statistical_form.statistical_form_page_read_csv import StatisticalFormPageReadCsv
class TestCase166StatisticalFormAlarmDetail(unittest.TestCase):
def setUp(self):
        # Preconditions
        # Instantiate objects
self.driver = AutomateDriver()
self.base_url = self.driver.base_url
self.base_page = BasePage(self.driver, self.base_url)
self.alarm_info_page = AlarmInfoPage(self.driver, self.base_url)
self.statistical_form_page_read_csv = StatisticalFormPageReadCsv()
self.log_in_base = LogInBase(self.driver, self.base_url)
self.statistical_form_page = StatisticalFormPage(self.driver, self.base_url)
        # Open the page, fill in the username and password, and click log in
self.base_page.open_page()
self.driver.set_window_max()
self.driver.implicitly_wait(5)
self.log_in_base.log_in_jimitest()
        # After logging in, click the console, then click command management
self.statistical_form_page.click_control_after_click_statistical_form_page()
sleep(3)
def tearDown(self):
self.driver.quit_browser()
def test_case_statistical_form_alarm_detail(self):
        # Assert the URL
expect_url = self.base_url + '/deviceReport/statisticalReport'
self.assertEqual(expect_url, self.alarm_info_page.actual_url_click_alarm())
        # Click alarm details
self.alarm_info_page.click_alarm_detail_list()
for n in range(5):
self.statistical_form_page.click_customer_in_alarm_detail_form(n)
            # Click the search-device button
self.statistical_form_page.click_search_dev_button_in_alarm_detail()
            # Get the number of groups
number = self.statistical_form_page.get_group_number_in_alarm_detail_form()
if number == 0:
pass
else:
for m in range(number):
                    # Collapse the default group
self.statistical_form_page.click_defalut_group_in_alarm_detail_form()
                    # Get the number of devices in each group
dev_number = self.statistical_form_page.get_dev_number_in_alarm_detail_form(m)
                    # Expand each group
self.statistical_form_page.click_per_group_in_alarm_detail_form(m)
dev_number_list = self.statistical_form_page.get_dev_number_list_in_alarm_detail_form(m)
self.assertEqual(str(dev_number_list), dev_number)
|
[
"[email protected]"
] | |
fe955cbfd83504fea6956a13b9d40c1f5a88b5a8
|
9051a89f6e849adf68225d862483f8bd218a54a6
|
/sql/convert.py
|
582757cb652b80ba4be06ddc40d55899812a5dc8
|
[] |
no_license
|
witalosk/chemical
|
9deb2741db7358ddb163239e9372f64c554f1950
|
f6c1e69fb50344a8cf3e54d5f638de1c6c21afc1
|
refs/heads/master
| 2021-04-18T18:52:02.307949 | 2018-06-12T15:00:19 | 2018-06-12T15:00:19 | 126,440,023 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,111 |
py
|
#! /usr/bin/env python
import sys
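# Typical usage (an assumed pipeline): convert a SQLite dump to MySQL-compatible
# SQL on the fly:
#
#     sqlite3 db.sqlite3 .dump | ./convert.py | mysql -u user -p dbname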
def main():
print("SET sql_mode='NO_BACKSLASH_ESCAPES';")
lines = sys.stdin.read().splitlines()
for line in lines:
processLine(line)
def processLine(line):
if (
line.startswith("PRAGMA") or
line.startswith("BEGIN TRANSACTION;") or
line.startswith("COMMIT;") or
line.startswith("DELETE FROM sqlite_sequence;") or
line.startswith("INSERT INTO \"sqlite_sequence\"")
):
return
line = line.replace("AUTOINCREMENT", "AUTO_INCREMENT")
line = line.replace("DEFAULT 't'", "DEFAULT '1'")
line = line.replace("DEFAULT 'f'", "DEFAULT '0'")
line = line.replace(",'t'", ",'1'")
line = line.replace(",'f'", ",'0'")
in_string = False
newLine = ''
for c in line:
if not in_string:
if c == "'":
in_string = True
elif c == '"':
newLine = newLine + '`'
continue
elif c == "'":
in_string = False
newLine = newLine + c
print(newLine)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
dcdddf53bab181771e04c9a9bf3252384665391b
|
3330426fe8ccd5d313e12fbf53e4423b0428560b
|
/crossover6.spec
|
30f91a58e9b76774020dfbcd5cc457e9b95b46b2
|
[] |
no_license
|
ryanjhbae/ics3u-culminating
|
75ed5f660a594e412e00f34bf9a3ea0dd7079ccf
|
c3baa8f87c7d90e040991b2a176abbbf2b44c828
|
refs/heads/main
| 2023-04-24T01:27:05.333864 | 2021-05-06T15:52:50 | 2021-05-06T15:52:50 | 364,963,721 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 871 |
spec
|
# -*- mode: python -*-
block_cipher = None
a = Analysis(['crossover6.py'],
pathex=['E:\\Python\\Culminating5'],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[],
name='crossover6',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
runtime_tmpdir=None,
          console=False, icon='E:\\Python\\Culminating5\\crossover_logo.ico')
|
[
"[email protected]"
] | |
7a74a41d8122d6130dccc0e194bd291246fac564
|
749f096d2146bdb37f11e5face2b7a1b51fc7beb
|
/centerloss.py
|
3b18997efd86c12cdb4bde3e54665af5ad78c892
|
[] |
no_license
|
WangBenHui/fgcmr
|
2129eca7d3ea4f059b70710c458891967d0496bf
|
1568d67817a8bbcb537b556f34285f17654601fa
|
refs/heads/master
| 2022-04-24T17:23:58.564203 | 2020-04-30T14:25:30 | 2020-04-30T14:25:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,307 |
py
|
import torch
import torch.nn as nn
import scipy.spatial
class CenterLoss(nn.Module):
"""Center loss.
Reference:
Wen et al. A Discriminative Feature Learning Approach for Deep Face Recognition. ECCV 2016.
Args:
num_classes (int): number of classes.
        feat_dim (int): feature dimension.
"""
def __init__(self, num_classes=10, feat_dim=2, use_gpu=True):
super(CenterLoss, self).__init__()
self.num_classes = num_classes
self.feat_dim = feat_dim
self.use_gpu = use_gpu
if self.use_gpu:
self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim).cuda())
else:
            self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim))  # a (num_classes, feat_dim) tensor of class centers (10 x 2 with the defaults)
def forward(self, x, labels):
"""
Args:
x: feature matrix with shape (batch_size, feat_dim).
labels: ground truth labels with shape (batch_size).
"""
        batch_size = x.size(0)  # x has shape (batch_size, feat_dim), e.g. (16, 200)
        # distmat holds the squared Euclidean distances between x and the centers.
        # .expand() returns a new view of the tensor with singleton dimensions
        # broadcast to a larger size; .t() is the transpose. The summed squares
        # are expanded to shape (batch_size, num_classes).
        distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + \
                  torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t()
        distmat.addmm_(1, -2, x, self.centers.t())  # distmat is (batch_size, num_classes), e.g. [16, 200]
classes = torch.arange(self.num_classes).long()
if self.use_gpu:
classes = classes.cuda()
        labels = labels.unsqueeze(1).expand(batch_size, self.num_classes)  # labels expands from (batch_size,) to (batch_size, num_classes), e.g. 16 -> [16, 200]
mask = labels.eq(classes.expand(batch_size, self.num_classes))
        # mask is a one-hot indicator tensor: each row is a class vector like [0, 0, ..., 1, ...]
        dist = distmat * mask.float()  # keep each row's own-class column of distmat; all other entries become 0
        loss = dist.clamp(min=1e-12, max=1e+12).sum() / batch_size  # sum of the retained entries / batch_size; the closer to the center, the better
return loss
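
# A minimal usage sketch (hypothetical tensors; the 10-class / 2-dim defaults
# match the constructor above). Note that addmm_ is called with the old
# positional (beta, alpha, mat1, mat2) signature, so a pre-1.5 torch is assumed.
if __name__ == '__main__':
    criterion = CenterLoss(num_classes=10, feat_dim=2, use_gpu=False)
    features = torch.randn(16, 2)          # (batch_size, feat_dim)
    labels = torch.randint(0, 10, (16,))   # one class index per sample
    loss = criterion(features, labels)
    print(loss.item())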
|
[
"[email protected]"
] | |
bc852715d8fa163594392bc2797185b171e26502
|
c91100ba78470286ec305a790b7a73747b0cc5c8
|
/WeatherHarvester.py
|
e0b31ebe6cb2acf626028400fcd91c0b6d985054
|
[] |
no_license
|
oze4/oPyn_weather_map
|
d70ef4707f174f84a0579238d7e145fe5667f63f
|
353ccef78438b2947a8d1d958c996d41873fdfa0
|
refs/heads/master
| 2020-03-27T14:22:08.299948 | 2018-08-30T03:29:25 | 2018-08-30T03:29:25 | 146,658,630 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 29 |
py
|
# TODO: put classes to work
|
[
"[email protected]"
] | |
ed86654502c4aba575c6e6a700d207d96b6f6a5e
|
acdf28f00143f4b9f21494fe08abf2b32a04e2c5
|
/comment/migrations/0002_auto_20181214_0037.py
|
8b845764eeedb1286d64a0274b74fa27210adcea
|
[] |
no_license
|
ITuDous/myblog
|
5341ff8afba0b624a83c7874b8a6a8791314cc53
|
c3a84ec34647a4907434c94c80739ee99c11f78d
|
refs/heads/master
| 2020-04-11T12:26:03.355313 | 2018-12-17T16:15:14 | 2018-12-17T16:15:14 | 161,780,084 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 485 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-12-13 16:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('comment', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='created_time',
            field=models.DateTimeField(auto_now_add=True, verbose_name='creation date'),
),
]
|
[
"[email protected]"
] | |
ae973368e63b0ad138b2cd102e4f343956afe41d
|
d6cc3073f85f962e717e706abf83091be3bf44dd
|
/easemyshopping/urls.py
|
5ffcc63648d72102c4b8dda700e0f0b2c0115c68
|
[] |
no_license
|
pranav-katlana-56/Ease-My-Shopping
|
358d9cfdd5ed9818477a96c80409101b021276eb
|
480bd69466d06aef56b8331c1f47ca67ceb07704
|
refs/heads/main
| 2023-06-23T03:47:33.490227 | 2021-07-24T18:54:51 | 2021-07-24T18:54:51 | 376,473,662 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 801 |
py
|
"""easemyshopping URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path("" , include('shop.urls'))
]
|
[
"[email protected]"
] | |
068777999b31a9511eaf61a997af405208261bad
|
09aedd16bee72d68ecfc84c86e8eaa9a9a00a1b9
|
/Sprint_Challenge/northwind.py
|
f27555c2382acafb5ef149173936c82e6b8d5d1c
|
[
"MIT"
] |
permissive
|
albert-h-wong/DS-Unit-3-Sprint-2-SQL-and-Databases
|
d911ddabce11533a326268048af708ad82ad0f02
|
4220f5ced48080da338e99d4fa9da3fb878b6daf
|
refs/heads/master
| 2020-04-24T11:37:30.227299 | 2019-02-28T00:47:05 | 2019-02-28T00:47:05 | 171,931,476 | 0 | 0 |
MIT
| 2019-02-21T19:25:04 | 2019-02-21T19:25:04 | null |
UTF-8
|
Python
| false | false | 2,674 |
py
|
#!/usr/bin/env python
""" Unit3 Sprint2 Challenge - Northwind
"""
import sqlite3
conn = sqlite3.connect('northwind_small.sqlite3')
curs = conn.cursor()
curs.execute("""SELECT ProductName, UnitPrice FROM Product ORDER BY UnitPrice
DESC LIMIT 10;""")
results1 = curs.fetchall()
print(results1)
# [('Côte de Blaye', 263.5), ('Thüringer Rostbratwurst', 123.79),
# ('Mishi Kobe Niku', 97), ("Sir Rodney's Marmalade", 81),
# ('Carnarvon Tigers', 62.5), ('Raclette Courdavault', 55),
# ('Manjimup Dried Apples', 53), ('Tarte au sucre', 49.3),
# ('Ipoh Coffee', 46), ('Rössle Sauerkraut', 45.6)]
curs.execute("""SELECT avg(HireDate - BirthDate) AS AverageAge
FROM Employee;""")
results2 = curs.fetchall()
print(results2)
# [(37.22222222222222,)] Average age of employees at the hire date
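# Note: SQLite coerces these ISO-8601 date strings to their leading numeric
# prefix (the year) when subtracting, so HireDate - BirthDate yields a
# difference in whole years.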
curs.execute("""SELECT City, avg(HireDate - BirthDate) AS AverageAGE
FROM Employee GROUP BY City ORDER BY AverageAGE;""")
results3 = curs.fetchall()
print(results3)
# [('Kirkland', 29.0), ('London', 32.5), ('Seattle', 40.0),
# ('Tacoma', 40.0), ('Redmond', 56.0)]
curs.execute("""SELECT CompanyName,ProductName, UnitPrice FROM Product AS p,
Supplier AS sup WHERE p.SupplierId = sup.Id ORDER BY UnitPrice
DESC LIMIT 10;""")
results4 = curs.fetchall()
print(results4)
# [('Aux joyeux ecclésiastiques', 'Côte de Blaye', 263.5),
# ('Plutzer Lebensmittelgroßmärkte AG', 'Thüringer Rostbratwurst', 123.79),
# ('Tokyo Traders', 'Mishi Kobe Niku', 97), ('Specialty Biscuits, Ltd.',
# "Sir Rodney's Marmalade", 81), ('Pavlova, Ltd.', 'Carnarvon Tigers', 62.5),
# ('Gai pâturage', 'Raclette Courdavault', 55), ("G'day, Mate",
# 'Manjimup Dried Apples', 53), ("Forêts d'érables", 'Tarte au sucre', 49.3),
# ('Leka Trading', 'Ipoh Coffee', 46), ('Plutzer Lebensmittelgroßmärkte AG',
# 'Rössle Sauerkraut', 45.6)]
curs.execute("""SELECT cat.CategoryName, Count(p.Id)as NumberOfProducts
FROM Product AS p, Category AS cat WHERE cat.Id = p.CategoryId
GROUP BY cat.CategoryName ORDER BY NumberOfProducts DESC
LIMIT 1;""")
results5 = curs.fetchall()
print(results5)
# [('Confections', 13)] The max category is Confections which has 13 products
curs.execute("""SELECT FirstName, LastName, et.EmployeeId, Count(TerritoryId)
AS NumberOfTerritories FROM Employee AS emp, EmployeeTerritory
AS et WHERE emp.Id = et.EmployeeId GROUP BY et.EmployeeId
ORDER BY NumberOfTerritories DESC LIMIT 1;""")
results6 = curs.fetchall()
print(results6)
# [('Robert', 'King', 7, 10)] max territories is 10 by employeeID 7 Robert King
curs.close()
conn.close()
|
[
"albe.h.wong.com"
] |
albe.h.wong.com
|
8482e6db69da923cddef863c3d9bd5a5fa84fe6c
|
660d24268591ecb54e9f7885a195bb7f37f6836a
|
/DataStructure/Set/Prob6Union.py
|
999c8e253f2fd0b02e7af0d0d6a0ecbbdb8704f5
|
[] |
no_license
|
mandar-degvekar/DataEngineeringGCP
|
3e924c3438fcdb3db7aa3fe14ab5b60a9a796554
|
086a82d4fb66c4ed70337d7715dc9ad32ddd46df
|
refs/heads/master
| 2022-07-15T19:37:53.052570 | 2019-06-25T09:59:44 | 2019-06-25T09:59:44 | 192,692,729 | 0 | 0 | null | 2022-06-21T22:11:24 | 2019-06-19T08:37:21 |
Python
|
UTF-8
|
Python
| false | false | 137 |
py
|
s=set(['abc','xyz','ss','bb','ss','dd'])
print('Set1:',s)
m=set(['abc','xyz','coco','mocha'])
print('Set2:',m)
print('union:',s.union(m))
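# Note: Python sets are unordered, so the printed element order may vary.
# The union here contains: 'abc', 'xyz', 'ss', 'bb', 'dd', 'coco', 'mocha'.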
|
[
"[email protected]"
] | |
09c5c0f500049f682537e17e758566cd5a346d59
|
bc01e1d158e7d8f28451a7e108afb8ec4cb7d5d4
|
/sage/src/sage/combinat/species/functorial_composition_species.py
|
6c84368ba4dfa192538a5c7946a0850b4b801bd3
|
[] |
no_license
|
bopopescu/geosci
|
28792bda1ec1f06e23ba8dcb313769b98f793dad
|
0d9eacbf74e2acffefde93e39f8bcbec745cdaba
|
refs/heads/master
| 2021-09-22T17:47:20.194233 | 2018-09-12T22:19:36 | 2018-09-12T22:19:36 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,158 |
py
|
"""
Functorial composition species
"""
from __future__ import absolute_import
#*****************************************************************************
# Copyright (C) 2008 Mike Hansen <[email protected]>,
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from .species import GenericCombinatorialSpecies
from .structure import GenericSpeciesStructure
from sage.misc.cachefunc import cached_function
from sage.structure.unique_representation import UniqueRepresentation
class FunctorialCompositionStructure(GenericSpeciesStructure):
pass
class FunctorialCompositionSpecies(GenericCombinatorialSpecies):
def __init__(self, F, G, min=None, max=None, weight=None):
"""
Returns the functorial composition of two species.
EXAMPLES::
sage: E = species.SetSpecies()
sage: E2 = species.SetSpecies(size=2)
sage: WP = species.SubsetSpecies()
sage: P2 = E2*E
sage: G = WP.functorial_composition(P2)
sage: G.isotype_generating_series().coefficients(5)
[1, 1, 2, 4, 11]
sage: G = species.SimpleGraphSpecies()
sage: c = G.generating_series().coefficients(2)
sage: type(G)
<class 'sage.combinat.species.functorial_composition_species.FunctorialCompositionSpecies'>
sage: G == loads(dumps(G))
True
sage: G._check() #False due to isomorphism types not being implemented
False
"""
self._F = F
self._G = G
self._state_info = [F, G]
self._name = "Functorial composition of (%s) and (%s)"%(F, G)
GenericCombinatorialSpecies.__init__(self, min=None, max=None, weight=None)
_default_structure_class = FunctorialCompositionStructure
def _structures(self, structure_class, s):
"""
EXAMPLES::
sage: G = species.SimpleGraphSpecies()
sage: G.structures([1,2,3]).list()
[{},
{{1, 2}*{3}},
{{1, 3}*{2}},
{{2, 3}*{1}},
{{1, 2}*{3}, {1, 3}*{2}},
{{1, 2}*{3}, {2, 3}*{1}},
{{1, 3}*{2}, {2, 3}*{1}},
{{1, 2}*{3}, {1, 3}*{2}, {2, 3}*{1}}]
"""
gs = self._G.structures(s).list()
for f in self._F.structures(gs):
yield f
def _isotypes(self, structure_class, s):
"""
There is no known algorithm for efficiently generating the
isomorphism types of the functorial composition of two species.
EXAMPLES::
sage: G = species.SimpleGraphSpecies()
sage: G.isotypes([1,2,3]).list()
Traceback (most recent call last):
...
NotImplementedError
"""
raise NotImplementedError
def _gs(self, series_ring, base_ring):
"""
EXAMPLES::
sage: G = species.SimpleGraphSpecies()
sage: G.generating_series().coefficients(5)
[1, 1, 1, 4/3, 8/3]
"""
return self._F.generating_series(base_ring).functorial_composition(self._G.generating_series(base_ring))
def _itgs(self, series_ring, base_ring):
"""
EXAMPLES::
sage: G = species.SimpleGraphSpecies()
sage: G.isotype_generating_series().coefficients(5)
[1, 1, 2, 4, 11]
"""
return self.cycle_index_series(base_ring).isotype_generating_series()
def _cis(self, series_ring, base_ring):
"""
EXAMPLES::
sage: G = species.SimpleGraphSpecies()
sage: G.cycle_index_series().coefficients(5)
[p[],
p[1],
p[1, 1] + p[2],
4/3*p[1, 1, 1] + 2*p[2, 1] + 2/3*p[3],
8/3*p[1, 1, 1, 1] + 4*p[2, 1, 1] + 2*p[2, 2] + 4/3*p[3, 1] + p[4]]
"""
return self._F.cycle_index_series(base_ring).functorial_composition(self._G.cycle_index_series(base_ring))
def weight_ring(self):
"""
Returns the weight ring for this species. This is determined by
asking Sage's coercion model what the result is when you multiply
(and add) elements of the weight rings for each of the operands.
EXAMPLES::
sage: G = species.SimpleGraphSpecies()
sage: G.weight_ring()
Rational Field
"""
from sage.structure.element import get_coercion_model
cm = get_coercion_model()
f_weights = self._F.weight_ring()
g_weights = self._G.weight_ring()
return cm.explain(f_weights, g_weights, verbosity=0)
#Backward compatibility
FunctorialCompositionSpecies_class = FunctorialCompositionSpecies
|
[
"valber@HPC"
] |
valber@HPC
|
163265522ac5b1d53899d5d114cb4432cf72522d
|
1548ce77537dcd50ab04b0eaee050b5d30553e23
|
/tests/test_pipeline/components/classification/test_lda.py
|
f78f133407c5e5dff1614b0807339f117fb6d6e8
|
[
"Apache-2.0"
] |
permissive
|
Shamoo100/AutoTabular
|
4a20e349104246bf825ebceae33dca0a79928f2e
|
7d71bf01d2b7d84fcf5f65c9f45c5cea1255d8a2
|
refs/heads/main
| 2023-08-13T21:34:34.329888 | 2021-10-02T07:06:00 | 2021-10-02T07:06:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 768 |
py
|
import sklearn.discriminant_analysis
from autotabular.pipeline.components.classification.lda import LDA
from .test_base import BaseClassificationComponentTest
class LDAComponentTest(BaseClassificationComponentTest):
__test__ = True
res = dict()
res['default_iris'] = 1.0
res['default_iris_iterative'] = -1
res['default_iris_proba'] = 0.5614481896257509
res['default_iris_sparse'] = -1
res['default_digits'] = 0.88585306618093507
res['default_digits_iterative'] = -1
res['default_digits_binary'] = 0.9811778992106861
res['default_digits_multilabel'] = 0.82204896441795205
res['default_digits_multilabel_proba'] = 0.9833070018235553
sk_mod = sklearn.discriminant_analysis.LinearDiscriminantAnalysis
module = LDA
|
[
"[email protected]"
] | |
d37e35abfbf2c3e77023a18d970272c7300ff78e
|
a83708c948e47c4c259a7b36162f03e3a94de623
|
/blog/migrations/0009_post_photo.py
|
741eb18998de47554f96e1ed508c83817577c49c
|
[] |
no_license
|
thingwithgrace/askdjango1
|
b6e79a5f0f7443562fec445e2fc3eb6342cf406b
|
b6dd45c7cee4da2de6a2486e321656df898e66de
|
refs/heads/master
| 2021-08-15T19:56:52.545201 | 2017-11-18T05:48:19 | 2017-11-18T05:48:19 | 111,119,372 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 455 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-11-12 05:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0008_auto_20171104_0938'),
]
operations = [
migrations.AddField(
model_name='post',
name='photo',
field=models.ImageField(blank=True, upload_to=''),
),
]
|
[
"[email protected]"
] | |
ac6a0ca371b731aea7262b3985218e9426f0ad47
|
bab72bfcdb7fa3bd982220ce1fb4865a4cf3612f
|
/app/Database/website.py
|
0735be0909b59ed1dd924b1e830a087508684230
|
[] |
no_license
|
AtulPhadke/Energy
|
45e815cbce8f5d2e353a2abf162b2ae1f6446e53
|
860cb8061b5158cf215102f32bc1d25cbfed4d65
|
refs/heads/master
| 2020-11-27T20:27:05.226903 | 2019-12-22T16:01:57 | 2019-12-22T16:01:57 | 229,590,598 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 346 |
py
|
from flask import Flask
from flask import render_template, request
import time
import os
app = Flask(__name__)
@app.route('/')
def function():
humidity = 0
temperature = 2
wateranalog = 3
print ("something for testing")
return render_template("Data.html", humidity=humidity, temperature=temperature, wateranalog=wateranalog)
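
# Minimal entry point sketch: run the Flask development server
# (default host/port assumed) when this module is executed directly.
if __name__ == '__main__':
    app.run(debug=True)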
|
[
"[email protected]"
] | |
afbe88d7eb97cb8bd5ee6d07a3d91df5a0d6ddef
|
909bbd1bf3e52db2027e6f8075a8e1587cec0470
|
/req-client.py
|
38619eececf0d04bafba36975e14bf47a3a68c33
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
lkarsten/libvmod-policy
|
c85c16e99b015d9fcde0a55bd8155180703ef5e7
|
7447ed73dc654cccf981d6c1795ebe3c9971c004
|
HEAD
| 2016-08-07T17:02:34.776618 | 2013-12-11T15:16:45 | 2013-12-11T15:16:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,045 |
py
|
#!/usr/bin/env python
#
# This would be what Varnish does.
#
import struct
import socket
from time import sleep, time
# no empty ending lines.
req = ["""xid: 12345
vcl_method: 1
client_ip: 127.0.0.1
t_open: %s
http_method: 1
URL: /
proto: HTTP/1.1
""" % time(),
"""Host: localhost
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
Accept-Language: nb-NO,nb;q=0.8,no;q=0.6,nn;q=0.4,en-US;q=0.2,en;q=0.2
Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.3
User-Agent: curl 1.2
Cache-Control: no-cache
Cookie: __utma=253898641.2098534993.1348749499.1374491627.1374580772.70; __utmz=2538986 41.1348749499.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none)
""",
"this is the post body"]
class ServerError(Exception):
pass
if __name__ == "__main__":
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect("/tmp/foo.sock")
# sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO, 2)
# sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
headercontent = (len(req[0]), len(req[1]), len(req[2]))
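    # Wire format: the 6-byte magic "VPOL01" followed by three network-byte-order
    # uint32 payload lengths (struct "!III"), then the three payloads back to back.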
# print headercontent
header = "VPOL01" + struct.pack("!III", *headercontent)
# print len(header)
sock.send(header)
sock.send(req[0])
sock.send(req[1])
sock.send(req[2])
response = ''
waited = 0.0
while True:
try:
r = sock.recv(1500, socket.MSG_DONTWAIT)
except Exception as e:
if e.errno == 11: # not yet
waited += 0.01
sleep(0.01)
else:
print dir(e)
print str(e)
else:
if len(r) == 0:
waited += 0.01
sleep(0.01)
else:
#print "got %i bytes" % len(r)
#print r
response += r
if len(r) >= 3:
break
if waited >= 2:
raise ServerError("timeout after %ss" % waited)
print "response: %s" % response.strip()
|
[
"[email protected]"
] | |
262fc3846844f497b0bab68f27751c6f64640fdc
|
4b62abbdc2a37ec7b38ad09d287acb1f868c9389
|
/skimage/color/__init__.py
|
29f36aeced04a598b59474cc226c989d9a314b6b
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
retsyo/scikit-image
|
98b3472e569dc40d2373ad282c8d9bcf93ac02ad
|
821c9f249df5c3cd0a5e885db8fbf3f659b65ef0
|
refs/heads/master
| 2023-06-08T15:55:05.840330 | 2023-05-24T22:03:21 | 2023-05-24T22:03:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,937 |
py
|
"""Utilities for color conversion, color labeling, and color difference
calculations.
"""
from .colorconv import (convert_colorspace,
xyz_tristimulus_values,
rgba2rgb,
rgb2hsv,
hsv2rgb,
rgb2xyz,
xyz2rgb,
rgb2rgbcie,
rgbcie2rgb,
rgb2gray,
gray2rgb,
gray2rgba,
xyz2lab,
lab2xyz,
lab2rgb,
rgb2lab,
xyz2luv,
luv2xyz,
luv2rgb,
rgb2luv,
rgb2hed,
hed2rgb,
lab2lch,
lch2lab,
rgb2yuv,
yuv2rgb,
rgb2yiq,
yiq2rgb,
rgb2ypbpr,
ypbpr2rgb,
rgb2ycbcr,
ycbcr2rgb,
rgb2ydbdr,
ydbdr2rgb,
separate_stains,
combine_stains,
rgb_from_hed,
hed_from_rgb,
rgb_from_hdx,
hdx_from_rgb,
rgb_from_fgx,
fgx_from_rgb,
rgb_from_bex,
bex_from_rgb,
rgb_from_rbd,
rbd_from_rgb,
rgb_from_gdx,
gdx_from_rgb,
rgb_from_hax,
hax_from_rgb,
rgb_from_bro,
bro_from_rgb,
rgb_from_bpx,
bpx_from_rgb,
rgb_from_ahx,
ahx_from_rgb,
rgb_from_hpx,
hpx_from_rgb)
from .colorlabel import color_dict, label2rgb
from .delta_e import (deltaE_cie76,
deltaE_ciede94,
deltaE_ciede2000,
deltaE_cmc,
)
__all__ = ['convert_colorspace',
'xyz_tristimulus_values',
'rgba2rgb',
'rgb2hsv',
'hsv2rgb',
'rgb2xyz',
'xyz2rgb',
'rgb2rgbcie',
'rgbcie2rgb',
'rgb2gray',
'gray2rgb',
'gray2rgba',
'xyz2lab',
'lab2xyz',
'lab2rgb',
'rgb2lab',
'rgb2hed',
'hed2rgb',
'lab2lch',
'lch2lab',
'rgb2yuv',
'yuv2rgb',
'rgb2yiq',
'yiq2rgb',
'rgb2ypbpr',
'ypbpr2rgb',
'rgb2ycbcr',
'ycbcr2rgb',
'rgb2ydbdr',
'ydbdr2rgb',
'separate_stains',
'combine_stains',
'rgb_from_hed',
'hed_from_rgb',
'rgb_from_hdx',
'hdx_from_rgb',
'rgb_from_fgx',
'fgx_from_rgb',
'rgb_from_bex',
'bex_from_rgb',
'rgb_from_rbd',
'rbd_from_rgb',
'rgb_from_gdx',
'gdx_from_rgb',
'rgb_from_hax',
'hax_from_rgb',
'rgb_from_bro',
'bro_from_rgb',
'rgb_from_bpx',
'bpx_from_rgb',
'rgb_from_ahx',
'ahx_from_rgb',
'rgb_from_hpx',
'hpx_from_rgb',
'color_dict',
'label2rgb',
'deltaE_cie76',
'deltaE_ciede94',
'deltaE_ciede2000',
'deltaE_cmc',
]
|
[
"[email protected]"
] | |
cc042b33e9836bf1c995f6cbc4bb6b74aaab5ce0
|
c25fe18d30e3d4fdaf78b65608b8997c8b05ddca
|
/myweb/guestbook/migrations/0001_initial.py
|
44c7eae0610c7b144e0d30e28583735d871c3901
|
[] |
no_license
|
qmakzl/Python-Django
|
aafb8005da9e5ecd6dfcd6591ac0052e88342150
|
b9d5cf7fc5caf288cb91daf144d4eb2bc71b1eb0
|
refs/heads/main
| 2023-07-27T14:05:07.645794 | 2021-09-02T07:39:40 | 2021-09-02T07:39:40 | 320,154,381 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 759 |
py
|
# Generated by Django 3.1.3 on 2020-11-26 02:00
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Guestbook',
fields=[
('idx', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=50)),
('email', models.CharField(max_length=50)),
('passwd', models.CharField(max_length=50)),
('content', models.TextField()),
('post_date', models.DateTimeField(blank=True, default=datetime.datetime.now)),
],
),
]
|
[
"[email protected]"
] | |
969b43e6469219c4cc7b935c8ba80b364f6e5e5a
|
21e9b5b658d8190b793b75ce74d6b5a2f96c20d5
|
/magma/lte/gateway/python/magma/pipelined/datapath_setup.py
|
347334e093a85fc93fe17ac4088cc32085780ce5
|
[
"BSD-3-Clause"
] |
permissive
|
shivesh-wavelabs/magma-main
|
e483a2e9640e31f890ebe23cd17a3b190f1ab2da
|
66bc17004a07c26028562b328b07539ecc3d2f9c
|
refs/heads/main
| 2023-07-18T20:06:28.202153 | 2021-07-23T07:35:45 | 2021-07-23T07:35:45 | 400,152,493 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,358 |
py
|
"""
Copyright 2021 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import os
import subprocess
irq_utility = '/usr/local/bin/set_irq_affinity'
ethtool_utility = '/usr/sbin/ethtool'
'''
The following function sets various tuning parameters related to the
interface queues:
1. RX queue size
2. TX queue size
3. queue CPU assignment
'''
def tune_datapath(config_dict):
# TODO move this to mconfig
if 'dp_irq' not in config_dict:
logging.info("DP Tuning not enabled.")
return
if _check_util_failed(irq_utility):
return
if _check_util_failed(ethtool_utility):
return
tune_dp_irqs = config_dict['dp_irq']
logging.info("set tuning params: %s", tune_dp_irqs)
# stop irq-balance
stop_irq_balance = ['service', 'irqbalance', 'stop']
logging.debug("cmd: %s", stop_irq_balance)
try:
subprocess.check_call(stop_irq_balance)
except subprocess.CalledProcessError as ex:
logging.debug('%s failed with: %s', stop_irq_balance, ex)
# set_irq_affinity -X 1-2 eth1
s1_interface = config_dict['enodeb_iface']
s1_cpu = tune_dp_irqs['S1_cpu']
set_s1_cpu_command = [irq_utility, '-X', s1_cpu, s1_interface]
logging.debug("cmd: %s", set_s1_cpu_command)
try:
subprocess.check_call(set_s1_cpu_command)
except subprocess.CalledProcessError as ex:
logging.debug('%s failed with: %s', set_s1_cpu_command, ex)
sgi_interface = config_dict['nat_iface']
sgi_cpu = tune_dp_irqs['SGi_cpu']
set_sgi_cpu_command = [irq_utility, '-X', sgi_cpu, sgi_interface]
logging.debug("cmd: %s", set_sgi_cpu_command)
try:
subprocess.check_call(set_sgi_cpu_command)
except subprocess.CalledProcessError as ex:
logging.debug('%s failed with: %s', set_sgi_cpu_command, ex)
# ethtool -G eth1 rx 1024 tx 1024
s1_queue_size = tune_dp_irqs['S1_queue_size']
set_s1_queue_sz = [
ethtool_utility, '-G', s1_interface,
'rx', str(s1_queue_size), 'tx', str(s1_queue_size),
]
logging.debug("cmd: %s", set_s1_queue_sz)
try:
subprocess.check_call(set_s1_queue_sz)
except subprocess.CalledProcessError as ex:
logging.debug('%s failed with: %s', set_s1_queue_sz, ex)
sgi_queue_size = tune_dp_irqs['SGi_queue_size']
set_sgi_queue_sz = [
ethtool_utility, '-G', sgi_interface,
'rx', str(sgi_queue_size), 'tx', str(sgi_queue_size),
]
logging.debug("cmd: %s", set_sgi_queue_sz)
try:
subprocess.check_call(set_sgi_queue_sz)
except subprocess.CalledProcessError as ex:
logging.debug('%s failed with: %s', set_sgi_queue_sz, ex)
def _check_util_failed(path: str):
if not os.path.isfile(path) or not os.access(path, os.X_OK):
logging.info(
"missing %s: path: %s perm: %s", path,
os.path.isfile(path),
os.access(path, os.X_OK),
)
return True
return False
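A minimal sketch of how tune_datapath might be driven, assuming a hand-built config dict; the key names mirror the lookups in the function above, while the interface names, CPU ranges, and ring sizes are illustrative values only, not taken from a real deployment.
# Hypothetical invocation (illustrative values only).
example_config = {
    'enodeb_iface': 'eth1',  # S1 interface (assumption)
    'nat_iface': 'eth2',     # SGi interface (assumption)
    'dp_irq': {
        'S1_cpu': '1-2',         # CPUs pinned to S1 IRQs
        'SGi_cpu': '3-4',        # CPUs pinned to SGi IRQs
        'S1_queue_size': 1024,   # rx/tx ring size for S1
        'SGi_queue_size': 1024,  # rx/tx ring size for SGi
    },
}

if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    tune_datapath(example_config)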
|
[
"[email protected]"
] | |
fae38ea09e1beb8207caeb28a4f7c4996b13a758
|
af179f861c423a27ed4539882b1b17202c4833b5
|
/algorithms/curious_a2c/agent_curious_a2c.py
|
5c24f7f2685aa3ba6dedb53e35ac26524afdf941
|
[
"MIT"
] |
permissive
|
alex-petrenko/curious-rl
|
1666251076859304b55969d590447fc6b5c3b2f6
|
6cd0eb78ab409c68f8dad1a8542d625f0dd39114
|
refs/heads/master
| 2020-04-08T01:32:28.271135 | 2019-06-13T20:54:22 | 2019-06-13T20:54:22 | 158,899,170 | 22 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 24,992 |
py
|
"""
Implementation of the curious variant of the Advantage Actor-Critic algorithm.
"""
import time
import numpy as np
import tensorflow as tf
from tensorflow.contrib import slim
from algorithms.algo_utils import RunningMeanStd, EPS, extract_keys
from algorithms.baselines.a2c.agent_a2c import AgentA2C
from algorithms.env_wrappers import has_image_observations
from algorithms.multi_env import MultiEnv
from algorithms.tf_utils import dense, count_total_parameters, conv
from algorithms.utils import summaries_dir
from utils.distributions import CategoricalProbabilityDistribution
from utils.utils import log, AttrDict, put_kernels_on_grid
class CuriousA2CPolicy:
"""A class that represents both the actor's policy and the value estimator."""
def __init__(self, env, observations, timer, params):
self.regularizer = tf.contrib.layers.l2_regularizer(scale=1e-10)
img_model_name = params.image_model_name
fc_layers = params.fc_layers
fc_size = params.fc_size
lowdim_model_name = params.lowdim_model_name
past_frames = params.stack_past_frames
image_obs = has_image_observations(env.observation_space.spaces['obs'])
num_actions = env.action_space.n
if image_obs:
# convolutions
if img_model_name == 'convnet_simple':
conv_filters = self._convnet_simple(observations, [(32, 3, 2)] * 4)
else:
raise Exception('Unknown model name')
encoded_input = tf.contrib.layers.flatten(conv_filters)
else:
# low-dimensional input
if lowdim_model_name == 'simple_fc':
frames = tf.split(observations, past_frames, axis=1)
fc_encoder = tf.make_template('fc_encoder', self._fc_frame_encoder, create_scope_now_=True)
encoded_frames = [fc_encoder(frame) for frame in frames]
encoded_input = tf.concat(encoded_frames, axis=1)
else:
raise Exception('Unknown lowdim model name')
if params.ignore_timer:
timer = tf.multiply(timer, 0.0)
encoded_input_with_timer = tf.concat([encoded_input, tf.expand_dims(timer, 1)], axis=1)
fc = encoded_input_with_timer
for _ in range(fc_layers - 1):
fc = dense(fc, fc_size, self.regularizer)
# fully-connected layers to generate actions
actions_fc = dense(fc, fc_size // 2, self.regularizer)
self.actions = tf.contrib.layers.fully_connected(actions_fc, num_actions, activation_fn=None)
self.best_action_deterministic = tf.argmax(self.actions, axis=1)
self.actions_prob_distribution = CategoricalProbabilityDistribution(self.actions)
self.act = self.actions_prob_distribution.sample()
value_fc = dense(fc, fc_size // 2, self.regularizer)
self.value = tf.squeeze(tf.contrib.layers.fully_connected(value_fc, 1, activation_fn=None), axis=[1])
if image_obs:
# summaries
with tf.variable_scope('conv1', reuse=True):
weights = tf.get_variable('weights')
with tf.name_scope('a2c_agent_summary_conv'):
if weights.shape[2].value in [1, 3, 4]:
tf.summary.image('conv1/kernels', put_kernels_on_grid(weights), max_outputs=1)
log.info('Total parameters in the model: %d', count_total_parameters())
def _fc_frame_encoder(self, x):
return dense(x, 128, self.regularizer)
def _conv(self, x, filters, kernel, stride, scope=None):
return conv(x, filters, kernel, stride=stride, regularizer=self.regularizer, scope=scope)
def _convnet_simple(self, x, convs):
"""Basic stacked convnet."""
layer = x
layer_idx = 1
for filters, kernel, stride in convs:
layer = self._conv(layer, filters, kernel, stride, 'conv' + str(layer_idx))
layer_idx += 1
return layer
class Model:
"""Single class for inverse and forward dynamics model."""
def __init__(self, env, obs, next_obs, actions, past_frames, forward_fc):
"""
:param obs - placeholder for observations
:param actions - placeholder for selected actions
"""
self.regularizer = tf.contrib.layers.l2_regularizer(scale=1e-10)
image_obs = has_image_observations(env.observation_space.spaces['obs'])
num_actions = env.action_space.n
if image_obs:
# convolutions
conv_encoder = tf.make_template(
'conv_encoder',
self._convnet_simple,
create_scope_now_=True,
convs=[(32, 3, 2)] * 4,
)
encoded_obs = conv_encoder(obs=obs)
encoded_obs = tf.contrib.layers.flatten(encoded_obs)
encoded_next_obs = conv_encoder(obs=next_obs)
self.encoded_next_obs = tf.contrib.layers.flatten(encoded_next_obs)
else:
# low-dimensional input
lowdim_encoder = tf.make_template(
'lowdim_encoder',
self._lowdim_encoder,
create_scope_now_=True,
past_frames=past_frames,
)
encoded_obs = lowdim_encoder(obs=obs)
self.encoded_next_obs = lowdim_encoder(obs=next_obs)
self.feature_vector_size = encoded_obs.get_shape().as_list()[-1]
log.info('Feature vector size in ICM: %d', self.feature_vector_size)
actions_one_hot = tf.one_hot(actions, num_actions)
# forward model
forward_model_input = tf.concat(
[encoded_obs, actions_one_hot],
axis=1,
)
forward_model_hidden = dense(forward_model_input, forward_fc, self.regularizer)
forward_model_hidden = dense(forward_model_hidden, forward_fc, self.regularizer)
forward_model_output = tf.contrib.layers.fully_connected(
forward_model_hidden, self.feature_vector_size, activation_fn=None,
)
self.predicted_obs = forward_model_output
# inverse model
inverse_model_input = tf.concat([encoded_obs, self.encoded_next_obs], axis=1)
inverse_model_hidden = dense(inverse_model_input, 256, self.regularizer)
inverse_model_output = tf.contrib.layers.fully_connected(
inverse_model_hidden, num_actions, activation_fn=None,
)
self.predicted_actions = inverse_model_output
log.info('Total parameters in the model: %d', count_total_parameters())
def _fc_frame_encoder(self, x):
return dense(x, 128, self.regularizer)
def _lowdim_encoder(self, obs, past_frames):
frames = tf.split(obs, past_frames, axis=1)
fc_encoder = tf.make_template('fc_encoder', self._fc_frame_encoder, create_scope_now_=True)
encoded_frames = [fc_encoder(frame) for frame in frames]
encoded_input = tf.concat(encoded_frames, axis=1)
return encoded_input
def _conv(self, x, filters, kernel, stride, scope=None):
return conv(x, filters, kernel, stride=stride, regularizer=self.regularizer, scope=scope)
def _convnet_simple(self, convs, obs):
"""Basic stacked convnet."""
layer = obs
layer_idx = 1
for filters, kernel, stride in convs:
layer = self._conv(layer, filters, kernel, stride, 'conv' + str(layer_idx))
layer_idx += 1
return layer
class AgentCuriousA2C(AgentA2C):
"""Agent based on A2C algorithm."""
class Params(AgentA2C.Params):
"""Hyperparams for the algorithm and the training process."""
def __init__(self, experiment_name):
"""Default parameter values set in ctor."""
super(AgentCuriousA2C.Params, self).__init__(experiment_name)
self.icm_beta = 0.5 # in ICM, importance of training forward model vs inverse model
self.model_lr_scale = 10.0 # in ICM, importance of model loss vs actor-critic loss
self.prediction_bonus_coeff = 0.05 # scaling factor for prediction bonus vs env rewards
self.clip_bonus = 0.1
self.clip_advantage = 10
self.ignore_timer = False # whether or not policy uses the remaining episode time
self.forward_fc = 512
self.train_for_env_steps = 10 * 1000 * 1000 * 1000
# noinspection PyMethodMayBeStatic
def filename_prefix(self):
return 'curious_a2c_'
def __init__(self, make_env_func, params):
"""Initialize A2C computation graph and some auxiliary tensors."""
super(AgentA2C, self).__init__(params) # calling grandparent ctor, skipping parent
global_step = tf.train.get_or_create_global_step()
self.make_env_func = make_env_func
self.selected_actions = tf.placeholder(tf.int32, [None]) # action selected by the policy
self.value_estimates = tf.placeholder(tf.float32, [None])
self.discounted_rewards = tf.placeholder(tf.float32, [None]) # estimate of total reward (rollout + value)
self.advantages = tf.placeholder(tf.float32, [None])
env = make_env_func() # we need it to query observation shape, number of actions, etc.
obs_shape = list(env.observation_space.spaces['obs'].shape)
input_shape = [None] + obs_shape # add batch dimension
self.observations = tf.placeholder(tf.float32, shape=input_shape)
self.next_obs = tf.placeholder(tf.float32, shape=input_shape)
self.timer = tf.placeholder(tf.float32, shape=[None])
self.policy = CuriousA2CPolicy(env, self.observations, self.timer, params)
self.model = Model(
env, self.observations, self.next_obs, self.selected_actions, params.stack_past_frames, params.forward_fc,
)
env.close()
# negative logarithm of the probabilities of actions
neglogp_actions = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.policy.actions, labels=self.selected_actions,
)
# maximize probabilities of actions that give high advantage
action_losses = tf.clip_by_value(self.advantages * neglogp_actions, -20.0, 20.0)
action_loss = tf.reduce_mean(action_losses)
# penalize for inaccurate value estimation
value_losses = tf.square(self.discounted_rewards - self.policy.value)
value_losses = tf.clip_by_value(value_losses, -20.0, 20.0)
value_loss = self.params.value_loss_coeff * tf.reduce_mean(value_losses)
# penalize the agent for being "too sure" about its actions (to prevent converging to the suboptimal local
# minimum too soon)
entropy_loss = -tf.reduce_mean(self.policy.actions_prob_distribution.entropy())
entropy_loss_coeff = tf.train.exponential_decay(
self.params.initial_entropy_loss_coeff, tf.cast(global_step, tf.float32), 20.0, 0.95, staircase=True,
)
entropy_loss_coeff = tf.maximum(entropy_loss_coeff, self.params.min_entropy_loss_coeff)
entropy_loss = entropy_loss_coeff * entropy_loss
# total actor-critic loss
a2c_loss = action_loss + entropy_loss + value_loss
# model losses
forward_loss_batch = 0.5 * tf.square(self.model.encoded_next_obs - self.model.predicted_obs)
forward_loss_batch = tf.reduce_mean(forward_loss_batch, axis=1) * self.model.feature_vector_size
forward_loss = tf.reduce_mean(forward_loss_batch)
inverse_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.model.predicted_actions, labels=self.selected_actions,
))
icm_beta = self.params.icm_beta
model_loss = forward_loss * icm_beta + inverse_loss * (1.0 - icm_beta)
model_loss = self.params.model_lr_scale * model_loss
# regularization
regularization_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
# total loss
loss = a2c_loss + model_loss + regularization_loss
# training
self.train = tf.contrib.layers.optimize_loss(
loss=loss,
global_step=global_step,
learning_rate=self.params.learning_rate,
optimizer=tf.train.AdamOptimizer,
clip_gradients=self.params.clip_gradients,
)
bonus = self.params.prediction_bonus_coeff * forward_loss_batch
self.prediction_curiosity_bonus = tf.clip_by_value(bonus, -self.params.clip_bonus, self.params.clip_bonus)
# summaries for the agent and the training process
with tf.name_scope('a2c_agent_summary'):
if len(self.observations.shape) >= 4:
tf.summary.image(
'observations',
self.observations[:, :, :, :3], # first three channels
max_outputs=8,
)
# output also last channel
if self.observations.shape[-1].value > 4:
tf.summary.image('observations_last_channel', self.observations[:, :, :, -1:])
tf.summary.scalar('disc_rewards_avg', tf.reduce_mean(self.discounted_rewards))
tf.summary.scalar('disc_rewards_max', tf.reduce_max(self.discounted_rewards))
tf.summary.scalar('disc_rewards_min', tf.reduce_min(self.discounted_rewards))
tf.summary.scalar('bonus_avg', tf.reduce_mean(self.prediction_curiosity_bonus))
tf.summary.scalar('bonus_max', tf.reduce_max(self.prediction_curiosity_bonus))
tf.summary.scalar('bonus_min', tf.reduce_min(self.prediction_curiosity_bonus))
tf.summary.scalar('value', tf.reduce_mean(self.policy.value))
tf.summary.scalar('adv_avg_abs', tf.reduce_mean(tf.abs(self.advantages)))
tf.summary.scalar('adv_max', tf.reduce_max(self.advantages))
tf.summary.scalar('adv_min', tf.reduce_min(self.advantages))
tf.summary.scalar('selected_action_avg', tf.reduce_mean(tf.to_float(self.selected_actions)))
tf.summary.scalar('policy_entropy', tf.reduce_mean(self.policy.actions_prob_distribution.entropy()))
tf.summary.scalar('entropy_coeff', entropy_loss_coeff)
with tf.name_scope('a2c_losses'):
tf.summary.scalar('action_loss', action_loss)
tf.summary.scalar('max_action_loss', tf.reduce_max(action_losses))
tf.summary.scalar('value_loss', value_loss)
tf.summary.scalar('max_value_loss', tf.reduce_max(value_losses))
tf.summary.scalar('entropy_loss', entropy_loss)
tf.summary.scalar('a2c_loss', a2c_loss)
tf.summary.scalar('forward_loss', forward_loss)
tf.summary.scalar('inverse_loss', inverse_loss)
tf.summary.scalar('model_loss', model_loss)
tf.summary.scalar('regularization_loss', regularization_loss)
tf.summary.scalar('loss', loss)
summary_dir = summaries_dir(self.params.experiment_dir())
self.summary_writer = tf.summary.FileWriter(summary_dir)
self.all_summaries = tf.summary.merge_all()
with tf.name_scope('a2c_aux_summary'):
tf.summary.scalar('training_steps', global_step, collections=['aux'])
# if it's not "initialized" yet, just report 0 to preserve tensorboard plot scale
best_reward_report = tf.cond(
tf.equal(self.best_avg_reward, self.initial_best_avg_reward),
true_fn=lambda: 0.0,
false_fn=lambda: self.best_avg_reward,
)
tf.summary.scalar('best_reward_ever', best_reward_report, collections=['aux'])
tf.summary.scalar('avg_reward', self.avg_reward_placeholder, collections=['aux'])
self.avg_length_placeholder = tf.placeholder(tf.float32, [])
tf.summary.scalar('avg_length', self.avg_length_placeholder, collections=['aux'])
self.aux_summaries = tf.summary.merge_all(key='aux')
self.saver = tf.train.Saver(max_to_keep=3)
all_vars = tf.trainable_variables()
log.warn('curious a2c variables:')
slim.model_analyzer.analyze_vars(all_vars, print_info=True)
def best_action(self, observation, deterministic=False):
obs, timer = extract_keys([observation], 'obs', 'timer')
actions, _ = self._policy_step_timer(obs, timer, deterministic)
return actions[0]
def _policy_step_timer(self, observations, timer, deterministic=False):
"""
Select the best action by sampling from the distribution generated by the policy. Also estimate the
value for the currently observed environment state.
"""
ops = [
self.policy.best_action_deterministic if deterministic else self.policy.act,
self.policy.value,
]
actions, values = self.session.run(
ops,
feed_dict={
self.observations: observations,
self.timer: timer,
},
)
return actions, values
def _estimate_values_timer(self, observations, timer):
values = self.session.run(
self.policy.value,
feed_dict={
self.observations: observations,
self.timer: timer,
},
)
return values
def _prediction_curiosity_bonus(self, observations, actions, next_obs):
bonuses = self.session.run(
self.prediction_curiosity_bonus,
feed_dict={
self.selected_actions: actions,
self.observations: observations,
self.next_obs: next_obs,
}
)
return bonuses
def _curious_train_step(
self, step, env_steps, observations, timer, actions, values, discounted_rewards, advantages, next_obs
):
"""
Actually do a single iteration of training. See the computational graph in the ctor to figure out
the details.
"""
with_summaries = self._should_write_summaries(step)
summaries = [self.all_summaries] if with_summaries else []
result = self.session.run(
[self.train] + summaries,
feed_dict={
self.observations: observations,
self.timer: timer,
self.selected_actions: actions,
self.value_estimates: values,
self.discounted_rewards: discounted_rewards,
self.advantages: advantages,
self.next_obs: next_obs,
},
)
step = tf.train.global_step(self.session, tf.train.get_global_step())
if with_summaries:
summary = result[1]
self.summary_writer.add_summary(summary, global_step=env_steps)
return step
def _learn_loop(self, multi_env, step_callback=None):
"""
Main training loop.
:param step_callback: a hacky callback that takes a dictionary with all local variables as an argument.
Allows you to look inside the training process.
"""
step = initial_step = tf.train.global_step(self.session, tf.train.get_global_step())
env_steps = self.total_env_steps.eval(session=self.session)
batch_size = self.params.rollout * self.params.num_envs
img_obs, timer_obs = extract_keys(multi_env.initial_obs(), 'obs', 'timer')
adv_running_mean_std = RunningMeanStd(max_past_samples=10000)
def end_of_training(s, es):
return s >= self.params.train_for_steps or es > self.params.train_for_env_steps
while not end_of_training(step, env_steps):
timing = AttrDict({'experience': time.time(), 'batch': time.time()})
experience_start = time.time()
env_steps_before_batch = env_steps
batch_obs, batch_timer = [img_obs], [timer_obs]
env_steps += len(img_obs)
batch_actions, batch_values, batch_rewards, batch_dones, batch_next_obs = [], [], [], [], []
for rollout_step in range(self.params.rollout):
actions, values = self._policy_step_timer(img_obs, timer_obs)
batch_actions.append(actions)
batch_values.append(values)
# wait for all the workers to complete an environment step
next_obs, rewards, dones, infos = multi_env.step(actions)
next_img_obs, next_timer = extract_keys(next_obs, 'obs', 'timer')
# calculate curiosity bonus
bonuses = self._prediction_curiosity_bonus(img_obs, actions, next_img_obs)
rewards += bonuses
batch_rewards.append(rewards)
batch_dones.append(dones)
batch_next_obs.append(next_img_obs)
img_obs = next_img_obs
timer_obs = next_timer
if infos is not None and 'num_frames' in infos[0]:
env_steps += sum((info['num_frames'] for info in infos))
else:
env_steps += multi_env.num_envs
if rollout_step != self.params.rollout - 1:
# we don't need the newest observation in the training batch, already have enough
batch_obs.append(img_obs)
batch_timer.append(timer_obs)
assert len(batch_obs) == len(batch_rewards)
assert len(batch_obs) == len(batch_next_obs)
batch_rewards = np.asarray(batch_rewards, np.float32).swapaxes(0, 1)
batch_dones = np.asarray(batch_dones, np.bool).swapaxes(0, 1)
batch_values = np.asarray(batch_values, np.float32).swapaxes(0, 1)
# Last value won't be valid for envs with done=True (because the env automatically resets and shows the 1st
# observation of the next episode). But that's okay, because we should never use last_value in this case.
last_values = self._estimate_values_timer(img_obs, timer_obs)
gamma = self.params.gamma
disc_rewards = []
for i in range(len(batch_rewards)):
env_rewards = self._calc_discounted_rewards(gamma, batch_rewards[i], batch_dones[i], last_values[i])
disc_rewards.extend(env_rewards)
disc_rewards = np.asarray(disc_rewards, np.float32)
# convert observations and estimations to meaningful n-step batches
batch_obs_shape = (self.params.rollout * multi_env.num_envs,) + img_obs[0].shape
batch_obs = np.asarray(batch_obs, np.float32).swapaxes(0, 1).reshape(batch_obs_shape)
batch_next_obs = np.asarray(batch_next_obs, np.float32).swapaxes(0, 1).reshape(batch_obs_shape)
batch_actions = np.asarray(batch_actions, np.int32).swapaxes(0, 1).flatten()
batch_timer = np.asarray(batch_timer, np.float32).swapaxes(0, 1).flatten()
batch_values = batch_values.flatten()
advantages = disc_rewards - batch_values
if self.params.normalize_adv:
adv_running_mean_std.update(advantages)
advantages = (advantages - adv_running_mean_std.mean) / (np.sqrt(adv_running_mean_std.var) + EPS)
advantages = np.clip(advantages, -self.params.clip_advantage, self.params.clip_advantage)
timing.experience = time.time() - timing.experience
timing.train = time.time()
step = self._curious_train_step(
step,
env_steps,
batch_obs,
batch_timer,
batch_actions,
batch_values,
disc_rewards,
advantages,
batch_next_obs,
)
self._maybe_save(step, env_steps)
timing.train = time.time() - timing.train
avg_reward = multi_env.calc_avg_rewards(n=self.params.stats_episodes)
avg_length = multi_env.calc_avg_episode_lengths(n=self.params.stats_episodes)
fps = (env_steps - env_steps_before_batch) / (time.time() - timing.batch)
self._maybe_print(step, avg_reward, avg_length, fps, timing)
self._maybe_aux_summaries(step, env_steps, avg_reward, avg_length)
self._maybe_update_avg_reward(avg_reward, multi_env.stats_num_episodes())
if step_callback is not None:
step_callback(locals(), globals())
def learn(self, step_callback=None):
try:
multi_env = MultiEnv(
self.params.num_envs,
self.params.num_workers,
make_env_func=self.make_env_func,
stats_episodes=self.params.stats_episodes,
)
self._learn_loop(multi_env, step_callback)
except Exception as exc:
log.exception(exc)
finally:
log.info('Closing env...')
multi_env.close()
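To make the curiosity-bonus arithmetic above concrete, here is a standalone NumPy sketch of forward_loss_batch and the clipped prediction bonus; it mirrors the TF ops in the constructor (0.5 * squared error, mean over features, rescaled by the feature-vector size, scaled by prediction_bonus_coeff, clipped to ±clip_bonus) but uses toy data and is not part of the agent.
# Standalone NumPy sketch of the prediction-based curiosity bonus (toy data).
import numpy as np

def curiosity_bonus(encoded_next_obs, predicted_obs, coeff=0.05, clip=0.1):
    # 0.5 * squared error, averaged over features, rescaled by feature size,
    # then scaled by the bonus coefficient and clipped
    feature_size = encoded_next_obs.shape[1]
    forward_loss_batch = 0.5 * np.square(encoded_next_obs - predicted_obs)
    forward_loss_batch = forward_loss_batch.mean(axis=1) * feature_size
    return np.clip(coeff * forward_loss_batch, -clip, clip)

rng = np.random.RandomState(0)
enc = rng.randn(4, 8)               # toy batch: 4 transitions, 8-dim features
pred = enc + 0.1 * rng.randn(4, 8)  # accurate forward model -> small bonus
print(curiosity_bonus(enc, pred))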
|
[
"[email protected]"
] | |
2489b3cb627d4f223f765feef5d1da637afe9945
|
dcd83aeb799143b58956612fb0bfc0258d30f229
|
/util/InjectProduction.py
|
080e7bd5f38e06ae65983f3e5b91a4e0a5d453bf
|
[] |
no_license
|
giffels/PRODAGENT
|
67e3e841cfca7421caa505d03417b663a62d321b
|
c99608e3e349397fdd1b0b5c011bf4f33a1c3aad
|
refs/heads/master
| 2021-01-01T05:51:52.200716 | 2012-10-24T13:22:34 | 2012-10-24T13:22:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,545 |
py
|
#!/usr/bin/env python
"""
Generate jobs for the workflow provided
"""
from MessageService.MessageService import MessageService
from ProdAgentCore.Configuration import loadProdAgentConfiguration
import sys,os,getopt,time
usage="\n Usage: python InjectTest.py <options> \n Options: \n --workflow=<workflow.xml> \t\t workflow file \n --nevts=<NumberofEvent> \t\t number of events per job \n --njobs=<NumberofEvent> \t\t number of jobs \n --plugin=<Submission type> \t type of creation/submission plugin \n --site-pref=<StorageElement name> storage element name \n [ --run=<firstRun> \t\t\t first run number effective only for New Workflow]\n\n *Note* that the run number option is effective only when a New workflow is created and it overwrites the FirstRun default in $PRODAGENT_CONFIG if set"
valid = ['workflow=', 'run=', 'nevts=' , 'njobs=', 'site-pref=','plugin=']
admitted_vals = ['LCGAdvanced', 'LCG','GliteBulk','T0LSF','GLITE', 'GLiteBulkResCon', 'BossLite']
try:
opts, args = getopt.getopt(sys.argv[1:], "", valid)
except getopt.GetoptError, ex:
print usage
print str(ex)
sys.exit(1)
workflow = None
run = "None"
nevts = None
njobs = None
sitePref = None
submissiontype = "LCG"
for opt, arg in opts:
if opt == "--workflow":
workflow = arg
if opt == "--run":
run = int(arg)
if opt == "--nevts":
nevts = int(arg)
if opt == "--njobs":
njobs = int(arg)
if opt == "--site-pref":
sitePref = arg
if opt == "--plugin":
submissiontype = arg
if submissiontype not in admitted_vals :
print "Submission plugin: %s is not supported \nSupported values are: %s" % (submissiontype, admitted_vals)
sys.exit(1)
if workflow == None:
print "--workflow option not provided"
print usage
sys.exit(1)
workflow=os.path.expandvars(os.path.expanduser(workflow))
if not os.path.exists(workflow):
print "Workflow not found: %s" % workflow
sys.exit(1)
if nevts == None:
print "--nevts option not provided."
print usage
sys.exit(1)
if njobs == None:
print "--njobs option not provided."
print usage
sys.exit(1)
if submissiontype == "GliteBulk":
if int(njobs) <= 1 :
print "--njobs need to be greater than 1 for GliteBulk submission"
sys.exit(1)
def getRequestInjectorConfig():
"""
get the RequestInjector Component dir and the optional FirstRun
"""
try:
config = loadProdAgentConfiguration()
except StandardError, ex:
msg = "Error: error reading configuration:\n"
msg += str(ex)
print msg
sys.exit(1)
if not config.has_key("RequestInjector"):
msg = "Error: Configuration block RequestInjector is missing from $PRODAGENT_CONFIG"
print msg
sys.exit(1)
ReqInjConfig = config.getConfig("RequestInjector")
#if not ReqInjConfig.has_key("ComponentDir"):
# msg = "Error: Configuration block RequestInjector is missing ComponentDir in $PRODAGENT_CONFIG"
# print msg
# sys.exit(1)
return ReqInjConfig.get("ComponentDir", None),ReqInjConfig.get("FirstRun", "None"),ReqInjConfig.get("QueueJobMode", "None")
def checkWorkflow(workflow):
"""
Check if the provided workflow already exists in WorkflowCache area
"""
WorkflowExists=False
workflowBase = os.path.basename(workflow)
RequestDir,firstrun,queuemode = getRequestInjectorConfig()
workflowCache="%s/WorkflowCache"%RequestDir
if not os.path.exists(workflowCache):
msg = "Error: there is no WorkflowCache area ==> %s"%workflowCache
print msg
sys.exit(1)
workflowCacheFile = os.path.join(workflowCache, "%s"%workflowBase)
if os.path.exists(workflowCacheFile):
WorkflowExists=True
msg=" Workflow %s already exists"%(workflowBase,)
print msg
else:
msg=" Workflow %s is NEW since the %s doesn't exist"%(workflowBase,workflowCacheFile)
print msg
return WorkflowExists,firstrun,queuemode
## use MessageService
ms = MessageService()
## register message service instance as "Test"
ms.registerAs("Test")
## Debug level
ms.publish("RequestInjector:StartDebug","none")
ms.publish("JobCreator:StartDebug","none")
ms.publish("JobSubmitter:StartDebug","none")
ms.commit()
ms.publish("TrackingComponent:StartDebug","none")
ms.commit()
## Set Creator/Submitter
if submissiontype == "LCG":
ms.publish("JobCreator:SetCreator","LCGCreator")
ms.publish("JobSubmitter:SetSubmitter","LCGSubmitter")
if submissiontype == "GLITE":
ms.publish("JobCreator:SetCreator","LCGCreator")
ms.publish("JobSubmitter:SetSubmitter","GLITESubmitter")
if submissiontype == "BossLite":
ms.publish("JobCreator:SetGenerator","Bulk")
ms.commit()
time.sleep(0.1)
ms.publish("JobCreator:SetCreator","LCGBulkCreator")
ms.publish("JobSubmitter:SetSubmitter","BlGLiteBulkSubmitter")
ms.publish("RequestInjector:SetBulkMode",'')
if submissiontype == "GliteBulk":
ms.publish("JobCreator:SetGenerator","Bulk")
ms.commit()
time.sleep(0.1)
ms.publish("JobCreator:SetCreator","LCGBulkCreator")
ms.publish("JobSubmitter:SetSubmitter","GLiteBulkSubmitter")
ms.publish("RequestInjector:SetBulkMode",'')
if submissiontype == "T0LSF":
ms.publish("JobCreator:SetCreator","T0LSFCreator")
ms.publish("JobSubmitter:SetSubmitter","T0LSFSubmitter")
if submissiontype == 'GLiteBulkResCon':
ms.publish("JobCreator:SetGenerator","Bulk")
ms.commit()
time.sleep(0.1)
ms.publish("JobCreator:SetCreator","LCGBulkCreator")
ms.publish("JobSubmitter:SetSubmitter","GLiteBulkResConSubmitter")
ms.publish("RequestInjector:SetBulkMode",'')
ms.commit()
## Set Workflow and run
WorkflowExists,firstrun,queuemode=checkWorkflow(workflow)
if str(queuemode).lower() in ("true", "yes"):
queuemode = True
ms.publish("JobQueue:StartDebug","none")
ms.commit()
else:
queuemode = False
if WorkflowExists:
## reload the Workflow and start from last run
run="None"
ms.publish("RequestInjector:LoadWorkflows",'')
ms.commit()
time.sleep(0.1)
workflowBase=os.path.basename(workflow)
ms.publish("RequestInjector:SelectWorkflow", workflowBase)
ms.commit()
else:
## set the workflow for the first time and set the compulsory the initial run
if run == "None": run = firstrun
if run == "None":
msg="Error: This is a NEW Workflow so it's compulsory to provide an initial Run number! You can: \n a) use the --run option \n b) set FirstRun in the RequestInjector configuration block in $PRODAGENT_CONFIG"
print msg
sys.exit(1)
ms.publish("RequestInjector:SetWorkflow", workflow)
ms.commit()
time.sleep(0.1)
ms.publish("RequestInjector:SetInitialRun", str(run))
ms.commit()
if sitePref != None:
ms.publish("RequestInjector:SetSitePref", sitePref)
ms.commit()
time.sleep(0.1)
ms.publish("RequestInjector:SetEventsPerJob", str(nevts))
ms.commit()
time.sleep(2)
## Set New Dataset
ms.publish("RequestInjector:NewDataset",'')
ms.commit()
## Loop over jobs
if run != "None":
runcomment=" run %s"%str(run)
else:
runcomment=" last run for %s "%workflowBase
if queuemode:
print " Trying to insert in JobQueue %s jobs with %s events each starting from %s "%(njobs,str(nevts),runcomment)
else:
print " Trying to submit %s jobs with %s events each starting from %s"%(njobs,str(nevts),runcomment)
if submissiontype == "GliteBulk" or submissiontype == "BossLite":
ms.publish("RequestInjector:ResourcesAvailable", str(njobs) )
ms.commit()
else:
njobs=njobs+1
for i in range(1, njobs):
ms.publish("RequestInjector:ResourcesAvailable","none")
ms.commit()
|
[
""
] | |
9db249d2ef36520a60d54f9e60ae144e9e92038e
|
932ce227a7b641cf5243a61dc97ea7c40455ef89
|
/api/celery.py
|
d0261c6928ed44368604cc9382ac64e711fa5492
|
[] |
no_license
|
aserguie/SeelkCoinAPI
|
d1d3b8a8bc3f86f2680753ecca88c265a795d363
|
084007153c3614e830874fc8ceade30975b4927c
|
refs/heads/master
| 2022-12-10T05:53:25.158661 | 2019-11-19T10:40:42 | 2019-11-19T10:40:42 | 222,402,223 | 0 | 0 | null | 2022-04-22T22:41:55 | 2019-11-18T08:45:09 |
Python
|
UTF-8
|
Python
| false | false | 714 |
py
|
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "api.settings")
app = Celery("api", broker="redis://", backend="redis://", include=["api.tasks"])
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object("django.conf:settings", namespace="CELERY")
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
@app.task(bind=True)
def debug_task(self):
print("Request: {0!r}".format(self.request))
|
[
"[email protected]"
] | |
8704bbaf901d8a35e0ee5512cc626afd639f0d60
|
bed0d23d35b42b7316dee35f9fa06d4d2cc9de26
|
/src/custom_admin/__init__.py
|
5ec402089bc364c75d9685df1a8d89ebdb5cca66
|
[] |
no_license
|
Bloodlettinger/meandre
|
b55911c93faf6c279f496394137def21ec181e6a
|
f9a8c5dc709fcdda808fc1329264724c7b8d951e
|
refs/heads/master
| 2020-05-17T23:01:15.326103 | 2012-10-11T17:22:48 | 2012-10-11T17:22:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 199 |
py
|
# -*- coding: utf-8 -*-
from datetime import date, datetime
def ddmmyy(value):
if isinstance(value, (date, datetime)):
return value.strftime('%d.%m.%y')
else:
return u'--'
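A quick usage sketch of ddmmyy (the dates are example values):
# Example usage (illustrative values).
print(ddmmyy(date(2012, 10, 11)))       # -> 11.10.12
print(ddmmyy(datetime(2012, 10, 11)))   # -> 11.10.12
print(ddmmyy("not a date"))             # -> --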
|
[
"[email protected]"
] | |
efe434213dc08aa37d70206d5ba0731cffc4253b
|
3ddd6915d480b3e69d95d3970ed22cd64ce5cb2b
|
/Monster.py
|
81965497b40fe0c979f81702fbe6ebebec2d0352
|
[] |
no_license
|
batescol/Zeaurk
|
8b299f6ff498bc53c6a198620185eb679cfadca7
|
b748baec1e7800113dce6587bcdaf0a5d6ac41d1
|
refs/heads/master
| 2021-04-15T14:20:10.344945 | 2018-03-23T03:41:43 | 2018-03-23T03:41:43 | 126,427,464 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 821 |
py
|
import Observer
import random
# This class represents a generic monster
class Monster(Observer.Observable):
def __init__(self, house):
super().__init__()
self.house = house
# set the label to something memorable so we can
# catch weird bugs
self.label = "__MON__"
self.setObser(house)
# Called to attack the player, in accordance with the monster's
# attack range
def attack(self, player):
damage = random.uniform(self.attrange[0], self.attrange[1])
player.getHit(damage)
return damage
# Called when the player attacks the monster
def getHit(self, damage, weapon):
# Apply any vulnerabilities
if weapon in self.vuln:
damage = damage * self.vuln[weapon]
self.health = self.health - damage
# If we die, let the house know
if self.health <= 0:
self.show()
return damage
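attack() and getHit() read self.attrange, self.vuln, and self.health, none of which this base class sets, so concrete monsters are expected to define them. A hypothetical subclass sketch (the name, numbers, and vulnerability key are invented for illustration):
# Hypothetical subclass for illustration -- the attribute names match what
# Monster.attack()/getHit() read, but the concrete values are invented.
class Zombie(Monster):
    def __init__(self, house):
        super().__init__(house)
        self.health = 50.0
        self.attrange = (3.0, 7.0)   # min/max damage per attack
        self.vuln = {"sword": 2.0}   # takes double damage from swords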
|
[
"[email protected]"
] | |
9c94a6ae985e0ffbcc4884ebef338fa1f8d357d0
|
b7a2a80843fa5141ffb9c7b4439f1d2ac713af30
|
/Version2/U7.2_Threads_Alt.py
|
6e674dc4ae02171ef537759fd638fb0b727f2a73
|
[] |
no_license
|
wunnox/python_grundlagen
|
df1bc2b9b1b561bd6733ccc25305e799a48e714e
|
fa84d7aae7332a7acbb3ba7ff0fe2216cc345fc0
|
refs/heads/master
| 2023-05-01T12:19:23.208445 | 2023-04-16T11:29:01 | 2023-04-16T11:29:01 | 222,099,539 | 2 | 3 | null | 2019-12-19T10:56:43 | 2019-11-16T12:57:54 |
Python
|
UTF-8
|
Python
| false | false | 1,241 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
####################################################
#
# Exercise:
# Write a program that starts three threads.
# The first thread runs for 8 seconds, the second for 4 seconds, and the third for 6 seconds.
# Use the preceding slide as a template.
#
####################################################
#### Solution: ####
import time
import _thread
t = []
def show(c, s):
t.append(c)
print("Starte Thread", c, "mit", s, "Sek.")
time.sleep(s)
t.remove(c)
_thread.start_new_thread(show, (1, 12,))
time.sleep(0.5)
_thread.start_new_thread(show, (2, 22,))
time.sleep(0.5)
_thread.start_new_thread(show, (3, 18,))
time.sleep(0.5)
_thread.start_new_thread(show, (4, 14,))
time.sleep(0.5)
_thread.start_new_thread(show, (5, 21,))
time.sleep(0.5)
_thread.start_new_thread(show, (6, 19,))
time.sleep(0.5)
_thread.start_new_thread(show, (7, 15,))
time.sleep(0.5)
_thread.start_new_thread(show, (8, 18,))
time.sleep(0.5)
_thread.start_new_thread(show, (9, 13,))
time.sleep(0.5)
_thread.start_new_thread(show, (10, 14,))
time.sleep(0.5)
while t:
    print("Waiting for threads to finish", t)
    time.sleep(1)
print("All threads finished")
|
[
"[email protected]"
] | |
b20ec919b3bf275ed1bcbe843963d49d1abfdeae
|
d6a87864028abde8da69b0a1075e3d4c483ed73c
|
/base/baseheap.py
|
6db645d2a85ffa7480fc4454289c7144d0ee5942
|
[] |
no_license
|
Windsooon/LeetCode
|
7ef78c7e001c1e6924244869a7ba5491d33eb246
|
409d7db811d41dbcc7ce8cda82b77eff35585657
|
refs/heads/master
| 2021-01-10T15:26:16.986357 | 2020-01-01T14:57:58 | 2020-01-01T14:57:58 | 54,531,267 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 105 |
py
|
class BaseHeap:
def upheap(self, p):
pass
def downheap(self, p):
pass
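Since upheap and downheap are left as stubs, here is a minimal array-backed min-heap sketch of what a concrete subclass might look like; the parent/child index arithmetic is the standard binary-heap layout, not something defined in this file.
# Minimal array-backed min-heap sketch (illustrative subclass, not from this repo).
class ListMinHeap(BaseHeap):
    def __init__(self):
        self.data = []

    def upheap(self, p):
        # swap p with its parent while it is smaller
        while p > 0:
            parent = (p - 1) // 2
            if self.data[p] < self.data[parent]:
                self.data[p], self.data[parent] = self.data[parent], self.data[p]
                p = parent
            else:
                break

    def downheap(self, p):
        # swap p with its smallest child until heap order holds
        n = len(self.data)
        while True:
            left, right = 2 * p + 1, 2 * p + 2
            smallest = p
            if left < n and self.data[left] < self.data[smallest]:
                smallest = left
            if right < n and self.data[right] < self.data[smallest]:
                smallest = right
            if smallest == p:
                break
            self.data[p], self.data[smallest] = self.data[smallest], self.data[p]
            p = smallest

    def push(self, value):
        self.data.append(value)
        self.upheap(len(self.data) - 1)

    def pop(self):
        top = self.data[0]
        last = self.data.pop()
        if self.data:
            self.data[0] = last
            self.downheap(0)
        return top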
|
[
"[email protected]"
] | |
043db3e45d99d02a4a0ebd409673a173cd7d82e1
|
bd298cf70991a97f896c2be7d0c255d822166b54
|
/Servidor/client.py
|
09991814f19d399b26858f7b6cc25e5735cc2d17
|
[
"MIT"
] |
permissive
|
wesbdss/GameFabrica
|
97a2671cd6da49e1122d24007ecc1d06e3a2fb71
|
6b0940237bcfc08e43b389e910ae72936f7ebeda
|
refs/heads/master
| 2020-08-07T15:15:05.248991 | 2019-12-04T14:34:22 | 2019-12-04T14:34:22 | 213,501,969 | 1 | 0 |
MIT
| 2019-11-08T06:56:53 | 2019-10-07T22:57:29 |
Dart
|
UTF-8
|
Python
| false | false | 697 |
py
|
from websocket import create_connection
import json
ws = create_connection("ws://localhost:8080/event")
msg = ""
while msg != "sair":
print("Teste de funções (wey): 1 - jogar 2 - ingame 3 - end")
msg = input("Digita o função >> ")
username = input("Digita o username >> ")
if msg == "sair":
quit()
if msg == "1":
ws.send(json.dumps({"function":"jogar","username":username}))
if msg == "2":
ws.send(json.dumps({"function":"ingame","username":username}))
if msg == "3":
ws.send(json.dumps({"function":"end","username":username}))
else quit()
result = ws.recv()
print("Reposta do server >> '%s'" % result)
ws.close()
|
[
"[email protected]"
] | |
4be89123e49ddac69d783cd58a65464869343d44
|
3f9e0b03c86fa4f4e28b5e28bcb9bb2e737fe7e1
|
/env/Lib/site-packages/pip/_internal/network/auth.py
|
74d225472f6f62727fd2e4d698f77cf3137725e8
|
[
"Apache-2.0"
] |
permissive
|
sinha-debojyoti/Ookla-Speedtest.net-Crawler
|
58c5b9d535b9f10f54eecbc656a6d62c50cc19b7
|
02e54f5679de74f732a34a37fac260d2ac34eb12
|
refs/heads/master
| 2022-07-18T10:27:35.020386 | 2022-07-03T03:53:11 | 2022-07-03T03:53:11 | 218,542,102 | 17 | 33 |
Apache-2.0
| 2022-07-03T03:55:41 | 2019-10-30T14:08:16 |
Python
|
UTF-8
|
Python
| false | false | 11,645 |
py
|
"""Network Authentication Helpers
Contains interface (MultiDomainBasicAuth) and associated glue code for
providing credentials in the context of network requests.
"""
import urllib.parse
from typing import Any, Dict, List, Optional, Tuple
from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth
from pip._vendor.requests.models import Request, Response
from pip._vendor.requests.utils import get_netrc_auth
from pip._internal.utils.logging import getLogger
from pip._internal.utils.misc import (
ask,
ask_input,
ask_password,
remove_auth_from_url,
split_auth_netloc_from_url,
)
from pip._internal.vcs.versioncontrol import AuthInfo
logger = getLogger(__name__)
Credentials = Tuple[str, str, str]
try:
import keyring
except ImportError:
keyring = None
except Exception as exc:
logger.warning(
"Keyring is skipped due to an exception: %s",
str(exc),
)
keyring = None
def get_keyring_auth(url: Optional[str], username: Optional[str]) -> Optional[AuthInfo]:
"""Return the tuple auth for a given url from keyring."""
global keyring
if not url or not keyring:
return None
try:
try:
get_credential = keyring.get_credential
except AttributeError:
pass
else:
logger.debug("Getting credentials from keyring for %s", url)
cred = get_credential(url, username)
if cred is not None:
return cred.username, cred.password
return None
if username:
logger.debug("Getting password from keyring for %s", url)
password = keyring.get_password(url, username)
if password:
return username, password
except Exception as exc:
logger.warning(
"Keyring is skipped due to an exception: %s",
str(exc),
)
keyring = None
return None
class MultiDomainBasicAuth(AuthBase):
def __init__(
self, prompting: bool = True, index_urls: Optional[List[str]] = None
) -> None:
self.prompting = prompting
self.index_urls = index_urls
self.passwords: Dict[str, AuthInfo] = {}
# When the user is prompted to enter credentials and keyring is
# available, we will offer to save them. If the user accepts,
# this value is set to the credentials they entered. After the
# request authenticates, the caller should call
# ``save_credentials`` to save these.
self._credentials_to_save: Optional[Credentials] = None
def _get_index_url(self, url: str) -> Optional[str]:
"""Return the original index URL matching the requested URL.
Cached or dynamically generated credentials may work against
the original index URL rather than just the netloc.
The provided url should have had its username and password
removed already. If the original index url had credentials then
they will be included in the return value.
Returns None if no matching index was found, or if --no-index
was specified by the user.
"""
if not url or not self.index_urls:
return None
for u in self.index_urls:
prefix = remove_auth_from_url(u).rstrip("/") + "/"
if url.startswith(prefix):
return u
return None
def _get_new_credentials(
self,
original_url: str,
allow_netrc: bool = True,
allow_keyring: bool = False,
) -> AuthInfo:
"""Find and return credentials for the specified URL."""
# Split the credentials and netloc from the url.
url, netloc, url_user_password = split_auth_netloc_from_url(
original_url,
)
# Start with the credentials embedded in the url
username, password = url_user_password
if username is not None and password is not None:
logger.debug("Found credentials in url for %s", netloc)
return url_user_password
# Find a matching index url for this request
index_url = self._get_index_url(url)
if index_url:
# Split the credentials from the url.
index_info = split_auth_netloc_from_url(index_url)
if index_info:
index_url, _, index_url_user_password = index_info
logger.debug("Found index url %s", index_url)
# If an index URL was found, try its embedded credentials
if index_url and index_url_user_password[0] is not None:
username, password = index_url_user_password
if username is not None and password is not None:
logger.debug("Found credentials in index url for %s", netloc)
return index_url_user_password
# Get creds from netrc if we still don't have them
if allow_netrc:
netrc_auth = get_netrc_auth(original_url)
if netrc_auth:
logger.debug("Found credentials in netrc for %s", netloc)
return netrc_auth
# If we don't have a password and keyring is available, use it.
if allow_keyring:
# The index url is more specific than the netloc, so try it first
# fmt: off
kr_auth = (
get_keyring_auth(index_url, username) or
get_keyring_auth(netloc, username)
)
# fmt: on
if kr_auth:
logger.debug("Found credentials in keyring for %s", netloc)
return kr_auth
return username, password
def _get_url_and_credentials(
self, original_url: str
) -> Tuple[str, Optional[str], Optional[str]]:
"""Return the credentials to use for the provided URL.
If allowed, netrc and keyring may be used to obtain the
correct credentials.
Returns (url_without_credentials, username, password). Note
that even if the original URL contains credentials, this
function may return a different username and password.
"""
url, netloc, _ = split_auth_netloc_from_url(original_url)
# Try to get credentials from original url
username, password = self._get_new_credentials(original_url)
# If credentials not found, use any stored credentials for this netloc
if username is None and password is None:
username, password = self.passwords.get(netloc, (None, None))
if username is not None or password is not None:
# Convert the username and password if they're None, so that
# this netloc will show up as "cached" in the conditional above.
# Further, HTTPBasicAuth doesn't accept None, so it makes sense to
# cache the value that is going to be used.
username = username or ""
password = password or ""
# Store any acquired credentials.
self.passwords[netloc] = (username, password)
assert (
# Credentials were found
(username is not None and password is not None)
# Credentials were not found
or (username is None and password is None)
), f"Could not load credentials from url: {original_url}"
return url, username, password
def __call__(self, req: Request) -> Request:
# Get credentials for this request
url, username, password = self._get_url_and_credentials(req.url)
# Set the url of the request to the url without any credentials
req.url = url
if username is not None and password is not None:
# Send the basic auth with this request
req = HTTPBasicAuth(username, password)(req)
# Attach a hook to handle 401 responses
req.register_hook("response", self.handle_401)
return req
# Factored out to allow for easy patching in tests
def _prompt_for_password(
self, netloc: str
) -> Tuple[Optional[str], Optional[str], bool]:
username = ask_input(f"User for {netloc}: ")
if not username:
return None, None, False
auth = get_keyring_auth(netloc, username)
if auth and auth[0] is not None and auth[1] is not None:
return auth[0], auth[1], False
password = ask_password("Password: ")
return username, password, True
# Factored out to allow for easy patching in tests
def _should_save_password_to_keyring(self) -> bool:
if not keyring:
return False
return ask("Save credentials to keyring [y/N]: ", ["y", "n"]) == "y"
def handle_401(self, resp: Response, **kwargs: Any) -> Response:
# We only care about 401 responses, anything else we want to just
# pass through the actual response
if resp.status_code != 401:
return resp
# We are not able to prompt the user so simply return the response
if not self.prompting:
return resp
parsed = urllib.parse.urlparse(resp.url)
# Query the keyring for credentials:
username, password = self._get_new_credentials(
resp.url,
allow_netrc=False,
allow_keyring=True,
)
# Prompt the user for a new username and password
save = False
if not username and not password:
username, password, save = self._prompt_for_password(parsed.netloc)
# Store the new username and password to use for future requests
self._credentials_to_save = None
if username is not None and password is not None:
self.passwords[parsed.netloc] = (username, password)
# Prompt to save the password to keyring
if save and self._should_save_password_to_keyring():
self._credentials_to_save = (parsed.netloc, username, password)
# Consume content and release the original connection to allow our new
# request to reuse the same one.
resp.content
resp.raw.release_conn()
# Add our new username and password to the request
req = HTTPBasicAuth(username or "", password or "")(resp.request)
req.register_hook("response", self.warn_on_401)
# On successful request, save the credentials that were used to
# keyring. (Note that if the user responded "no" above, this member
# is not set and nothing will be saved.)
if self._credentials_to_save:
req.register_hook("response", self.save_credentials)
# Send our new request
new_resp = resp.connection.send(req, **kwargs)
new_resp.history.append(resp)
return new_resp
def warn_on_401(self, resp: Response, **kwargs: Any) -> None:
"""Response callback to warn about incorrect credentials."""
if resp.status_code == 401:
logger.warning(
"401 Error, Credentials not correct for %s",
resp.request.url,
)
def save_credentials(self, resp: Response, **kwargs: Any) -> None:
"""Response callback to save credentials on success."""
assert keyring is not None, "should never reach here without keyring"
if not keyring:
return
creds = self._credentials_to_save
self._credentials_to_save = None
if creds and resp.status_code < 400:
try:
logger.info("Saving credentials to keyring")
keyring.set_password(*creds)
except Exception:
logger.exception("Failed to save credentials")
|
[
"[email protected]"
] | |
6eb14329982905dd577294ff85fde1af3c9295af
|
427e2c921704e1064db85ffca63f887b865d8f60
|
/src/extract_chromosomes.py
|
8fa547c8a6cbaa84ed38793906e3b0bbfe953aaa
|
[] |
no_license
|
Shannon-E-Taylor/apis-numb
|
62a9bc7139af358fe4b403e1dc9713d8047a7bbc
|
8289fa3b8c4e4ed3573d5566d984e0463805df26
|
refs/heads/master
| 2021-01-25T10:35:38.948680 | 2018-09-28T02:54:09 | 2018-09-28T02:54:09 | 123,363,414 | 0 | 1 | null | 2018-03-08T22:48:48 | 2018-03-01T01:14:21 |
Python
|
UTF-8
|
Python
| false | false | 401 |
py
|
#!/usr/bin/env python3
from Bio import SeqIO
###########
# GLOBALS #
###########
fa = snakemake.input['fa']
output_fa = snakemake.output['fa']
target_chr = ['Group1.4', 'Group3.5']
########
# MAIN #
########
scaffolds = [x for x in SeqIO.parse(fa, 'fasta')]
kept_scaffolds = [x for x in scaffolds if x.id in target_chr]
#kept_scaffolds = scaffolds
SeqIO.write(kept_scaffolds, output_fa, 'fasta')
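The script depends on the snakemake object injected at runtime. A hedged standalone equivalent, assuming input and output paths are passed on the command line instead:
# Standalone sketch (assumption: run as `python extract_chromosomes.py in.fa out.fa`
# instead of via snakemake, which normally injects the `snakemake` object).
import sys
from Bio import SeqIO

def extract(fa, output_fa, target_chr=('Group1.4', 'Group3.5')):
    scaffolds = SeqIO.parse(fa, 'fasta')
    kept = (x for x in scaffolds if x.id in target_chr)
    SeqIO.write(kept, output_fa, 'fasta')

if __name__ == '__main__':
    extract(sys.argv[1], sys.argv[2])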
|
[
"[email protected]"
] | |
0c677d9e518612c3fc5e0bacb9933ba4d2590d55
|
9ed9e15c380b442175e56cf8dfdb22e2b34481a9
|
/task4.py
|
d87fe0e74fc54d5fd5d3e2cb238f9353392201d4
|
[] |
no_license
|
Kolyan78/Zachet1
|
20e2af45ac3a07e0a985a5ffbb8f19b905dcd959
|
9978216e8e33dde1fd06943d4a346d91d1ebfc27
|
refs/heads/master
| 2023-09-06T09:16:14.813445 | 2021-11-06T19:18:05 | 2021-11-06T19:18:05 | 417,811,641 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,117 |
py
|
'''
4. The input is a list of strings of different lengths.
Write a function all_eq(lst) that returns a new list of strings of equal length.
The target length is that of the longest string.
If a given string is shorter than the longest one, pad it on the right with underscores up to the required number of characters.
Do not change the order of the elements of the original list.
'''
def all_eq(lst):
max_len = len(max(lst, key=len))
for i in range(len(lst)):
cur_len = len(lst[i])
if cur_len < max_len:
lst[i] += ("_" * (max_len - cur_len))
return lst
lst_ = ["Андрей", "Александр", "Константин", "Владислав", "Даниил", "Роман", "Лев", "Ян", "Синхрофазотрон"]
print(all_eq(lst_))
|
[
"[email protected]"
] | |
bb00b04ea2af5bfbb5cba1eaff0af1be4450a0e7
|
d965d74c37a519c4e1b3fc34c81dfdb86748c21c
|
/IA/Fibonnaci/Busca Gulosa.py
|
f2962b14ed0e90c65fe4b717da5275ac430299d6
|
[] |
no_license
|
igorveridiano/faculdade
|
a5acbc3223e5a9b1347a18820a449f0e01d94ef1
|
f9fc97f75ca174e196697b7dc000492ffc573ea0
|
refs/heads/master
| 2021-10-15T23:06:42.586429 | 2019-02-06T13:11:51 | 2019-02-06T13:11:51 | 169,293,798 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,301 |
py
|
map_maze = {
'A': [('B', 5)],
'B': [('A', 5), ('C', 7), ('F', 2)],
'C': [('B', 7), ('L', 8)],
'D': [('E', 3)],
'E': [('D', 3), ('I', 6)],
'F': [('B', 2), ('G', 5), ('J', 6)],
'G': [('F', 5), ('K', 6)],
'H': [('I', 3)],
'I': [('E', 6), ('J', 2), ('H', 3)],
'J': [('F', 6), ('I', 2), ('K', 5), ('O', 2)],
'K': [('G', 6), ('J', 5), ('L', 2), ('T', 9)],
'L': [('C', 8), ('K', 2), ('U', 9)],
'M': [('N', 3)],
'N': [('M', 3), ('O', 2), ('R', 7)],
'O': [('J', 2), ('N', 2), ('P', 3)],
'P': [('O', 3), ('S', 7)],
'Q': [('R', 3)],
'R': [('N', 7), ('Q', 3), ('S', 5)],
'S': [('P', 7), ('R', 5), ('T', 2)],
'T': [('K', 9), ('S', 2), ('U', 2)],
'U': [('L', 9), ('T', 2)]
}
first_state = 'A'
objective_state = 'Q'
state = first_state
way = []
count_cost = 0
parametro = False
visited = [first_state]
def get_adjacent_not_visited(state):
global visited
global map_maze
states = map_maze[state]
return_ = []
for s in states:
if s[0] not in visited:
return_.append(s)
return return_
def get_min_way(state):
global vertice
global count_cost
global map_maze
list_vertice = get_adjacent_not_visited(state)
if (len(list_vertice) == 0):
aux1 = way[len(way) - 1]
way.remove(way[len(way) - 1])
aux2 = way[len(way) - 1]
states = map_maze[aux2]
for s in states:
if aux1.__eq__(s[0]):
h = s[1]
count_cost = count_cost - h
return aux2
aux = True
for x in list_vertice:
if(aux):
vertice = x
aux = False
else:
if (x[1] < vertice[1]):
vertice = x
return vertice
way.append(first_state)
y = get_min_way(state)
way.append(y[0])
count_cost = count_cost + y[1]
visited.append(y[0])
state = y[0]
while not parametro:
if not (y[0].__eq__(way[len(way) -1])):
way.append(y[0])
count_cost = count_cost + y[1]
visited.append(y[0])
state = y[0]
else:
state = y[0]
if(objective_state.__eq__(y[0])):
parametro = True
            break
else:
y = get_min_way(state)
print("Caminho resultante: %s" % way)
print("Custo do caminho: %s" % count_cost)
|
[
"[email protected]"
] | |
904648d859ac06f42d2c1b82922651494faa5529
|
5b565d3b1eb96f3f70782bf2d3c97c47fcc9db17
|
/oops/constructor.py
|
a86bfe3416b2eb77d6a700a1f167a0768a0ba4db
|
[] |
no_license
|
vinodkkumarr/PythonAdvance
|
46a49eefd7b66b765dc7d43963979b7c1db06f2e
|
af7b1f939a38fb7166f67e7f25be1c8db1625ca1
|
refs/heads/master
| 2020-05-19T05:58:06.752271 | 2019-05-11T11:31:11 | 2019-05-11T11:31:11 | 184,861,385 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,193 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 4 11:07:21 2019
@author: Aarav
"""
class calculator:
a=5
b=10
def __init__(self):
self.a=10
self.b=20
def addition(self):
return self.a+self.b
def subtraction(self):
return self.a - self.b
def multiplication(self):
return self.a*self.b
def divison(self):
try:
return self.a/self.b
        except ZeroDivisionError:
            print("Exception occurred")
def all(self):
add=self.a+self.b
sub=self.a-self.b
mul=self.a*self.b
        div = self.a / self.b
return add,sub,mul,div
def printall(self):
print("Arithematic opertion on the numbers: {0} {1}" .format(self.a,self.b))
print("Addition : {} " . format(self.addition()))
print("Subtraction is :" + str(self.subtraction()))
print("Multiplication is :" + str(self.multiplication()))
print("Division is :" + str(self.divison()))
print("Addition,subtraction,multiplication,division"+ str(self.all()))
c=calculator()
c.printall()
print(c.__module__)
print(__name__)
|
[
"[email protected]"
] | |
077a8eab9da148e455511ab157c33a420305bc9d
|
ce2e72a45446699917a306654a7f826c054858a2
|
/placetoplay/models.py
|
dfac32cd60628ac780dd0b1c032ff840e4fba7f0
|
[] |
no_license
|
camiry/Placetoplay-Student_Project
|
de574460cac6fd807175cd7f7ab21bf1798eb78f
|
d4baeb0e35b102f8b2d49fb3fdb7fca2f215aeb8
|
refs/heads/master
| 2020-06-04T17:07:20.763927 | 2014-01-10T22:02:45 | 2014-01-10T22:02:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,633 |
py
|
from django.db import models
from django.contrib.auth.models import User
SKILL_CHOICES = (
("No prior experience", "No prior experience"),
("Novice", "Novice"),
("Intermediate", "Intermediate"),
("Skilled", "Skilled"),
("Expert", "Expert")
)
class Games(models.Model):#all max_length and other limiters subject to change
    name = models.TextField(default="No name available")
    description = models.TextField(default="No description available")
maker = models.CharField(max_length=50, default=" ")
category = models.CharField(max_length=50, default="No data")
mechanics = models.TextField(default="No data")
date_published = models.CharField(max_length=15, default=" ")#should I just change this to a string(charfield)?
    amount_owned = models.PositiveIntegerField(default=0)
average_rating = models.DecimalField(max_digits=5, decimal_places=1, default=0.00000)
comments = models.TextField(default="This game has no comments yet.")
    playtime = models.PositiveIntegerField(default=0)
    optimal_age = models.PositiveIntegerField(default=0)
image_path = models.TextField(default="/static/mtg.jpeg")
class Groups(models.Model):
name = models.CharField(max_length=90, verbose_name="group name")
region = models.CharField(max_length=30, blank=True)
address = models.CharField(max_length=60, default="Placeholder address")
games = models.TextField(default=" ", verbose_name="Games we play")
special_rules = models.TextField(blank=True, verbose_name="house rules")
skill_level = models.CharField(default="No skill level selected", max_length=30)
#members = models.ManyToManyField(User, related_name="memberlist")
email = models.EmailField(blank=True)
phone = models.TextField(blank=True)
schedule_date = models.DateField(auto_now=False, default='2013-10-13', verbose_name="event date")
schedule_time = models.TimeField(auto_now=False, verbose_name="event time", default='00:00:00')
schedule_event = models.TextField(default="Please check back soon for our first scheduled event!", verbose_name="event")
image_path = models.CharField(default="/static/mtg.jpg", max_length=70, verbose_name="group picture", blank=True)
private_group = models.BooleanField(default=False)
games_link = models.ManyToManyField(Games, related_name="group games")
admin_id = models.IntegerField(default=0)
class UserExtension(models.Model):
user_link = models.OneToOneField(User, related_name="extension")
friends = models.ManyToManyField('self')
group_link = models.ManyToManyField(Groups, related_name="members")
games_link = models.ManyToManyField(Games, related_name="link_to_games")
city = models.CharField(max_length=30, blank=True)
characteristics = models.TextField(max_length=255, blank=True)
game_pref1 = models.CharField(max_length=30, verbose_name="Game preference")
game_pref2 = models.CharField(max_length=30, blank=True, verbose_name="Second preference")
game_pref3 = models.CharField(max_length=30, blank=True, verbose_name="Third preference")
skill = models.CharField(blank=True, max_length=30, choices=SKILL_CHOICES, verbose_name="Experience")
phone = models.CharField(blank=True, max_length=10)
facebook = models.URLField(blank=True)
image_path = models.CharField(default="/static/mtg.jpg", max_length=100, verbose_name="Profile picture")
#class User(models.Model)
#extension = link back to all extended user fields
#DON'T FORGET ABOUT MANY-TO-MANY BETWEEN GROUPS AND GAMES
#DON'T FORGET ABOUT POSITIVE INTEGER FIELDS FOR LATER
|
[
"[email protected]"
] | |
71ce47dbb7b39722811ea20222f23d88399b72e6
|
0476de1f17b2968e7639aa2e953594edd675a160
|
/CRUD(Success)/js/admin.py
|
fdc25e8b98f79f098bcaec84fb4437824bb88276
|
[] |
no_license
|
hinomoto/Django
|
f50e98660631a61d7c3c8c79c6557ba3e816c31e
|
a52c8ab14049e553520956413e428041eed40b12
|
refs/heads/master
| 2023-08-03T22:29:28.651647 | 2020-01-06T11:19:09 | 2020-01-06T11:19:09 | 231,688,064 | 0 | 0 | null | 2023-07-23T01:55:01 | 2020-01-04T00:44:05 |
Python
|
UTF-8
|
Python
| false | false | 120 |
py
|
from django.contrib import admin
# Register your models here.
from .models import Color
admin.site.register(Color)
|
[
"[email protected]"
] | |
132238d5761dd0b15e7f41c87f1a002bdc31ab4a
|
1e783783668c46113e594cab9883a45cebc77142
|
/tools/IndicatorCalculation.py
|
cb7e5f5c6cbd60e0074e4074584296cb66269311
|
[] |
no_license
|
shimq99/quant-trading-project
|
efbea4675adfa25c009e19f79b486386d4f1ba72
|
78334ea9eb926de74d1f3f25cfe4226964c994d3
|
refs/heads/main
| 2023-07-20T16:06:09.744719 | 2021-08-24T04:36:23 | 2021-08-24T04:36:23 | 364,751,660 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,082 |
py
|
#encoding:UTF-8
import math
import numpy as np
import pandas as pd
import datetime
class IndicatorCalculation(object):
@staticmethod
def calculateMaxDD(ytdYieldList):
#highestYield = -float("inf")
highestYield = ytdYieldList[0]
maxDD = 0.0
currentDD = 0.0
winDays = 0.0
lossDays = 0.0
avgWinPnl = 0.0
avgLossPnl = 0.0
sumWinPnl = 0.0
sumLossPnl = 0.0
yesterdayPnl = 0.0
winRatio = 0.0
lossRatio = 0.0
for ytd in ytdYieldList:
if(ytd > highestYield):
highestYield = ytd
currentDD = (1 + ytd)/(1 + highestYield) - 1
if(currentDD < maxDD):
maxDD = currentDD
todayPnl = ytd - yesterdayPnl
if(todayPnl > 0):
sumWinPnl += todayPnl
winDays += 1
elif(todayPnl < 0):
sumLossPnl += todayPnl
lossDays += 1
yesterdayPnl = ytd
if(winDays > 0):
avgWinPnl = sumWinPnl/winDays
if(lossDays > 0):
avgLossPnl = sumLossPnl/lossDays
if(len(ytdYieldList) >= 2):
dtdYield = ytdYieldList[-1] - ytdYieldList[-2]
elif(len(ytdYieldList) == 1):
dtdYield = ytdYieldList[0]
else:
dtdYield = 0.0
if(len(ytdYieldList) > 0):
winRatio = winDays/len(ytdYieldList)
lossRatio = lossDays/len(ytdYieldList)
return (dtdYield, highestYield, maxDD, currentDD, avgWinPnl, winRatio, avgLossPnl, lossRatio)
@staticmethod
def calculateRecovery(bookYtdGrossReturnDataframe,fundId,bookId):
if bookId == 'None':
bookId=0
fundAndBookData = bookYtdGrossReturnDataframe[(bookYtdGrossReturnDataframe['FundId'] == int(fundId)) & (bookYtdGrossReturnDataframe['BookId'] == int(bookId))].copy()
recovered = 'Not Yet'
fundAndBookData.index = pd.to_datetime(fundAndBookData['Date'])
fundAndBookData['Date'] = pd.to_datetime(fundAndBookData['Date'])
fundAndBookData.sort_index(ascending=True, inplace=True)
firstDate = fundAndBookData['Date'].iloc[0]
previousDateData = pd.DataFrame([0], columns={'YtdGrossReturn'}, index=[firstDate - datetime.timedelta(days=1)])
        # Discrete return formula: (p_t / p_{t-1}) - 1
        # Discrete cumulative return: (1 + discrete_return).cumprod()
fundAndBookData = pd.concat([fundAndBookData, previousDateData], axis=0)
fundAndBookData.sort_index(ascending=True, inplace=True)
fundAndBookData.dropna(subset=['YtdGrossReturn'], how='all', inplace=True)
#fundAndBookData['YtdGrossReturn'] = fundAndBookData['YtdGrossReturn'].fillna(0)
fundAndBookData['PCT_CHG'] = (fundAndBookData['YtdGrossReturn'] - fundAndBookData['YtdGrossReturn'].shift(1)) / (1 + fundAndBookData['YtdGrossReturn'].shift(1))
fundAndBookData['CUM_RET'] = (1+fundAndBookData['PCT_CHG']).astype(float).cumprod()
fundAndBookData.dropna(subset=['CUM_RET'], how='all', inplace=True)
        # Continuous (log) cumulative return; not applicable here because the pct changes are not continuous
#fundAndBookData['CUM_RET'] = fundAndBookData['PCT_CHG'].astype(float).cumsum().apply(np.exp)
if not fundAndBookData.empty:
fundAndBookData['CUM_MAX'] = fundAndBookData['CUM_RET'].cummax()
fundAndBookData['CurrentDD'] = (fundAndBookData['CUM_RET'] /fundAndBookData['CUM_MAX']) -1
maxDD = fundAndBookData['CurrentDD'].min()
maxDDDate = fundAndBookData[fundAndBookData['CurrentDD'] == maxDD].index[0]
CumReturnBeforeMaxDD = fundAndBookData[fundAndBookData['Date'] <= maxDDDate]['CUM_RET'].max()
CumReturnAfterMaxDD = fundAndBookData[fundAndBookData['Date'] > maxDDDate]['CUM_RET'].max()
if CumReturnAfterMaxDD > CumReturnBeforeMaxDD:
recovered = 'Recovered'
maxDDPeriodData = fundAndBookData[fundAndBookData['Date'] <= maxDDDate]
duplicated_test = maxDDPeriodData.duplicated(subset=['Date'],keep=False)
duplicated_data = maxDDPeriodData[duplicated_test]
if not duplicated_data.empty:
                ## If duplicates exist, keep only the 'Marking' source
validData = duplicated_data[duplicated_data['Source'] == 'Marking']
if validData.shape[0] ==1:
maxDDPeriodData.drop_duplicates(subset='Date', inplace=True, keep=False)
maxDDPeriodData = pd.concat([maxDDPeriodData, validData], axis=0)
maxDDPeriodData.sort_index(ascending=True, inplace=True)
else:
raise Exception('duplicate data for fundid:'+str(fundId)+', and bookId:'+str(bookId)+', pls check Nav table')
            maxDDStartDate = maxDDPeriodData.loc[maxDDPeriodData['CUM_RET'].idxmax(), 'Date']  # .loc (the old .ix is gone from pandas)
maxDDStartDateStr = maxDDStartDate.strftime('%Y-%m-%d')
maxDDDateStr = maxDDDate.strftime('%Y-%m-%d')
another_maxDD = maxDD
return (another_maxDD, maxDDStartDateStr, maxDDDateStr, recovered)
return (0, None, None, None)
@staticmethod
def calculateRecoveryWithPct(bookYtdGrossReturnDataframe, fundId, bookId):
if bookId == 'None':
bookId = 0
fundAndBookData = bookYtdGrossReturnDataframe[(bookYtdGrossReturnDataframe['FundId'] == int(fundId)) & (
bookYtdGrossReturnDataframe['BookId'] == int(bookId))].copy()
recovered = 'Not Yet'
fundAndBookData.index = pd.to_datetime(fundAndBookData['Date'])
fundAndBookData['Date'] = pd.to_datetime(fundAndBookData['Date'])
fundAndBookData.sort_index(ascending=True, inplace=True)
firstDate = fundAndBookData['Date'].iloc[0]
        # Discrete return formula: (p_t / p_{t-1}) - 1
        # Discrete cumulative return: (1 + discrete_return).cumprod()
fundAndBookData['CUM_RET'] = (1 + fundAndBookData['PCT_CHG']).astype(float).cumprod()
fundAndBookData.dropna(subset=['CUM_RET'], how='all', inplace=True)
        # Continuous (log) cumulative return; not applicable here because the pct changes are not continuous
# fundAndBookData['CUM_RET'] = fundAndBookData['PCT_CHG'].astype(float).cumsum().apply(np.exp)
if not fundAndBookData.empty:
fundAndBookData['YTD'] = (1 + fundAndBookData['PCT_CHG']).astype(float).cumprod() - 1
ytdYieldList = fundAndBookData['YTD'].tolist()
annualRtn = fundAndBookData['YTD'].iloc[-1] / len(ytdYieldList) * 250
(annualVol, annualRtn, annualSharpe) = IndicatorCalculation.calculateAnnualVolatilitySharpe(ytdYieldList,tradeDays=250)
fundAndBookData['CUM_MAX'] = fundAndBookData['CUM_RET'].cummax()
fundAndBookData['CurrentDD'] = (fundAndBookData['CUM_RET'] / fundAndBookData['CUM_MAX']) - 1
currentDD = fundAndBookData['CurrentDD'].iloc[-1]
maxDD = fundAndBookData['CurrentDD'].min()
maxDDDate = fundAndBookData[fundAndBookData['CurrentDD'] == maxDD].index[0]
CumReturnBeforeMaxDD = fundAndBookData[fundAndBookData['Date'] <= maxDDDate]['CUM_RET'].max()
CumReturnAfterMaxDD = fundAndBookData[fundAndBookData['Date'] > maxDDDate]['CUM_RET'].max()
if CumReturnAfterMaxDD > CumReturnBeforeMaxDD:
recovered = 'Recovered'
maxDDPeriodData = fundAndBookData[fundAndBookData['Date'] <= maxDDDate]
duplicated_test = maxDDPeriodData.duplicated(subset=['Date'], keep=False)
duplicated_data = maxDDPeriodData[duplicated_test]
if not duplicated_data.empty:
                ## If duplicates exist, keep only the 'Marking' source
validData = duplicated_data[duplicated_data['Source'] == 'Marking']
if validData.shape[0] == 1:
maxDDPeriodData.drop_duplicates(subset='Date', inplace=True, keep=False)
maxDDPeriodData = pd.concat([maxDDPeriodData, validData], axis=0)
maxDDPeriodData.sort_index(ascending=True, inplace=True)
else:
raise Exception('duplicate data for fundid:' + str(fundId) + ', and bookId:' + str(
bookId) + ', pls check Nav table')
            maxDDStartDate = maxDDPeriodData.loc[maxDDPeriodData['CUM_RET'].idxmax(), 'Date']
maxDDStartDateStr = maxDDStartDate.strftime('%Y-%m-%d')
maxDDDateStr = maxDDDate.strftime('%Y-%m-%d')
another_maxDD = maxDD
return (another_maxDD, maxDDStartDateStr, maxDDDateStr, recovered, annualRtn, currentDD, annualVol, annualSharpe)
return (0, None, None, None,0,0,0,0)
@staticmethod
def calculateAnnualVolatilitySharpe(ytdYieldList, tradeDays = 252):
try:
dtdYieldList = []
i = 0
for ytd in ytdYieldList:
if(i == 0):
dtdYieldList.append(ytd)
else:
dtdYieldList.append((ytdYieldList[i] - ytdYieldList[i-1])/(1+ytdYieldList[i-1]))
i += 1
sumYtd = 0.0
avgYtd = 0.0
for ytd in dtdYieldList:
sumYtd += ytd
avgYtd = sumYtd/len(dtdYieldList)
squareSum = 0.0
for ytd in dtdYieldList:
squareSum += (ytd - avgYtd) * (ytd - avgYtd)
annualRtn = ytdYieldList[-1]/len(ytdYieldList) * tradeDays
annualVol = math.sqrt(squareSum/(len(dtdYieldList) - 1)) * math.sqrt(tradeDays - 2) #why minus 2
annualSharpe = annualRtn/annualVol
return (annualVol, annualRtn, annualSharpe)
        except Exception:
return (0.0, 0.0, 0.0)
if __name__ == '__main__':
ytdYieldList = [0.98, 0.99, 0.97, 1, 1.02, 1.01, 1.05, 0.9, 0.98, 1.02]
    print(IndicatorCalculation.calculateMaxDD(ytdYieldList))
    print(IndicatorCalculation.calculateAnnualVolatilitySharpe(ytdYieldList))
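One caveat on the annualization above: the conventional daily-to-annual scaling multiplies daily volatility by sqrt(tradeDays), and the sqrt(tradeDays - 2) term (already questioned by the author's own '#why minus 2' comment) has no standard justification. A minimal sketch of the textbook version, assuming dtd_returns holds at least two daily returns:

import math

def annualize(dtd_returns, trade_days=252):
    # scale mean daily return and sample stdev of daily returns to one year
    mean = sum(dtd_returns) / len(dtd_returns)
    var = sum((r - mean) ** 2 for r in dtd_returns) / (len(dtd_returns) - 1)
    annual_rtn = mean * trade_days
    annual_vol = math.sqrt(var) * math.sqrt(trade_days)
    return annual_vol, annual_rtn, annual_rtn / annual_vol  # Sharpe with rf = 0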
|
[
"[email protected]"
] | |
1d85ed4d923a65a348f818735d606d763db63edc
|
186e0826d663762db647cb96f52d01e54e391e77
|
/Part 1 - Getting Started/2 - Key Types/PublishSubject.py
|
8e76498cbe58b3425c74e1a89c39d63556712500
|
[
"MIT"
] |
permissive
|
gmariotti/Intro-to-RxPY
|
09cb70f603f470b37a83ea854c3c1633a46a305d
|
37d102f13fd26950143875fbf8005bb5ce0b1a73
|
refs/heads/master
| 2021-03-22T04:19:12.775415 | 2016-07-22T21:30:34 | 2016-07-22T21:30:34 | 61,143,226 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 219 |
py
|
from rx.subjects import Subject
# RxPY calls this Subject; it plays the role of RxJava's PublishSubject
subject = Subject()
subject.on_next(1)
subject.subscribe(on_next=print)
subject.on_next(2)
subject.on_next(3)
subject.on_next(4)
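A plain Subject is hot: the 1 pushed before subscribe() is never printed, and only 2, 3 and 4 reach the observer. If late subscribers should also see earlier values, RxPY ships a ReplaySubject in the same module (module path assumed for the RxPY 1.x era this snippet targets):

from rx.subjects import ReplaySubject

replay = ReplaySubject()         # buffers every value for late subscribers
replay.on_next(1)                # emitted before anyone is listening
replay.subscribe(on_next=print)  # prints the buffered 1 immediately
replay.on_next(2)                # then prints 2 as it arrives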
|
[
"[email protected]"
] | |
e3e125bda8aedcaab4392164dede677f506328b4
|
35804ed41d74afd5610adfba6d8ee11dce579a8a
|
/sender/tesla.py
|
176896bbe72028c163f911ec3c6e0097972ebe98
|
[] |
no_license
|
ye11ow/omw
|
cd6edbbfaa8b09d63a30fb64659026a9474b4a8e
|
e4b5828ca5d47315a9c43199e473480eaabfd6d7
|
refs/heads/main
| 2023-07-25T03:11:25.125427 | 2021-09-03T03:45:57 | 2021-09-03T03:45:57 | 389,790,447 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,978 |
py
|
import traceback
import time
import json
import random
import logging
import threading
import click
import requests
import teslapy
LOG_FORMAT = '%(asctime)s %(levelname)s %(module)s(%(lineno)d) - %(message)s'
DATE_FORMAT = '%m/%d %H:%M:%S'
logging.basicConfig(format=LOG_FORMAT, datefmt=DATE_FORMAT, level=logging.INFO)
logger = logging.getLogger('omw_tesla_sender')
logger.setLevel(logging.DEBUG)
start_time = int(time.time())
class MockTesla:
def __init__(self):
with open('tests/fixtures/tesla.json') as f:
self._data = json.load(f)
logger.info('cache data loaded')
def get_vehicle_data(self):
self._data['drive_state']['latitude'] += random.random() / 100
self._data['drive_state']['longitude'] += random.random() / 100
logger.debug('getting vehicle data')
return self._data
def send_data(session, interval, duration, host, vehicle):
session_left = duration - (int(time.time()) - start_time)
logger.info(f'sending location data... Session time left: {session_left}s')
if session_left < 0:
exit(0)
try:
drive_state = vehicle.get_vehicle_data()['drive_state']
now = int(time.time())
payload = {
'next_refresh': now + interval,
'vehicle': drive_state,
'timestamp': now
}
requests.post(f'{host}/location?session={session}', json=payload)
except Exception as err:
logger.error('failed to send location data')
print(traceback.format_exc())
return
@click.command()
@click.option('--email', '-e', help='the email address of your Tesla account', envvar='TESLA_EMAIL')
@click.option('--password', '-p', help='the password of your Tesla account', envvar='TESLA_PASSWORD')
@click.option('--session', '-s', help='name of the session', required=True)
@click.option('--interval', '-i', help='sending interval in seconds', default=10)
@click.option('--duration', '-d', help='total session duration in minutes', default=60 * 60 * 24)
@click.option('--host', '-h', default='http://localhost:5000')
@click.option('--debug', is_flag=True, default=False)
def tesla(email, password, session, interval, duration, host, debug):
logger.info(f'sending location to {host} with interval {interval}s. Session duration {int(duration / 60)} minutes')
if debug:
logger.info('debug mode on, loading from fixture')
vehicle = MockTesla()
else:
logger.info('connecting to Tesla server...')
with teslapy.Tesla(email, password) as tesla:
tesla.fetch_token()
vehicles = tesla.vehicle_list()
if len(vehicles) != 1:
            logger.error(f'unexpected number of vehicles found ({len(vehicles)})')
exit(1)
vehicle = vehicles[0]
e = threading.Event()
while not e.wait(interval):
send_data(session, interval, duration, host, vehicle)
if __name__ == '__main__':
tesla()
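The send loop above uses threading.Event().wait(interval) rather than time.sleep(interval): wait() blocks for the interval but returns as soon as the event is set, so the loop can be stopped promptly. A minimal sketch of wiring that to a signal handler (the handler and the SIGTERM choice are illustrative, not part of this script):

import signal
import threading

stop = threading.Event()

def on_sigterm(signum, frame):
    stop.set()                    # wakes the wait() below immediately

signal.signal(signal.SIGTERM, on_sigterm)

while not stop.wait(10):          # ticks every 10 seconds until stop is set
    print('tick')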
|
[
"[email protected]"
] | |
9a4a66b73d5ac59e838f0aa82bbb615cf4efa43f
|
6c58da2c54a3d35273e7984313d181f1da9981fc
|
/Multiple_Apps/djangoEnv/bin/django-admin.py
|
78fd42c83301322a9da7ef20392fed2b3158a0b1
|
[
"MIT-0"
] |
permissive
|
py1-10-2017/rgero215_PY1-10-2017
|
e582cb12cc63f84b1c0c14d09a922cb6cb228016
|
f455b335ec9c8c850571f3a75dcd95759b4cfdad
|
refs/heads/master
| 2021-09-04T03:23:48.062326 | 2018-01-14T21:07:26 | 2018-01-14T21:07:26 | 105,612,652 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 187 |
py
|
#!/Users/RGero13/Desktop/rgero215_PY1-10-2017/Multiple_Apps/djangoEnv/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
[
"[email protected]"
] | |
2c479f99d99661f008430e819abbbaef7e2c6f44
|
f24ebd0ee446e95f3953dbb840526fc6d299b13d
|
/env/bin/ipython
|
82b752a6c290b808cc8ce20367c3d216d899162e
|
[] |
no_license
|
hendro15/flaskFramework
|
158f8f01aeec12e26d88f1a1522a93303ff32468
|
306a5b4885fdb4549d0472eac6fbd99b7986f949
|
refs/heads/master
| 2020-04-08T19:11:39.063975 | 2018-11-29T11:17:21 | 2018-11-29T11:17:21 | 159,644,343 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 268 |
#!/home/sonic/Documents/latihan/flaskPACKT/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from IPython import start_ipython
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(start_ipython())
|
[
"[email protected]"
] | ||
7b82ae27e930608f379bae63c303ce502a3c27db
|
8a0acf95b459937c539ef935e524481c9d327ad0
|
/principal.py
|
655980f884a7973ab832dfe9b59d8a81c882d441
|
[] |
no_license
|
ekdespe/botAgenciaTelegram
|
20f7087807590a2ff8f742bf498a26c785e0e73e
|
9768eb1d9de214c3638dcbde4332a9ef7f3d115c
|
refs/heads/master
| 2023-08-22T20:25:43.639874 | 2017-06-22T21:14:09 | 2017-06-22T21:14:09 | 410,154,625 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,041 |
py
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
import telegram
#from telegram.ext import Updater
from telegram.ext import Updater, CommandHandler
import os
myToken = "354690332:AAFB8Dgjae9nLjbOlwbJHDSczCOXFKP-ybo"
def ajuda(bot, update):
text = "/quemtaai - Lista todos os membros da agência presentes\n/ajuda - exibe este menu\n/info - exibe informações sobre o desenvolvedor"
bot.sendMessage(update.message.chat_id, text)
def quemtaai(bot , update):
os.system("./buscaNomes.sh")
arq = open("listaNomes.txt","r")
    texto = arq.read()
arq.close()
bot.sendMessage(update.message.chat_id, texto)
def info(bot , update):
text = "Erik Ferreira - [email protected] \n @ekdespe \n 71 9.8277-6545 "
bot.sendMessage(update.message.chat_id, text)
updater = Updater(myToken)
updater.dispatcher.add_handler(CommandHandler('ajuda', ajuda))
updater.dispatcher.add_handler(CommandHandler('info', info))
updater.dispatcher.add_handler(CommandHandler('quemtaai', quemtaai))
updater.start_polling()
updater.idle()
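Since the bot token above is committed in plain text, it should be treated as leaked and revoked via BotFather; a common alternative is to read it from the environment. A minimal sketch, with TELEGRAM_TOKEN as an assumed variable name:

import os

myToken = os.environ.get('TELEGRAM_TOKEN')
if not myToken:
    raise SystemExit('set the TELEGRAM_TOKEN environment variable first')
updater = Updater(myToken)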
|
[
"[email protected]"
] | |
592ac9a1613e8c2b0e733f3b1ebe6ebb4046e7ca
|
cb12e3eff7bbb5fe2f4d0e2be9ca165a5577dc93
|
/plt-and-algo/webrtc-p2pframe/serve.py
|
a43133af73a4329d22e725d2ebc34e112a0c7968
|
[] |
no_license
|
overminder/kitchen-sink
|
6b1227ff00f8804d4d0a632e613ee903d51ab753
|
2e61b9041ceed536d42b42b75a5c50dae080c0ba
|
refs/heads/master
| 2023-06-09T05:48:47.291336 | 2023-05-29T17:38:34 | 2023-05-29T17:38:34 | 50,777,705 | 18 | 3 | null | 2020-02-09T19:22:18 | 2016-01-31T14:13:16 |
Scala
|
UTF-8
|
Python
| false | false | 3,346 |
py
|
#!/usr/bin/env python3.5
import logging
import os
import sys
import random
import json
HERE = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
STATIC_PATH = os.path.join(HERE, 'static')
from tornado import gen
from tornado.websocket import WebSocketHandler
from tornado.web import Application, RequestHandler, StaticFileHandler
from tornado.ioloop import IOLoop
class Offer:
resp_cb = None
def __init__(self, key=None, offer_payload=None):
self.key = key
self.offer_payload = offer_payload
self.candidates = []
def __repr__(self):
return '<Offer candidates=%d %r>' % (len(self.candidates), self.offer_payload)
def wait_resp(self, callback=None):
self.resp_cb = callback
class Offers:
def __init__(self):
self.offers = {}
@classmethod
def mkkey(cls):
return str(random.randint(10000, 99999)) # Just to be simple.
def add(self, offer):
self.offers[offer.key] = offer
return offer
def find(self, key):
return self.offers[key]
def pop(self, key):
return self.offers.pop(key)
offers = Offers()
class OfferListingHandler(RequestHandler):
def get(self):
self.write({
'offers': [{'key': key, 'resp_cb': repr(resp_cb)}
for (key, resp_cb) in offers.offers.items()],
})
class OfferHandler(WebSocketHandler):
offer = None
key = None
def open(self):
self.key = Offers.mkkey()
def _ensure_offer(self):
if self.offer is None:
self.offer = Offer(key=self.key)
return self.offer
@gen.coroutine
def on_message(self, s):
msg = json.loads(s)
print('msg', type(msg), repr(msg))
if msg['type'] == 'allocKey':
self.write_message({
'type': 'allocKeyResp',
'key': self.key,
})
elif msg['type'] == 'offer':
offer = offers.add(self._ensure_offer())
offer.offer_payload = msg
self.write_message(json.dumps({
'type': 'offer-created',
}))
resp = yield gen.Task(offer.wait_resp)
self.write_message(json.dumps({
'type': 'offer-accepted',
'resp': resp,
}))
elif msg['type'] == 'take-offer':
offer = offers.find(msg['key'])
self.write_message(offer.offer_payload)
for c in offer.candidates:
self.write_message(c)
elif msg['type'] == 'answer':
key = msg.pop('forKey')
offer = offers.pop(key)
offer.resp_cb(msg)
elif msg['type'] == 'candidate':
self._ensure_offer().candidates.append(msg)
class NoCacheStaticFileHandler(StaticFileHandler):
def set_extra_headers(self, path):
self.set_header('Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0')
def mkapp():
return Application([
(r'/offer', OfferHandler),
(r'/offers/list', OfferListingHandler),
(r'/(.*)', NoCacheStaticFileHandler, {
'path': STATIC_PATH,
}),
], gzip=True)
def main():
port = 17080
mkapp().listen(port)
print('Listening on :%d' % port)
IOLoop.current().start()
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
30d76d5a0ff3e6f690abdabd2e750c027eb4391d
|
c05f9fb686ef49c093e618a4078ffe723231f346
|
/config/conf.py
|
aa2247f1303787036d7ea5fca1e3fa1d81a42f4c
|
[] |
no_license
|
wmm0165/PytestAuto
|
d1bb40dcc5760439658c15af653953646119af44
|
42846b12ed7aefaa4e5890529ec71a76d27f245d
|
refs/heads/master
| 2020-07-16T16:35:19.962864 | 2019-09-16T10:08:37 | 2019-09-16T10:08:37 | 205,825,635 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 881 |
py
|
from datetime import datetime
import os
# Project root directory
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Report directory
REPORT_DIR = os.path.join(ROOT_DIR, 'report')
# Directory holding the UI object-repository config.ini
CONF_PATH = os.path.join(ROOT_DIR, 'config', 'config.ini')
# Test data location
DATA_Path = os.path.join(ROOT_DIR, 'data', 'tcData.xlsx')
# Current time
CURRENT_TIME = datetime.now().strftime('%H_%M_%S')
# Email configuration
# SMTP server
SMTP_SERVER = 'smtp.qq.com'
# Sender
FROM_USER = '[email protected]'
# Sender password
FROM_PASSWORD = 'mhxvqpewblldbjhf'
# Recipients
TO_USER = ['account@qq.com']  # several recipients can be mailed at once; append to the list
# Email subject
SUBJECT = 'xx project automated test report'
# Email body
CONTENTS = 'Test report body'
# Report file name
HTML_NAME = 'testReport{}.html'.format(CURRENT_TIME)
print(HTML_NAME)
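These constants are presumably consumed by a mailer elsewhere in the project; as an illustration of how they fit together, here is a minimal standard-library sketch (smtplib and port 465 are my choices, not taken from this repo):

import smtplib
from email.mime.text import MIMEText

msg = MIMEText(CONTENTS)
msg['Subject'] = SUBJECT
msg['From'] = FROM_USER
msg['To'] = ', '.join(TO_USER)

with smtplib.SMTP_SSL(SMTP_SERVER, 465) as server:  # QQ mail accepts SSL on 465
    server.login(FROM_USER, FROM_PASSWORD)
    server.sendmail(FROM_USER, TO_USER, msg.as_string())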
|
[
"[email protected]"
] | |
8b92035f4b34e0556c903e155ba9a8618bf17529
|
275a96a33ae1f89e7b2ee0ecdbac7d78abe6d6cc
|
/test/test_bad_request_error_code.py
|
670128cb3664339498ad0e2fe8a03b0977a7c7ff
|
[] |
no_license
|
cascadiarc/cyclos-python-client
|
8029ce07174f2fe92350a92dda9a60976b2bb6c2
|
a2e22a30e22944587293d51be2b8268bce808d70
|
refs/heads/main
| 2023-04-03T16:52:01.618444 | 2021-04-04T00:00:52 | 2021-04-04T00:00:52 | 354,419,532 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 913 |
py
|
# coding: utf-8
"""
Cyclos 4.11.5 API
The REST API for Cyclos 4.11.5 # noqa: E501
OpenAPI spec version: 4.11.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.bad_request_error_code import BadRequestErrorCode # noqa: E501
from swagger_client.rest import ApiException
class TestBadRequestErrorCode(unittest.TestCase):
"""BadRequestErrorCode unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testBadRequestErrorCode(self):
"""Test BadRequestErrorCode"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.bad_request_error_code.BadRequestErrorCode() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
f9fa5bd49d7735c223174b25cefd4fe467330275
|
f513fd306f53653511c8854556971956f5a38a0e
|
/算法入门/查找排序习题/习题3.py
|
62fdf650386650a471df10ed601c64ac166482f4
|
[] |
no_license
|
forest-data/luffy_py_algorithm
|
ab10ca070cfbf17a08a61f88bfd9c3b3cb07f382
|
f6bec726ee98176b56b5ea556c1a521b693a80fb
|
refs/heads/master
| 2023-04-02T21:48:28.372136 | 2020-09-10T01:46:57 | 2020-09-10T01:46:57 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,788 |
py
|
"""
3. 给 nums =【1,2,5,4】 target = 3 结果 返回 (0,1)
"""
from 算法入门.cal_time import cal_time
class Solution:
@cal_time
def twoSum1(self, nums, target):
for ind, val in enumerate(nums):
if target - val in nums and ind != nums.index(target-val):
return (ind, nums.index(target-val))
    # compare each number against the numbers after it
@cal_time
def twoSum2(self, nums, target):
n = len(nums)
for i in range(n):
            for j in range(i+1, n):  # compare with later elements; use range(i) to compare with earlier ones instead
if nums[i] + nums[j] == target:
return sorted([i, j])
def binary_search(self, li, left, right, val):
# left = 0
# right = len(li)-1
while left <= right:
mid = (left + right) // 2
if li[mid] == val:
return mid
elif li[mid] > val:
right = mid - 1
else:
left = mid + 1
else:
return None
    # If the list is sorted, the lookup of b = target - a can use binary search
@cal_time
def twoSum3(self, nums, target):
for i in range(len(nums)):
a = nums[i]
b = target - a
if b>=a:
                # j = self.binary_search(nums[i+1:], b)  # list slicing costs O(n)
j = self.binary_search(nums, i+1, len(nums)-1, b)
else:
j = self.binary_search(nums, 0, i-1, b)
            if j is not None:  # j may legitimately be 0, so compare against None
break
return sorted([i,j])
def binary_search2(self, li, left, right, val):
# left = 0
# right = len(li)-1
while left <= right:
mid = (left + right) // 2
if li[mid][0] == val:
return mid
elif li[mid][0] > val:
right = mid - 1
else:
left = mid + 1
else:
return None
@cal_time
def twoSum4(self, nums, target):
new_nums = [[num, i] for i, num in enumerate(nums)]
new_nums.sort(key= lambda x:x[0])
for i in range(len(new_nums)):
a = new_nums[i][0]
b = target - a
if b >= a:
                # j = self.binary_search(nums[i+1:], b)  # list slicing costs O(n)
j = self.binary_search2(new_nums, i + 1, len(new_nums) - 1, b)
else:
j = self.binary_search2(new_nums, 0, i - 1, b)
            if j is not None:  # j may legitimately be 0, so compare against None
break
return sorted([new_nums[i][1], new_nums[j][1]])
nums = [1,2,4,5]
target = 3
print(Solution().twoSum1(nums, target))
print(Solution().twoSum2(nums, target))
print(Solution().twoSum3(nums, target))
print(Solution().twoSum4(nums, target))
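Missing from the four variants above is the canonical O(n) approach: one pass with a dict mapping each seen value to its index. A sketch in the same style (the class name and method number are mine):

class HashSolution:
    @cal_time
    def twoSum5(self, nums, target):
        seen = {}                          # value -> index
        for i, val in enumerate(nums):
            if target - val in seen:       # complement was already visited
                return (seen[target - val], i)
            seen[val] = i
        return None

print(HashSolution().twoSum5(nums, target))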
|
[
"[email protected] "
] | |
4fd9c70db157736bfaf3aab4bd859f51b90b8f41
|
82aee3211216f55392d5a757eb57f02c859e9a28
|
/Easy/599_minimumIndexSumOfTwoLists.py
|
d92b6e8b5c30aa160a4ed09faac635a69b9d9ca6
|
[] |
no_license
|
Yucheng7713/CodingPracticeByYuch
|
505d18095d4b9a35c1f3b23632a90a76d811b64a
|
1461b10b8910fa90a311939c6df9082a8526f9b1
|
refs/heads/master
| 2022-05-01T11:51:00.612603 | 2022-04-18T09:46:55 | 2022-04-18T09:46:55 | 198,961,132 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 605 |
py
|
class Solution:
def findRestaurant(self, list1: List[str], list2: List[str]) -> List[str]:
r_set = set(list1 + list2)
map_1 = {res : i for i, res in enumerate(list1)}
map_2 = {res : i for i, res in enumerate(list2)}
common_res = []
min_sum = float('inf')
for r in r_set:
if r in map_1 and r in map_2:
k = map_1[r] + map_2[r]
if min_sum > k:
common_res = [r]
min_sum = k
elif min_sum == k:
                    common_res += [r]
return common_res
|
[
"[email protected]"
] | |
b4d37ac1239c80ecc49ed220ce9f6f19b293de14
|
3ac01f05ef7c579c84237dec6fb06d7be141e998
|
/功能说明文档/提取文本.py
|
eef2dbaea6b9c8983c213223f86a1d4861cdcd05
|
[] |
no_license
|
xiaohaiz1/dxkStickIDE
|
8027d4962f23ccf48c5fa3c5448ff6cd28d29979
|
27cbdc9ae4bacc2361e53ebc8c05026f9d58719f
|
refs/heads/master
| 2021-03-27T00:07:48.456668 | 2019-11-17T03:46:57 | 2019-11-17T03:46:57 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,150 |
py
|
import os, sys, re
from os import path
func_name = re.compile(r'(?<=def )([A-Za-z0-9_]+)\((.*)\)')
code_dir = '../plugins_setup/microbit_dxk/dxk_ext'
output = open('Document_raw.md', 'w', encoding='utf-8')
fprint = lambda *args, **kwargs: print(*args, **kwargs, file=output)
fprint('# dxkStick Developer Documentation')
for filename in os.listdir(code_dir):
if not filename.endswith('.py'):
continue
    fprint(f'## Module: {path.splitext(filename)[0]}')
with open(path.join(code_dir, filename), encoding='utf-8') as file:
for name, args in func_name.findall(file.read()):
fprint(f"1. ### {name}({args.replace(',',', ')})")
if args:
                fprint(f' #### Parameters:')
                args = args.split(',')
                for arg in args:
                    if '=' in arg:
                        a, b = arg.split('=')
                        fprint(f' - {a} (default {b}): ')
                    else:
                        fprint(f' - {arg}: ')
            else:
                fprint(f' #### Parameters: none')
            fprint(f' #### Return value: ')
fprint('---')
output.close()
|
[
"[email protected]"
] | |
8449b0ad34f7cd388b0f486ff3029c4e45215abc
|
d12c1a96aa84c6fc24d4670bb8a258521088cee3
|
/art_gallery/gallery/migrations/0012_remove_gallery_g_id.py
|
270601a74289d7b6ba8a3de57dbdd10be3ec2b2a
|
[] |
no_license
|
MrunalKotkar/Art-Gallery
|
b6f1301f236d03af77ee4b09af069d00a3fb1b64
|
91936a076d34515c7efb858c8f5891b9a4d35689
|
refs/heads/main
| 2023-01-07T12:57:54.223138 | 2020-11-06T16:45:18 | 2020-11-06T16:45:18 | 309,106,380 | 2 | 3 | null | 2020-11-06T15:57:31 | 2020-11-01T13:51:23 |
HTML
|
UTF-8
|
Python
| false | false | 316 |
py
|
# Generated by Django 3.1.2 on 2020-10-27 11:54
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('gallery', '0011_profile'),
]
operations = [
migrations.RemoveField(
model_name='gallery',
name='g_id',
),
]
|
[
"[email protected]"
] | |
34aee97e06e7bc1273527c6d8c5021b48c57f2a5
|
a30b3e3d6d9dd71f3df1e284ddf64687b8bb672a
|
/Python Data Structure/Assignment/assignment5.py
|
d055e0670f996a2223eb32a8bf2757f1111596c2
|
[] |
no_license
|
shahbazkhan22/Python_for_Everybody_Specialization
|
23d5b15e9688471fc31a61d8c18e08f9cf4f0028
|
0d098ccaab64eae484416fed4105aea727393d5e
|
refs/heads/master
| 2020-04-29T18:16:48.702294 | 2019-04-14T07:30:50 | 2019-04-14T07:30:50 | 176,318,793 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 297 |
py
|
file = open('mbox-short.txt')
count = 0
lst = []
for line in file:
if line.startswith('From') and not line.startswith('From:'):
#lst = line.split()
print(line.rstrip().split()[1])
count = count+1
print('There were',count,'lines in the file with From as the first word')
|
[
"[email protected]"
] | |
514c6c1c53e0f7b71bdda18ad194f84ac9781aa8
|
968ec31230d56db163f07f46f4aea82225bd03ee
|
/stubhub.py
|
1350e96fe6d240a1adc6d8f4eefda851bc56ad6d
|
[] |
no_license
|
jboenawan/Facebook_Chat
|
3c0c9371d0b46726997de0fc0054ad93b3360785
|
eb966001c7d1eded9106222187126aaf9bf4c6ec
|
refs/heads/master
| 2021-08-23T23:29:25.166366 | 2017-12-07T02:48:37 | 2017-12-07T02:48:37 | 113,389,937 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,996 |
py
|
# application token: b0c594ce-9b1d-3796-8dbe-f4b55d7600f6
# iPwnVBEYbRgOswSyn2kTqZq0qcsa:HM3QaC1nnoRbdqJM91UD70gSvnMa
# aVB3blZCRVliUmdPc3dTeW4ya1RxWnEwcWNzYTpITTNRYUMxbm5vUmJkcUpNOTFVRDcwZ1N2bk1hDQo=
# Content-Type: application/x-www-form-urlencoded
# Authorization: Basic basicAuthorizationToken
import requests
import base64
import json
import pprint
import pandas as pd
import datetime
## Enter user's API key, secret, and Stubhub login
app_token = 'b0c594ce-9b1d-3796-8dbe-f4b55d7600f6'
consumer_key = 'iPwnVBEYbRgOswSyn2kTqZq0qcsa'
consumer_secret = 'HM3QaC1nnoRbdqJM91UD70gSvnMa'
stubhub_username = '[email protected]'
stubhub_password = 'September13!'
combo = consumer_key + ':' + consumer_secret
basic_authorization_token = base64.b64encode(bytes(combo, 'utf-8'))
print(basic_authorization_token)
headers = {
'Content-Type':'application/x-www-form-urlencoded',
'Authorization':'Basic '+basic_authorization_token.decode('utf-8'),}
body = {
'grant_type':'password',
'username':stubhub_username,
'password':stubhub_password,
'scope':'PRODUCTION'}
url = 'https://api.stubhub.com/login'
r = requests.post(url, headers=headers, data=body)
token_response = r.json()
access_token = token_response['access_token']
user_GUID = r.headers['X-StubHub-User-GUID']
print(r)
print(r.text)
print(token_response)
# inventory_url = 'https://api.stubhub.com/search/inventory/v2'
# eventid = '9670859'
# data = {'eventid':eventid, 'rows':200}
headers['Authorization'] = 'Bearer ' + access_token
headers['Accept'] = 'application/json'
headers['Accept-Encoding'] = 'application/json'
# inventory = requests.get(inventory_url, headers=headers, params=data)
# info_url = 'https://api.stubhub.com/catalog/events/v2/' + eventid
city = 'New York'
Testing_URL = "https://api.stubhub.com/search/catalog/events/v3?city={city_name}".format(city_name= city)
info = requests.get(Testing_URL, headers=headers)
pprint.pprint(info.json())
# print(info)
|
[
"[email protected]"
] | |
5f06821bd866c81f3ca62e6f14480248302bfc93
|
f53b37e6454ae2f96ae0608c39ff8395674b808f
|
/settings.py
|
e9e146d84c7c6c40a9d2bb27c732aab7ab72dccd
|
[] |
no_license
|
dhanushraj2508/products
|
346f73317f5ee61ad0f0464ef3f25d7663a4ff27
|
57d7d242ef0f269ea0cf0269781988c73d44c4e7
|
refs/heads/master
| 2022-12-25T20:45:13.571309 | 2020-10-06T08:43:00 | 2020-10-06T08:43:00 | 301,618,655 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,275 |
py
|
"""
Django settings for productdetails project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*fu!^6kof7gp-%p%+$1bl4flk8c$8f#j0w81b29@6!n128m%45'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'productdetails'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'productdetails.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'productdetails.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
7b9873841c9133e434fa69c2bdf27cf2707abbce
|
b9d4e2cee9cd6da7f07a63d2b9b426614cbb5346
|
/lambda_function.py
|
fb2c2809897ab79eca8116d17e19f0dd74416880
|
[] |
no_license
|
jscaria/meetup-auto-rsvp
|
caaf422a88314e07e2ce9090100493395417b551
|
e1b84f827277a02e70f4a8300344ac74d9d127d5
|
refs/heads/master
| 2020-03-29T12:35:01.974103 | 2019-09-01T19:52:03 | 2019-09-01T19:52:03 | 149,907,091 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,389 |
py
|
import json
from botocore.vendored import requests
import logging
import os
import boto3
import datetime
import pytz
logger = logging.getLogger()
logger.setLevel(logging.INFO)
ses = boto3.client('ses')
def lambda_handler(event, context):
events_url = "https://api.meetup.com/{0!s}/events?sign=true&photo-host=public&page=10".format(os.environ['GROUP_NAME'])
rsvp_url = "https://api.meetup.com/2/rsvp/"
headers = {
"Authorization": "Bearer {0!s}".format(os.environ["OAUTH_ACCESS_TOKEN"])
}
events_request = requests.get(events_url, headers=headers)
events_response = events_request.json()
logger.debug("events_response: " + json.dumps(events_response, indent=4))
'''
[
{
"local_time": "11:00",
"local_date": "2018-09-09",
"link": "https://www.meetup.com/<group_name>/events/<event_id>/",
"visibility": "public_limited",
"group": {
"created": 1373082291000,
"name": "<group_nane>",
"id": 0,
"join_mode": "approval",
"lat": 37.31,
"lon": -122,
"urlname": "<group_url>",
"who": "<people>",
"localized_location": "<location>",
"region": "en_US",
"timezone": "US/Pacific"
},
"waitlist_count": 0,
"yes_rsvp_count": 23,
"duration": 7200000,
"time": 1536516000000,
"utc_offset": -25200000,
"name": "<name>",
"id": "<id>"
}
]
'''
rsvp = "YES"
responses = []
successful_rsvp = False
for entry in events_response:
event_id = entry["id"]
logger.debug("event: event_id={0!s}&rsvp={1!s}".format(event_id, rsvp))
data = {
"event_id": event_id,
"rsvp": rsvp
}
rsvp_request = requests.post(rsvp_url, headers=headers, data=data)
rsvp_response = rsvp_request.json()
local_response = {
"statusCode": rsvp_request.status_code,
"body": rsvp_response
}
responses.append(local_response)
if "code" not in rsvp_response or rsvp_response["code"] != "event_past":
subject = 'Lambda function - ' + str(rsvp_request.status_code)
data = json.dumps(rsvp_response, indent=4)
if rsvp_request.status_code == 201:
ts = int(rsvp_response["event"]["time"]/1000)
tz = pytz.timezone("US/Pacific")
event_time = datetime.datetime.utcfromtimestamp(ts).replace(tzinfo=pytz.utc) # in UTC
event_time_in_local = event_time.astimezone(tz) # converted to US/Pacific
event_time_string = event_time_in_local.strftime('%m-%d-%Y @ %H:%M')
subject = 'RSVP {0!s} to {1!s} on {2!s}'.format(rsvp_response["response"], rsvp_response["event"]["name"], event_time_string)
response = ses.send_email(
Source = os.environ['EMAIL_FROM'],
Destination={
'ToAddresses': [
os.environ['EMAIL_TO'],
],
},
Message={
'Subject': {
'Data': subject
},
'Body': {
'Text': {
'Data': data
}
}
}
)
if rsvp_request.status_code == 201:
successful_rsvp = True
    return responses
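One compatibility caveat: from botocore.vendored import requests has been removed from recent botocore releases, so this import fails on current Lambda runtimes. The usual fix is to bundle the real requests package (in the deployment zip or a Lambda layer) and import it directly; a sketch under that assumption:

import requests  # provided by the deployment package or a Lambda layer

# the call sites then stay exactly the same, e.g.:
# rsvp_request = requests.post(rsvp_url, headers=headers, data=data)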
|
[
"[email protected]"
] | |
805b07a4f1730c6ee6d19977351adf8d2591824a
|
21a82223831e5c8a51809a4d384e2de26c42b84e
|
/Solutions/350_Intersection_Of_Two_Array_II.py
|
939ada7741d4713f7c111422eacc9c88f1e45d0d
|
[] |
no_license
|
xtymichael/Leetcode_python
|
91ef24adf174cb8d2a3e99f2784995a7439987a1
|
be47b1e0d22174043a2596eae6f141912573c67f
|
refs/heads/master
| 2021-06-15T23:46:57.335941 | 2017-04-24T05:46:28 | 2017-04-24T05:46:28 | 39,522,606 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 393 |
py
|
import collections


class Solution(object):
def intersect(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
c1 = collections.Counter(nums1)
c2 = collections.Counter(nums2)
result = []
for num in c1:
if num in c2:
result += [num] * min(c1[num], c2[num])
return result
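As an aside, collections.Counter supports multiset intersection directly through the & operator, which keeps the minimum count per element, the same logic as the loop above:

import collections

def intersect(nums1, nums2):
    # & keeps min(c1[x], c2[x]) per element; elements() expands the counts
    return list((collections.Counter(nums1) & collections.Counter(nums2)).elements())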
|
[
"[email protected]"
] | |
f93ba8788473687f41c7a3b09f3253b2ad98a371
|
2662da5c82071f4fa03ee0fa1ce3fd8f4e78096a
|
/Algorithms/Implementation/Lisa's Workbook/solution.py
|
60e15530cb4e33aea6d950351d5453c92f3b55c2
|
[] |
no_license
|
perrinod/hacker-rank-solutions
|
8e64122f15c87932059d7dec5a87bc2f64b23e9e
|
088e94a4d74516c25781be5fd85a50f09b7cbda2
|
refs/heads/master
| 2020-08-02T11:48:48.550299 | 2020-05-29T10:51:33 | 2020-05-29T10:51:33 | 211,340,553 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 737 |
py
|
#https://www.hackerrank.com/challenges/lisa-workbook/problem
#!/bin/python3
import math
import os
import random
import re
import sys
def workbook(n, k, arr):
count, page = 0, 1
for problems in range(0, len(arr)):
for i in range (1, arr[problems] + 1):
if(i == page):
count += 1
if(i % k == 0):
page += 1
if(arr[problems] % k != 0):
page += 1
return count
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
nk = input().split()
n = int(nk[0])
k = int(nk[1])
arr = list(map(int, input().rstrip().split()))
result = workbook(n, k, arr)
fptr.write(str(result) + '\n')
fptr.close()
|
[
"[email protected]"
] | |
6627104198ebf9344162f9f5e1a84e658f4c0a6b
|
7175d25162eaa96c9d5860563b9d134185a3e706
|
/input/topics.py
|
66e5b9d0a8005a2ce62f52067155a7e9925874fd
|
[] |
no_license
|
wimboes/SpeechHub
|
3612b8d9b528162b86d84b2a9d822740eab032cb
|
808f360a804173c1d118b88ab252982133841097
|
refs/heads/master
| 2023-03-04T09:22:52.289028 | 2017-05-26T09:30:21 | 2017-05-26T09:30:21 | 69,444,761 | 1 | 2 | null | 2022-10-22T00:56:11 | 2016-09-28T08:53:34 | null |
UTF-8
|
Python
| false | false | 384 |
py
|
from gensim import models, corpora
lda = models.LdaModel.load('lda_512_10.ds')
dic = corpora.Dictionary.load('dictionary.ds')
#word_to_id = dict()
#for (wordid,word) in dic.iteritems():
# word_to_id[word] = wordid
nb_topics = lda.num_topics
for i in range(nb_topics):
lst = lda.get_topic_terms(i,topn=10)
lal = [dic[tup[0]] for tup in lst]
print('topic ' + str(i))
print(lal)
|
[
"[email protected]"
] | |
be5e0b741dc6ad841c668031edaee115bfe5314e
|
36059411cedfeec7478fd725c43f2120ab5ad38d
|
/boulder_ftp.py
|
a055a456e089b72f19569b633bd3059a4fd66cd2
|
[] |
no_license
|
tytechortz/Arctic-Ice
|
a8345746bdd2d73559941ea71efe06601212a7f1
|
83aac39c00027cca6bd85fd2709fcfe86cf3ef31
|
refs/heads/master
| 2022-07-09T13:38:41.735808 | 2020-01-02T16:22:52 | 2020-01-02T16:22:52 | 167,096,158 | 1 | 0 | null | 2022-06-21T23:10:16 | 2019-01-23T01:35:54 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 541 |
py
|
from ftplib import FTP
import os
import pandas as pd
# Log into ftp site.
ftp = FTP('sidads.colorado.edu')
ftp.login(user='anonymous', passwd='ICE_PSWD')  # a second bare login() call on an authenticated session would fail
# Read file.
ftp.cwd('/pub/DATASETS/NOAA/G02135/north/daily/data/')
ftp.retrbinary('RETR N_seaice_extent_daily_v3.0.csv', open('N_seaice_extent_daily_v3.0.csv', 'wb').write)
ftp.quit()
# Read data.
df = pd.read_csv('N_seaice_extent_daily_v3.0.csv',skiprows=[i for i in range(1,2436)])
# df.columns = []
pd.options.display.float_format = '{:,}'.format
print(df.head())
|
[
"[email protected]"
] | |
44c37a6d6deff849d4ab0e9f88cf61dcde7a8413
|
07697d48b35e964a7d851a26c93508eb8abb1569
|
/xabr/urls.py
|
96462404368d1dead587bd1ad82d903db2fcce87
|
[] |
no_license
|
Solntseva24/NewProjectXabr
|
58decc7961378ed1dca4a7c10744e2d1a0f9740b
|
20bd62c04a8ae2949ec999795ce45d57cefd090e
|
refs/heads/main
| 2023-08-10T18:29:55.137693 | 2021-09-30T21:07:21 | 2021-09-30T21:07:21 | 412,218,218 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 547 |
py
|
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from xabr import settings
urlpatterns = [
path('', include('mainapp.urls', namespace='main')),
path('auth/', include('authapp.urls', namespace='auth')),
path('blog/', include('blogapp.urls', namespace='blogapp')),
path('admin/', admin.site.urls),
path('admin/doc/', include('django.contrib.admindocs.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"[email protected]"
] | |
51834523099e3ca59c889ea40d94cfd03ffdbb05
|
f31fda8014ecadf6af7d4e3392fb917c49e0352a
|
/HeavyIonsAnalysis/JetAnalysis/python/jets/akPu5CaloJetSequence_pp_data_cff.py
|
ee3d9695514ad371c28857cd6d3dce622b1ebe50
|
[] |
no_license
|
jniedzie/lightbylight
|
acea5051f053c49824a49a0b78bac3a2247ee75f
|
f5a4661fcf3fd3c0e9ccd8893a46a238e30c2aa8
|
refs/heads/master
| 2020-03-18T12:24:31.970468 | 2018-02-09T15:50:00 | 2018-02-09T15:50:00 | 134,724,759 | 0 | 1 | null | 2018-05-24T14:11:12 | 2018-05-24T14:11:12 | null |
UTF-8
|
Python
| false | false | 14,202 |
py
|
import FWCore.ParameterSet.Config as cms
from HeavyIonsAnalysis.JetAnalysis.patHeavyIonSequences_cff import patJetGenJetMatch, patJetPartonMatch, patJetCorrFactors, patJets
from HeavyIonsAnalysis.JetAnalysis.inclusiveJetAnalyzer_cff import *
from HeavyIonsAnalysis.JetAnalysis.bTaggers_cff import *
from RecoJets.JetProducers.JetIDParams_cfi import *
from RecoJets.JetProducers.nJettinessAdder_cfi import Njettiness
akPu5Calomatch = patJetGenJetMatch.clone(
src = cms.InputTag("akPu5CaloJets"),
matched = cms.InputTag("ak5GenJets"),
resolveByMatchQuality = cms.bool(False),
maxDeltaR = 0.5
)
akPu5CalomatchGroomed = patJetGenJetMatch.clone(
src = cms.InputTag("ak5GenJets"),
matched = cms.InputTag("ak5GenJets"),
resolveByMatchQuality = cms.bool(False),
maxDeltaR = 0.5
)
akPu5Caloparton = patJetPartonMatch.clone(src = cms.InputTag("akPu5CaloJets")
)
akPu5Calocorr = patJetCorrFactors.clone(
useNPV = cms.bool(False),
useRho = cms.bool(False),
# primaryVertices = cms.InputTag("hiSelectedVertex"),
levels = cms.vstring('L2Relative','L3Absolute'),
src = cms.InputTag("akPu5CaloJets"),
payload = "AKPu5Calo_offline"
)
akPu5CaloJetID= cms.EDProducer('JetIDProducer', JetIDParams, src = cms.InputTag('akPu5CaloJets'))
#akPu5Caloclean = heavyIonCleanedGenJets.clone(src = cms.InputTag('ak5GenJets'))
akPu5CalobTagger = bTaggers("akPu5Calo",0.5)
#create objects locally since they dont load properly otherwise
#akPu5Calomatch = akPu5CalobTagger.match
akPu5Caloparton = patJetPartonMatch.clone(src = cms.InputTag("akPu5CaloJets"), matched = cms.InputTag("genParticles"))
akPu5CaloPatJetFlavourAssociationLegacy = akPu5CalobTagger.PatJetFlavourAssociationLegacy
akPu5CaloPatJetPartons = akPu5CalobTagger.PatJetPartons
akPu5CaloJetTracksAssociatorAtVertex = akPu5CalobTagger.JetTracksAssociatorAtVertex
akPu5CaloJetTracksAssociatorAtVertex.tracks = cms.InputTag("highPurityTracks")
akPu5CaloSimpleSecondaryVertexHighEffBJetTags = akPu5CalobTagger.SimpleSecondaryVertexHighEffBJetTags
akPu5CaloSimpleSecondaryVertexHighPurBJetTags = akPu5CalobTagger.SimpleSecondaryVertexHighPurBJetTags
akPu5CaloCombinedSecondaryVertexBJetTags = akPu5CalobTagger.CombinedSecondaryVertexBJetTags
akPu5CaloCombinedSecondaryVertexV2BJetTags = akPu5CalobTagger.CombinedSecondaryVertexV2BJetTags
akPu5CaloJetBProbabilityBJetTags = akPu5CalobTagger.JetBProbabilityBJetTags
akPu5CaloSoftPFMuonByPtBJetTags = akPu5CalobTagger.SoftPFMuonByPtBJetTags
akPu5CaloSoftPFMuonByIP3dBJetTags = akPu5CalobTagger.SoftPFMuonByIP3dBJetTags
akPu5CaloTrackCountingHighEffBJetTags = akPu5CalobTagger.TrackCountingHighEffBJetTags
akPu5CaloTrackCountingHighPurBJetTags = akPu5CalobTagger.TrackCountingHighPurBJetTags
akPu5CaloPatJetPartonAssociationLegacy = akPu5CalobTagger.PatJetPartonAssociationLegacy
akPu5CaloImpactParameterTagInfos = akPu5CalobTagger.ImpactParameterTagInfos
akPu5CaloImpactParameterTagInfos.primaryVertex = cms.InputTag("offlinePrimaryVertices")
akPu5CaloJetProbabilityBJetTags = akPu5CalobTagger.JetProbabilityBJetTags
akPu5CaloSecondaryVertexTagInfos = akPu5CalobTagger.SecondaryVertexTagInfos
akPu5CaloSimpleSecondaryVertexHighEffBJetTags = akPu5CalobTagger.SimpleSecondaryVertexHighEffBJetTags
akPu5CaloSimpleSecondaryVertexHighPurBJetTags = akPu5CalobTagger.SimpleSecondaryVertexHighPurBJetTags
akPu5CaloCombinedSecondaryVertexBJetTags = akPu5CalobTagger.CombinedSecondaryVertexBJetTags
akPu5CaloCombinedSecondaryVertexV2BJetTags = akPu5CalobTagger.CombinedSecondaryVertexV2BJetTags
akPu5CaloSecondaryVertexNegativeTagInfos = akPu5CalobTagger.SecondaryVertexNegativeTagInfos
akPu5CaloNegativeSimpleSecondaryVertexHighEffBJetTags = akPu5CalobTagger.NegativeSimpleSecondaryVertexHighEffBJetTags
akPu5CaloNegativeSimpleSecondaryVertexHighPurBJetTags = akPu5CalobTagger.NegativeSimpleSecondaryVertexHighPurBJetTags
akPu5CaloNegativeCombinedSecondaryVertexBJetTags = akPu5CalobTagger.NegativeCombinedSecondaryVertexBJetTags
akPu5CaloPositiveCombinedSecondaryVertexBJetTags = akPu5CalobTagger.PositiveCombinedSecondaryVertexBJetTags
akPu5CaloNegativeCombinedSecondaryVertexV2BJetTags = akPu5CalobTagger.NegativeCombinedSecondaryVertexV2BJetTags
akPu5CaloPositiveCombinedSecondaryVertexV2BJetTags = akPu5CalobTagger.PositiveCombinedSecondaryVertexV2BJetTags
akPu5CaloSoftPFMuonsTagInfos = akPu5CalobTagger.SoftPFMuonsTagInfos
akPu5CaloSoftPFMuonsTagInfos.primaryVertex = cms.InputTag("offlinePrimaryVertices")
akPu5CaloSoftPFMuonBJetTags = akPu5CalobTagger.SoftPFMuonBJetTags
akPu5CaloSoftPFMuonByIP3dBJetTags = akPu5CalobTagger.SoftPFMuonByIP3dBJetTags
akPu5CaloSoftPFMuonByPtBJetTags = akPu5CalobTagger.SoftPFMuonByPtBJetTags
akPu5CaloNegativeSoftPFMuonByPtBJetTags = akPu5CalobTagger.NegativeSoftPFMuonByPtBJetTags
akPu5CaloPositiveSoftPFMuonByPtBJetTags = akPu5CalobTagger.PositiveSoftPFMuonByPtBJetTags
akPu5CaloPatJetFlavourIdLegacy = cms.Sequence(akPu5CaloPatJetPartonAssociationLegacy*akPu5CaloPatJetFlavourAssociationLegacy)
#Not working with our PU sub, but keep it here for reference
#akPu5CaloPatJetFlavourAssociation = akPu5CalobTagger.PatJetFlavourAssociation
#akPu5CaloPatJetFlavourId = cms.Sequence(akPu5CaloPatJetPartons*akPu5CaloPatJetFlavourAssociation)
akPu5CaloJetBtaggingIP = cms.Sequence(akPu5CaloImpactParameterTagInfos *
(akPu5CaloTrackCountingHighEffBJetTags +
akPu5CaloTrackCountingHighPurBJetTags +
akPu5CaloJetProbabilityBJetTags +
akPu5CaloJetBProbabilityBJetTags
)
)
akPu5CaloJetBtaggingSV = cms.Sequence(akPu5CaloImpactParameterTagInfos
*
akPu5CaloSecondaryVertexTagInfos
* (akPu5CaloSimpleSecondaryVertexHighEffBJetTags+
akPu5CaloSimpleSecondaryVertexHighPurBJetTags+
akPu5CaloCombinedSecondaryVertexBJetTags+
akPu5CaloCombinedSecondaryVertexV2BJetTags
)
)
akPu5CaloJetBtaggingNegSV = cms.Sequence(akPu5CaloImpactParameterTagInfos
*
akPu5CaloSecondaryVertexNegativeTagInfos
* (akPu5CaloNegativeSimpleSecondaryVertexHighEffBJetTags+
akPu5CaloNegativeSimpleSecondaryVertexHighPurBJetTags+
akPu5CaloNegativeCombinedSecondaryVertexBJetTags+
akPu5CaloPositiveCombinedSecondaryVertexBJetTags+
akPu5CaloNegativeCombinedSecondaryVertexV2BJetTags+
akPu5CaloPositiveCombinedSecondaryVertexV2BJetTags
)
)
akPu5CaloJetBtaggingMu = cms.Sequence(akPu5CaloSoftPFMuonsTagInfos * (akPu5CaloSoftPFMuonBJetTags
+
akPu5CaloSoftPFMuonByIP3dBJetTags
+
akPu5CaloSoftPFMuonByPtBJetTags
+
akPu5CaloNegativeSoftPFMuonByPtBJetTags
+
akPu5CaloPositiveSoftPFMuonByPtBJetTags
)
)
akPu5CaloJetBtagging = cms.Sequence(akPu5CaloJetBtaggingIP
*akPu5CaloJetBtaggingSV
*akPu5CaloJetBtaggingNegSV
# *akPu5CaloJetBtaggingMu
)
akPu5CalopatJetsWithBtagging = patJets.clone(jetSource = cms.InputTag("akPu5CaloJets"),
genJetMatch = cms.InputTag("akPu5Calomatch"),
genPartonMatch = cms.InputTag("akPu5Caloparton"),
jetCorrFactorsSource = cms.VInputTag(cms.InputTag("akPu5Calocorr")),
JetPartonMapSource = cms.InputTag("akPu5CaloPatJetFlavourAssociationLegacy"),
JetFlavourInfoSource = cms.InputTag("akPu5CaloPatJetFlavourAssociation"),
trackAssociationSource = cms.InputTag("akPu5CaloJetTracksAssociatorAtVertex"),
useLegacyJetMCFlavour = True,
discriminatorSources = cms.VInputTag(cms.InputTag("akPu5CaloSimpleSecondaryVertexHighEffBJetTags"),
cms.InputTag("akPu5CaloSimpleSecondaryVertexHighPurBJetTags"),
cms.InputTag("akPu5CaloCombinedSecondaryVertexBJetTags"),
cms.InputTag("akPu5CaloCombinedSecondaryVertexV2BJetTags"),
cms.InputTag("akPu5CaloJetBProbabilityBJetTags"),
cms.InputTag("akPu5CaloJetProbabilityBJetTags"),
#cms.InputTag("akPu5CaloSoftPFMuonByPtBJetTags"),
#cms.InputTag("akPu5CaloSoftPFMuonByIP3dBJetTags"),
cms.InputTag("akPu5CaloTrackCountingHighEffBJetTags"),
cms.InputTag("akPu5CaloTrackCountingHighPurBJetTags"),
),
jetIDMap = cms.InputTag("akPu5CaloJetID"),
addBTagInfo = True,
addTagInfos = True,
addDiscriminators = True,
addAssociatedTracks = True,
addJetCharge = False,
addJetID = False,
getJetMCFlavour = False,
addGenPartonMatch = False,
addGenJetMatch = False,
embedGenJetMatch = False,
embedGenPartonMatch = False,
# embedCaloTowers = False,
# embedPFCandidates = True
)
akPu5CaloNjettiness = Njettiness.clone(
src = cms.InputTag("akPu5CaloJets"),
R0 = cms.double( 0.5)
)
akPu5CalopatJetsWithBtagging.userData.userFloats.src += ['akPu5CaloNjettiness:tau1','akPu5CaloNjettiness:tau2','akPu5CaloNjettiness:tau3']
akPu5CaloJetAnalyzer = inclusiveJetAnalyzer.clone(jetTag = cms.InputTag("akPu5CalopatJetsWithBtagging"),
genjetTag = 'ak5GenJets',
rParam = 0.5,
matchJets = cms.untracked.bool(False),
matchTag = 'patJetsWithBtagging',
pfCandidateLabel = cms.untracked.InputTag('particleFlow'),
trackTag = cms.InputTag("generalTracks"),
fillGenJets = False,
isMC = False,
doSubEvent = False,
useHepMC = cms.untracked.bool(False),
genParticles = cms.untracked.InputTag("genParticles"),
eventInfoTag = cms.InputTag("generator"),
doLifeTimeTagging = cms.untracked.bool(True),
doLifeTimeTaggingExtras = cms.untracked.bool(False),
bTagJetName = cms.untracked.string("akPu5Calo"),
jetName = cms.untracked.string("akPu5Calo"),
genPtMin = cms.untracked.double(5),
hltTrgResults = cms.untracked.string('TriggerResults::'+'HISIGNAL'),
doTower = cms.untracked.bool(False),
doSubJets = cms.untracked.bool(False),
doGenSubJets = cms.untracked.bool(False),
subjetGenTag = cms.untracked.InputTag("ak5GenJets"),
doGenTaus = False
)
akPu5CaloJetSequence_mc = cms.Sequence(
#akPu5Caloclean
#*
akPu5Calomatch
#*
#akPu5CalomatchGroomed
*
akPu5Caloparton
*
akPu5Calocorr
*
#akPu5CaloJetID
#*
akPu5CaloPatJetFlavourIdLegacy
#*
#akPu5CaloPatJetFlavourId # Use legacy algo till PU implemented
*
akPu5CaloJetTracksAssociatorAtVertex
*
akPu5CaloJetBtagging
*
akPu5CaloNjettiness
*
akPu5CalopatJetsWithBtagging
*
akPu5CaloJetAnalyzer
)
akPu5CaloJetSequence_data = cms.Sequence(akPu5Calocorr
*
#akPu5CaloJetID
#*
akPu5CaloJetTracksAssociatorAtVertex
*
akPu5CaloJetBtagging
*
akPu5CaloNjettiness
*
akPu5CalopatJetsWithBtagging
*
akPu5CaloJetAnalyzer
)
akPu5CaloJetSequence_jec = cms.Sequence(akPu5CaloJetSequence_mc)
akPu5CaloJetSequence_mb = cms.Sequence(akPu5CaloJetSequence_mc)
akPu5CaloJetSequence = cms.Sequence(akPu5CaloJetSequence_data)
|
[
"[email protected]"
] | |
d0f6ae52cfa302a69d163e0abc0b1bd64badc931
|
ca3da680541003e604947f0f454e11846b0841da
|
/IEA_Policy_Scrapper_Multi.py
|
254d52681db3cbb2d5172429f0dc15c3f6551ecf
|
[] |
no_license
|
Mejinha/IEAPolicyScrapper
|
06b9c469261d9f849260007e604f40c380d070e6
|
7e1cc10dcb468cdc3afb586ab651321914e76a69
|
refs/heads/main
| 2023-05-05T12:12:44.452081 | 2021-05-28T13:47:25 | 2021-05-28T13:47:25 | 371,115,638 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,041 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 25 15:49:06 2021
@author: amejd
"""
import requests
from bs4 import BeautifulSoup
import pandas as pd
from multiprocessing import Pool
import numpy as np
def GetData(page):
'''Access main page and search data in each row'''
url = 'https://www.iea.org/policies'
page_data = []
raw = requests.get(f'{url}?page={page}')
soup = BeautifulSoup(raw.content, 'html.parser')
rows = soup.findAll(class_ = 'm-policy-listing-item__row')
try:
for row in rows:
page_data.append(Scrape(row))
return page_data
except:
pass
def Scrape(row):
'''Access each policy and gather useful information'''
temp_dict = {
'Country': row.findAll(class_ = 'm-policy-listing-item__col m-policy-listing-item__col--country')[0]['data-sortable-value'],
'Year': row.findAll(class_ = 'm-policy-listing-item__col m-policy-listing-item__col--year')[0]['data-sortable-value'],
'Policy': row.findAll(class_ = 'm-policy-listing-item__link')[0].text.strip()
}
policy_url = 'https://www.iea.org{}'.format(row.findAll(class_ = 'm-policy-listing-item__link')[0]['href'])
raw_internal = requests.get(policy_url)
soup_internal = BeautifulSoup(raw_internal.text, 'html.parser')
categories = soup_internal.findAll('div', {'class': 'o-policy-content__list'})
for category in categories:
catname = category.find(class_ = 'o-policy-content-list__title').text
temp_dict[catname] = [item.text for item in category.findAll(class_ = 'a-tag__label')]
return temp_dict
if __name__ == '__main__':
pages = range(1, 186)
records = []
# Process the function GetData in batches of 10 parallel functions
with Pool(10) as p:
records = p.map(GetData, pages) # Execute GetData
p.close() # Closes the application to avoid it from keep running as a background task
p.join() # Wait for tasks before start a new batch
# Organize data
df = pd.concat([pd.DataFrame(i) for i in records])
df = df.reset_index()
    # Get unique values for each list-valued column
    def collect_unique(frame, column):
        values = []
        for row in frame.iterrows():
            try:
                for item in row[1][column]:
                    values.append(item)
            except:
                pass
        return np.unique(values)

    Topics = collect_unique(df, 'Topics')
    Policies = collect_unique(df, 'Policy types')
    Sectors = collect_unique(df, 'Sectors')
    Technologies = collect_unique(df, 'Technologies')
# Convert each value into unique boolean columns
# 1 Check if the variable is a list, otherwise return False
# 2 Check if the list contains the key, if so, return True
for key in Topics:
df['Topic_'+key] = df['Topics'].apply(lambda x: key in x if type(x) is list else False)
for key in Policies:
df['Policy_'+key] = df['Policy types'].apply(lambda x: key in x if type(x) is list else False)
for key in Sectors:
df['Sector_'+key] = df['Sectors'].apply(lambda x: key in x if type(x) is list else False)
for key in Technologies:
df['Tech_'+key] = df['Technologies'].apply(lambda x: key in x if type(x) is list else False)
# Export to excel
df.to_excel('IEA_Policies.xlsx')
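The collect_unique helper and the boolean-column loops above can also be expressed with pandas primitives; a sketch assuming pandas >= 0.25 for Series.explode:

def one_hot(frame, column, prefix):
    # explode list cells to one row per item, one-hot encode, collapse per row
    dummies = pd.get_dummies(frame[column].explode(), prefix=prefix)
    return frame.join(dummies.groupby(level=0).max())

df = one_hot(df, 'Topics', 'Topic')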
|
[
"[email protected]"
] | |
0d8675623ee25e5eed9509e8c2626383d0468b6c
|
461670493b15f5e9e471ddcc76732261cf948b37
|
/challenge_189/challenge.py
|
7a6537928e7136b6d38472ac3259c8f68390cc46
|
[] |
no_license
|
narenaryan/CodeEval-Solutions
|
d3d5f6bbecf74642d83b1e33c9e41f8ac2518924
|
bd63c78b5fac28439beefd747e272383cd839a89
|
refs/heads/master
| 2021-01-12T06:36:54.665037 | 2016-12-31T16:54:07 | 2016-12-31T16:54:07 | 77,394,649 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 711 |
py
|
import sys
with open(sys.argv[1], 'r') as test_cases:
for test in test_cases:
if test.strip() != '':
# Format the input
inputList = test.split(" ")
noOfFriends = int(inputList[0])
friendHouses = [int(i) for i in inputList[1:]]
# This list stores distances between Alice home and friend's
minDistanceFromFriendHouses = []
for shiftingHouseDistance in friendHouses:
tempDistances = [abs((friendHouseDistance - shiftingHouseDistance)) for friendHouseDistance in friendHouses]
minDistanceFromFriendHouses.append(sum(tempDistances))
            print(min(minDistanceFromFriendHouses))
|
[
"[email protected]"
] | |
ffcfc74ce4c1bbce06d1f958565e8f9d7d31fafe
|
abebd304a9bc3ff1b90db09eba66c003f51a74d5
|
/sug/preprocess/main/dump_main_db.py
|
52cee76fa23f547cb7454b3abffa9857895b8aa6
|
[] |
no_license
|
jiakechong1991/search
|
817715c58a1b117d177a7b49f443cb2411ee3c6f
|
86c644e9d26f2eba25d4cf50821ffcc8e14e7953
|
refs/heads/master
| 2021-04-15T17:42:01.074581 | 2018-04-10T14:04:17 | 2018-04-10T14:04:17 | 126,668,534 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,800 |
py
|
# coding: utf8
import argparse
import codecs
import copy
import json
import logging
import re
from search_service.indexing import Searchable
from config.conf import LOGGING_FORMAT, MAX_WORD_LEN
from utils.connection import with_mysql_cursor
class WordGetter():
def __init__(self):
pass
@with_mysql_cursor('edxapp')
def get_course_ids(self, cursor):
sql = 'select course_id from course_meta_course where status >= 0'
cursor.execute(sql)
course_ids = [x['course_id'] for x in cursor.fetchall()]
return course_ids
def get_clean_text(self, text):
special_chars = ['\t', '\n', u'\u2028', u'\u0085']
for c in special_chars:
text = text.replace(c, '')
return text
def dump_file(self, file_ot):
course_ids = self.get_course_ids()
course_num = len(course_ids)
wf = codecs.open(file_ot, 'w', encoding='utf8')
search = Searchable()
for no, course_id in enumerate(course_ids):
try:
course_info = search.get_course(course_id.encode('utf8'))
            except Exception as e:
logging.error('%s: %s', course_id, e)
continue
if not course_info:
continue
if no % 100 == 0:
logging.info('finished: %s/%s', no, course_num)
base_info = {
'course_id': course_id,
}
# course_name
row = copy.deepcopy(base_info)
row.update({
'category': 'course_name',
'value': course_info.course_name
})
wf.write(json.dumps(row, sort_keys=True, ensure_ascii=False) + '\n')
# course_about
row = copy.deepcopy(base_info)
row.update({
'category': 'course_about',
'value': self.get_clean_text(course_info.get_about_searchable_text())
})
wf.write(json.dumps(row, sort_keys=True, ensure_ascii=False) + '\n')
# children
for child in course_info.children:
if child.searchable_text:
row = copy.deepcopy(base_info)
row.update({
'category': child.category,
'value': self.get_clean_text(child.searchable_text)
})
wf.write(json.dumps(row, sort_keys=True, ensure_ascii=False) + '\n')
wf.close()
if __name__ == '__main__':
logging.basicConfig(format=LOGGING_FORMAT, level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument('--file_ot', required=True)
args = parser.parse_args()
obj = WordGetter()
obj.dump_file(args.file_ot)
|
[
"[email protected]"
] | |
3d8e6882276ef181b422c8f15e7f3ec495bb3668
|
5e7c35e35daf74ebea6a3efbf72eef8597e8a8f1
|
/models/__init__.py
|
9664c39d9c0166bd5db5e259a48b76b8c5d94b01
|
[] |
no_license
|
softhuafei/document_summarization
|
784bd8909d902caab4d1d6da82d146cd5e6b43cb
|
642fa8de5b0200c6270881c7cd75ca9d679b098b
|
refs/heads/master
| 2020-07-29T18:25:40.378238 | 2019-09-23T03:01:30 | 2019-09-23T03:01:30 | 209,916,993 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 31 |
py
|
# from models.seq2seq import *
|
[
"[email protected]"
] | |
6f9fd7fd9d9ed579db5194cea206a47fd8a0b308
|
9e5f71cec02ae4cb58a58b6fc33b75b5e2555022
|
/GO4StructuralPatterns/BridgePattern/MessageSenderBase.py
|
7b5325621725584d73454a674e6752607ae4f17a
|
[] |
no_license
|
sumeshsg/GO4DesignPattern
|
a764335412e22be9d945e321e67c1b9712bf71a2
|
c2d3625ae03aeb0816191a148d9db24e24b78c76
|
refs/heads/master
| 2022-09-07T03:18:17.217719 | 2020-05-29T06:40:24 | 2020-05-29T06:40:24 | 250,414,539 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 155 |
py
|
from abc import ABC, abstractmethod


class MessageSenderBase(ABC):  # without an ABC base, @abstractmethod is not enforced
@abstractmethod
def send_message(self, title, details, importance):
pass
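To round out the Bridge picture: this implementor base gets paired with an abstraction that holds a sender and delegates to it. The concrete classes below are illustrative, not taken from this repository:

class EmailSender(MessageSenderBase):
    def send_message(self, title, details, importance):
        print('EMAIL [%s] %s: %s' % (importance, title, details))

class Message(object):
    def __init__(self, sender):
        self.sender = sender  # the bridge: the abstraction composes an implementor

    def send(self, title, details, importance='normal'):
        self.sender.send_message(title, details, importance)

Message(EmailSender()).send('Build', 'all tests green')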
|
[
"[email protected]"
] |