blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
283
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
41
| license_type
stringclasses 2
values | repo_name
stringlengths 7
96
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 58
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 12.7k
662M
⌀ | star_events_count
int64 0
35.5k
| fork_events_count
int64 0
20.6k
| gha_license_id
stringclasses 11
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 43
values | src_encoding
stringclasses 9
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
5.88M
| extension
stringclasses 30
values | content
stringlengths 7
5.88M
| authors
sequencelengths 1
1
| author
stringlengths 0
73
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cb5806652dd6da026afc979dedd514931ea72e0d | 4dda601cb02b404bc0ae25f984825641ddb135fe | /scuole/campuses/migrations/0018_auto_20230324_1538.py | dfd660bed1b58acca9e972a52309b108928de740 | [
"MIT"
] | permissive | texastribune/scuole | d89e49d6bf42d6476a8b2e5a4ebe6380c28e9f60 | 155444e313313ba484d98d73d94d34e9b8f57fbe | refs/heads/master | 2023-06-28T02:52:40.037200 | 2023-05-22T21:51:15 | 2023-05-22T21:51:15 | 35,112,798 | 1 | 0 | MIT | 2023-06-12T20:04:49 | 2015-05-05T17:03:23 | Python | UTF-8 | Python | false | false | 6,271 | py | # Generated by Django 3.1.12 on 2023-03-24 15:38
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: re-declares the accountability-rating
    # CharFields on CampusStats so they all share the same TEA rating
    # choice list.
    # NOTE(review): the choices list contains duplicate keys ('T' and 'SB'
    # each appear twice with different labels); Django keeps only the last
    # label per key in forms. If this needs fixing, do it in a NEW
    # migration -- never edit this historical one.

    dependencies = [
        ('campuses', '0017_auto_20230324_1520'),
    ]

    operations = [
        migrations.AlterField(
            model_name='campusstats',
            name='accountability_rating',
            field=models.CharField(blank=True, choices=[('M', 'Met standard'), ('A', 'Met alternative standard'), ('T', 'Met alternative standard'), ('I', 'Improvement required'), ('X', 'Not rated'), ('Z', 'Not rated'), ('Q', 'Not rated (data integrity issue)'), ('T', 'Not rated (Annexed)'), ('H', 'Not rated (Harvey provision)'), ('P', 'Not rated (Paired campus)'), ('DD', 'Not Rated: Declared State of Disaster'), ('R', 'Not Rated: Data Under Review'), ('SB', 'Not Rated: Senate Bill 1365'), ('SB', 'Not Rated: SB 1365'), ('', None)], default='', max_length=2, verbose_name='Accountability rating from the latest year'),
        ),
        migrations.AlterField(
            model_name='campusstats',
            name='accountability_rating_18_19',
            field=models.CharField(blank=True, choices=[('M', 'Met standard'), ('A', 'Met alternative standard'), ('T', 'Met alternative standard'), ('I', 'Improvement required'), ('X', 'Not rated'), ('Z', 'Not rated'), ('Q', 'Not rated (data integrity issue)'), ('T', 'Not rated (Annexed)'), ('H', 'Not rated (Harvey provision)'), ('P', 'Not rated (Paired campus)'), ('DD', 'Not Rated: Declared State of Disaster'), ('R', 'Not Rated: Data Under Review'), ('SB', 'Not Rated: Senate Bill 1365'), ('SB', 'Not Rated: SB 1365'), ('', None)], default='', max_length=2, null=True, verbose_name='Accountability rating from 2018-19'),
        ),
        migrations.AlterField(
            model_name='campusstats',
            name='closing_the_gaps_rating',
            field=models.CharField(blank=True, choices=[('M', 'Met standard'), ('A', 'Met alternative standard'), ('T', 'Met alternative standard'), ('I', 'Improvement required'), ('X', 'Not rated'), ('Z', 'Not rated'), ('Q', 'Not rated (data integrity issue)'), ('T', 'Not rated (Annexed)'), ('H', 'Not rated (Harvey provision)'), ('P', 'Not rated (Paired campus)'), ('DD', 'Not Rated: Declared State of Disaster'), ('R', 'Not Rated: Data Under Review'), ('SB', 'Not Rated: Senate Bill 1365'), ('SB', 'Not Rated: SB 1365'), ('', None)], default='', max_length=2, verbose_name='Closing the gaps rating from the latest year'),
        ),
        migrations.AlterField(
            model_name='campusstats',
            name='closing_the_gaps_rating_18_19',
            field=models.CharField(blank=True, choices=[('M', 'Met standard'), ('A', 'Met alternative standard'), ('T', 'Met alternative standard'), ('I', 'Improvement required'), ('X', 'Not rated'), ('Z', 'Not rated'), ('Q', 'Not rated (data integrity issue)'), ('T', 'Not rated (Annexed)'), ('H', 'Not rated (Harvey provision)'), ('P', 'Not rated (Paired campus)'), ('DD', 'Not Rated: Declared State of Disaster'), ('R', 'Not Rated: Data Under Review'), ('SB', 'Not Rated: Senate Bill 1365'), ('SB', 'Not Rated: SB 1365'), ('', None)], default='', max_length=2, null=True, verbose_name='Closing the gaps rating from 2018-19'),
        ),
        migrations.AlterField(
            model_name='campusstats',
            name='school_progress_rating',
            field=models.CharField(blank=True, choices=[('M', 'Met standard'), ('A', 'Met alternative standard'), ('T', 'Met alternative standard'), ('I', 'Improvement required'), ('X', 'Not rated'), ('Z', 'Not rated'), ('Q', 'Not rated (data integrity issue)'), ('T', 'Not rated (Annexed)'), ('H', 'Not rated (Harvey provision)'), ('P', 'Not rated (Paired campus)'), ('DD', 'Not Rated: Declared State of Disaster'), ('R', 'Not Rated: Data Under Review'), ('SB', 'Not Rated: Senate Bill 1365'), ('SB', 'Not Rated: SB 1365'), ('', None)], default='', max_length=2, verbose_name='School progress rating from the latest year'),
        ),
        migrations.AlterField(
            model_name='campusstats',
            name='school_progress_rating_18_19',
            field=models.CharField(blank=True, choices=[('M', 'Met standard'), ('A', 'Met alternative standard'), ('T', 'Met alternative standard'), ('I', 'Improvement required'), ('X', 'Not rated'), ('Z', 'Not rated'), ('Q', 'Not rated (data integrity issue)'), ('T', 'Not rated (Annexed)'), ('H', 'Not rated (Harvey provision)'), ('P', 'Not rated (Paired campus)'), ('DD', 'Not Rated: Declared State of Disaster'), ('R', 'Not Rated: Data Under Review'), ('SB', 'Not Rated: Senate Bill 1365'), ('SB', 'Not Rated: SB 1365'), ('', None)], default='', max_length=2, null=True, verbose_name='School progress rating from 2018-19'),
        ),
        migrations.AlterField(
            model_name='campusstats',
            name='student_achievement_rating',
            field=models.CharField(blank=True, choices=[('M', 'Met standard'), ('A', 'Met alternative standard'), ('T', 'Met alternative standard'), ('I', 'Improvement required'), ('X', 'Not rated'), ('Z', 'Not rated'), ('Q', 'Not rated (data integrity issue)'), ('T', 'Not rated (Annexed)'), ('H', 'Not rated (Harvey provision)'), ('P', 'Not rated (Paired campus)'), ('DD', 'Not Rated: Declared State of Disaster'), ('R', 'Not Rated: Data Under Review'), ('SB', 'Not Rated: Senate Bill 1365'), ('SB', 'Not Rated: SB 1365'), ('', None)], default='', max_length=2, verbose_name='Student achievement rating from the latest year'),
        ),
        migrations.AlterField(
            model_name='campusstats',
            name='student_achievement_rating_18_19',
            field=models.CharField(blank=True, choices=[('M', 'Met standard'), ('A', 'Met alternative standard'), ('T', 'Met alternative standard'), ('I', 'Improvement required'), ('X', 'Not rated'), ('Z', 'Not rated'), ('Q', 'Not rated (data integrity issue)'), ('T', 'Not rated (Annexed)'), ('H', 'Not rated (Harvey provision)'), ('P', 'Not rated (Paired campus)'), ('DD', 'Not Rated: Declared State of Disaster'), ('R', 'Not Rated: Data Under Review'), ('SB', 'Not Rated: Senate Bill 1365'), ('SB', 'Not Rated: SB 1365'), ('', None)], default='', max_length=2, null=True, verbose_name='Student achievement rating from 2018-19'),
        ),
    ]
| [
"[email protected]"
] | |
7b0ff48f0a0173f17521f7f8579ec29be80ff5ba | 2e92e5e73422c381cde2926f77cc2ce73e2edffa | /s3_file_check.py | ddd0b8e7f7ca0d01365488356535f0e59ddcecce | [] | no_license | teng1/python-s3-check | 4492b9e9e7298c635831423e778012729c7a66aa | c6b297986274df29eb4e7e5af5360e4acb7b6576 | refs/heads/master | 2020-05-30T17:56:13.686560 | 2019-06-02T19:28:04 | 2019-06-02T19:28:04 | 189,886,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,624 | py | import boto3
import re
import datetime
from operator import itemgetter
# Module-level AWS client and the bucket/prefix this script scans.
s3 = boto3.client('s3')
# NOTE(review): `client` duplicates `s3` and is unused below -- confirm it
# can be removed.
client = boto3.client('s3')
s3_bucket = 'mybucket'
s3_prefix = 'myprefix'
def s3_list_objects(bucket, prefix):
    """Gets a python list of dictionaries of all S3 object properties matching the bucket and prefix."""
    partial_list = s3.list_objects_v2(Bucket=bucket, Prefix=prefix)
    obj_list = partial_list['Contents']
    while partial_list['IsTruncated']:
        next_token = partial_list['NextContinuationToken']
        # Bug fix: continuation pages previously queried the module-level
        # s3_bucket/s3_prefix instead of the bucket/prefix arguments, so
        # calling this with any other bucket returned mixed results.
        partial_list = s3.list_objects_v2(Bucket=bucket, Prefix=prefix,
                                          ContinuationToken=next_token)
        obj_list.extend(partial_list['Contents'])
    return obj_list
def parse_s3_response():
    """Process the S3 listing into a sorted list of object keys only."""
    listing = s3_list_objects(s3_bucket, s3_prefix)
    # list_objects_v2 already returns keys in lexicographic order, so this
    # sort is redundant but kept as a safety net.
    ordered = sorted(listing, key=itemgetter('Key'), reverse=False)
    return [entry['Key'] for entry in ordered]
def find_missing_sequences(ids):
    """Check for gaps in a 1-based serial sequence encoded as '...NNNN.zip' keys.

    Prints one pass/fail line per key and returns the list of missing serial
    numbers (empty when the sequence is complete). The return value is new
    and backward-compatible (the original always returned None).
    """
    missing = []
    expected = 1
    for key in ids:
        # Extract the 4-digit serial that immediately precedes '.zip'.
        file_seq_number = int(re.findall(r'\d\d\d\d.zip', key)[0].split('.')[0])
        if file_seq_number == expected:
            print('Sequence check pass: expected:', '%04d' % expected, 'Next in sequence is:', key)
        else:
            print('Sequence check failed: expected:', '%04d' % expected, 'Next file is:', key)
            # Bug fix: the original unconditionally did `sequence += 2`,
            # which only resynchronises after a single-file gap and then
            # mis-reports every later file. Record all skipped serials and
            # resynchronise to the serial actually seen.
            missing.extend(range(expected, file_seq_number))
        expected = file_seq_number + 1
    return missing
find_missing_sequences(parse_s3_response()) | [
"[email protected]"
] | |
a5328528846152c1bd69b79fa4f5920c31fd2652 | 3bcad3ca2f3f3b3a93041a844cdfc60598181c91 | /Part3/utilities/losses.py | 4166bc7f5e40e20543145d00cefa1ba13fc20ed1 | [] | no_license | DMarshallDeveloper/Machine-Learning | 474e6337afde2a27bcf88142685c5597351474a8 | 6ded473d7f391f5098d2d3ea75be58b74b617663 | refs/heads/main | 2023-04-30T18:20:11.397336 | 2021-05-23T08:59:47 | 2021-05-23T08:59:47 | 370,002,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 858 | py | # -*- coding: utf-8 -*-
"""
A function used to compute the loss
"""
import numpy as np
def compute_loss(y, x, theta, metric_type):
    """
    Compute the loss of given data with respect to the ground truth

    y            ground truth vector
    x            input data (feature matrix)
    theta        model parameters (w and b)
    metric_type  metric selector (case-insensitive): "MSE", "RMSE",
                 "R2" (negated, so lower is better) or "MAE"

    Raises ValueError for an unknown metric_type (the original silently
    returned None, which hid typos in the caller).
    """
    residual = x.dot(theta) - y
    metric = metric_type.upper()
    if metric == "MSE":
        return np.mean(np.power(residual, 2))
    elif metric == "RMSE":
        return np.sqrt(np.mean(np.power(residual, 2)))
    elif metric == "R2":
        # Negated R^2 so that, like the other metrics, smaller means better.
        return - (1 - np.mean(np.power(residual, 2)) / np.mean(np.power(y - np.mean(y), 2)))
    elif metric == "MAE":
        return np.mean(np.abs(residual))
    raise ValueError("Unknown metric_type: {}".format(metric_type))
| [
"[email protected]"
] | |
b0df4eace34d430063c111b44bb84d0c2cacf364 | e6ee1044aff06f5f1826543a03da811bb007c1d1 | /qa/rpc-tests/test_framework/util.py | 6bb66b61fe0a6eb693c78caa35f89c2b761eddca | [
"MIT"
] | permissive | ssghost/litecoinz | b7ca54aba47bb1f6f6980ab3f831c97b19aca3b6 | eef13e8349dc83528dda4a00d0013e240a1f53db | refs/heads/master | 2020-05-25T02:15:51.902536 | 2019-04-13T14:09:23 | 2019-04-13T14:09:23 | 187,572,391 | 1 | 0 | null | 2019-05-20T05:20:16 | 2019-05-20T05:20:16 | null | UTF-8 | Python | false | false | 15,931 | py | # Copyright (c) 2014 The Bitcoin Core developers
# Copyright (c) 2017-2018 The LitecoinZ developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
from binascii import hexlify, unhexlify
from base64 import b64encode
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from authproxy import AuthServiceProxy
def p2p_port(n):
    # Per-node P2P listen port; the PID offset lets parallel test runs coexist.
    return 11000 + n + os.getpid()%999
def rpc_port(n):
    # Per-node RPC port, in a separate range from p2p_port; same PID offset.
    return 12000 + n + os.getpid()%999
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    value = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(value)))
    satoshis = int(round_tripped * 1.0e8)
    if satoshis != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def bytes_to_hex_str(byte_str):
    """Encode a byte string as lowercase hex text."""
    hex_bytes = hexlify(byte_str)
    return hex_bytes.decode('ascii')
def hex_str_to_bytes(hex_str):
    """Decode hex text back into raw bytes."""
    ascii_bytes = hex_str.encode('ascii')
    return unhexlify(ascii_bytes)
def str_to_b64str(string):
    """Base64-encode a text string, returning text."""
    raw = string.encode('utf-8')
    return b64encode(raw).decode('ascii')
def sync_blocks(rpc_connections, wait=1, p=False):
    """
    Wait until everybody has the same block count

    rpc_connections  list of node RPC proxies to compare
    wait             seconds to sleep between polls
    p                when True, print the block counts on every poll
    """
    while True:
        counts = [ x.getblockcount() for x in rpc_connections ]
        if p :
            print counts
        # Done once every node reports the same count as node 0.
        if counts == [ counts[0] ]*len(counts):
            break
        time.sleep(wait)
def sync_mempools(rpc_connections, wait=1):
    """
    Wait until everybody has the same transactions in their memory
    pools
    """
    while True:
        reference = set(rpc_connections[0].getrawmempool())
        # Query every other node each round (same RPC pattern as before),
        # then stop once all of them match node 0's pool.
        matches = [set(conn.getrawmempool()) == reference
                   for conn in rpc_connections[1:]]
        if all(matches):
            break
        time.sleep(wait)
# Map of node index -> running litecoinzd subprocess handle.
litecoinzd_processes = {}

def initialize_datadir(dirname, n):
    # Create <dirname>/node<n> and write a regtest litecoinz.conf with
    # per-node p2p/rpc ports; returns the datadir path.
    datadir = os.path.join(dirname, "node"+str(n))
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    with open(os.path.join(datadir, "litecoinz.conf"), 'w') as f:
        f.write("regtest=1\n")
        f.write("showmetrics=0\n")
        f.write("rpcuser=rt\n")
        f.write("rpcpassword=rt\n")
        f.write("port="+str(p2p_port(n))+"\n")
        f.write("rpcport="+str(rpc_port(n))+"\n")
        f.write("listenonion=0\n")
    return datadir
def initialize_chain(test_dir):
    """
    Create (or copy from cache) a 200-block-long chain and
    4 wallets.
    litecoinzd and litecoinz-cli must be in search path.
    """
    # Only build the chain once; later runs copy the cached datadirs.
    if not os.path.isdir(os.path.join("cache", "node0")):
        devnull = open("/dev/null", "w+")
        # Create cache directories, run litecoinzds:
        for i in range(4):
            datadir=initialize_datadir("cache", i)
            args = [ os.getenv("LITECOINZD", "litecoinzd"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
            if i > 0:
                # Later nodes dial node 0 so all four share one chain.
                args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
            litecoinzd_processes[i] = subprocess.Popen(args)
            if os.getenv("PYTHON_DEBUG", ""):
                print "initialize_chain: litecoinzd started, calling litecoinz-cli -rpcwait getblockcount"
            # -rpcwait blocks until the node's RPC server is answering.
            subprocess.check_call([ os.getenv("LITECOINZCLI", "litecoinz-cli"), "-datadir="+datadir,
                                    "-rpcwait", "getblockcount"], stdout=devnull)
            if os.getenv("PYTHON_DEBUG", ""):
                print "initialize_chain: litecoinz-cli -rpcwait getblockcount completed"
        devnull.close()
        rpcs = []
        for i in range(4):
            try:
                url = "http://rt:[email protected]:%d"%(rpc_port(i),)
                rpcs.append(AuthServiceProxy(url))
            except:
                sys.stderr.write("Error connecting to "+url+"\n")
                sys.exit(1)
        # Create a 200-block-long chain; each of the 4 nodes
        # gets 25 mature blocks and 25 immature.
        # blocks are created with timestamps 10 minutes apart, starting
        # at 1 Jan 2014
        block_time = 1388534400
        for i in range(2):
            for peer in range(4):
                for j in range(25):
                    set_node_times(rpcs, block_time)
                    rpcs[peer].generate(1)
                    block_time += 10*60
                # Must sync before next peer starts generating blocks
                sync_blocks(rpcs)
        # Shut them down, and clean up cache directories:
        stop_nodes(rpcs)
        wait_litecoinzds()
        for i in range(4):
            os.remove(log_filename("cache", i, "debug.log"))
            os.remove(log_filename("cache", i, "db.log"))
            os.remove(log_filename("cache", i, "peers.dat"))
            os.remove(log_filename("cache", i, "fee_estimates.dat"))
    # Copy the cached datadirs into the test directory.
    for i in range(4):
        from_dir = os.path.join("cache", "node"+str(i))
        to_dir = os.path.join(test_dir, "node"+str(i))
        shutil.copytree(from_dir, to_dir)
        initialize_datadir(test_dir, i) # Overwrite port/rpcport in litecoinz.conf
def initialize_chain_clean(test_dir, num_nodes):
    """
    Create an empty blockchain and num_nodes wallets.
    Useful if a test case wants complete control over initialization.
    """
    for node_index in range(num_nodes):
        initialize_datadir(test_dir, node_index)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
    """
    Start a litecoinzd and return RPC connection to it

    i           node index (determines datadir and ports)
    dirname     parent directory holding node<i> datadirs
    extra_args  extra command-line arguments for litecoinzd
    rpchost     optional "host[:port]" RPC endpoint override
    timewait    optional RPC timeout in seconds
    binary      optional path to the litecoinzd binary
    """
    datadir = os.path.join(dirname, "node"+str(i))
    if binary is None:
        binary = os.getenv("LITECOINZD", "litecoinzd")
    args = [ binary, "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
    if extra_args is not None: args.extend(extra_args)
    litecoinzd_processes[i] = subprocess.Popen(args)
    devnull = open("/dev/null", "w+")
    if os.getenv("PYTHON_DEBUG", ""):
        print "start_node: litecoinzd started, calling litecoinz-cli -rpcwait getblockcount"
    # -rpcwait blocks until the node's RPC server is answering.
    subprocess.check_call([ os.getenv("LITECOINZCLI", "litecoinz-cli"), "-datadir="+datadir] +
                          _rpchost_to_args(rpchost)  +
                          ["-rpcwait", "getblockcount"], stdout=devnull)
    if os.getenv("PYTHON_DEBUG", ""):
        print "start_node: calling litecoinz-cli -rpcwait getblockcount returned"
    devnull.close()
    url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
    if timewait is not None:
        proxy = AuthServiceProxy(url, timeout=timewait)
    else:
        proxy = AuthServiceProxy(url)
    proxy.url = url # store URL on proxy for info
    return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
    """
    Start multiple litecoinzds, return RPC connections to them
    """
    # Normalise the per-node option lists so indexing below always works.
    if extra_args is None:
        extra_args = [None] * num_nodes
    if binary is None:
        binary = [None] * num_nodes
    return [start_node(i, dirname, extra_args[i], rpchost, binary=binary[i])
            for i in range(num_nodes)]
def log_filename(dirname, n_node, logname):
    """Path to a log file inside a node's regtest datadir."""
    node_dir = "node" + str(n_node)
    return os.path.join(dirname, node_dir, "regtest", logname)
def check_node(i):
    """Poll node i's subprocess and return its exit code (None if still running)."""
    # Bug fix: the original referenced `bitcoind_processes`, a name left over
    # from the upstream Bitcoin test framework that does not exist in this
    # module (it would raise NameError); processes are tracked in
    # `litecoinzd_processes` here.
    litecoinzd_processes[i].poll()
    return litecoinzd_processes[i].returncode
def stop_node(node, i):
    # Ask node i to shut down over RPC, then reap and forget its subprocess.
    node.stop()
    litecoinzd_processes[i].wait()
    del litecoinzd_processes[i]
def stop_nodes(nodes):
    # Ask every node to shut down over RPC; unlike stop_node this does not
    # wait for the subprocesses (callers use wait_litecoinzds for that).
    for node in nodes:
        node.stop()
    del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
    # Set the same mock time t (unix seconds) on every node.
    for node in nodes:
        node.setmocktime(t)
def wait_litecoinzds():
    # Wait for all litecoinzds to cleanly exit
    for litecoinzd in litecoinzd_processes.values():
        litecoinzd.wait()
    litecoinzd_processes.clear()
def connect_nodes(from_connection, node_num):
    # One-way connect: tell from_connection to dial node node_num.
    ip_port = "127.0.0.1:"+str(p2p_port(node_num))
    from_connection.addnode(ip_port, "onetry")
    # poll until version handshake complete to avoid race conditions
    # with transaction relaying
    while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
        time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
    # Connect nodes a and b in both directions.
    connect_nodes(nodes[a], b)
    connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
    """
    Return index to output of txid with value amount
    Raises exception if there is none.
    """
    txdata = node.getrawtransaction(txid, 1)
    for index, txout in enumerate(txdata["vout"]):
        if txout["value"] == amount:
            return index
    raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """
    Return a random set of unspent txouts that are enough to pay amount_needed
    """
    assert(confirmations_required >=0)
    utxo = from_node.listunspent(confirmations_required)
    random.shuffle(utxo)
    inputs = []
    total_in = Decimal("0.00000000")
    # Pop shuffled UTXOs until the target amount is covered or we run out.
    while total_in < amount_needed and utxo:
        candidate = utxo.pop()
        total_in += candidate["amount"]
        inputs.append({
            "txid": candidate["txid"],
            "vout": candidate["vout"],
            "address": candidate["address"],
        })
    if total_in < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
    return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
    """
    Create change output(s), return them

    from_node   node whose wallet supplies fresh change addresses
    amount_in   total value of the gathered inputs
    amount_out  value being sent to the recipient
    fee         transaction fee
    Returns a dict of {address: amount} change outputs.
    """
    outputs = {}
    amount = amount_out+fee
    change = amount_in - amount
    if change > amount*2:
        # Create an extra change output to break up big inputs
        change_address = from_node.getnewaddress()
        # Split change in two, being careful of rounding:
        outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
        # Remainder after the rounded half goes to the second change output.
        change = amount_in - amount - outputs[change_address]
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
    """
    Create&broadcast a zero-priority transaction.
    Returns (txid, hex-encoded-txdata)
    Ensures transaction is zero-priority by first creating a send-to-self,
    then using its output
    """
    # Create a send-to-self with confirmed inputs:
    self_address = from_node.getnewaddress()
    # Gather enough to cover amount plus the fees of BOTH transactions.
    (total_in, inputs) = gather_inputs(from_node, amount+fee*2)
    outputs = make_change(from_node, total_in, amount+fee, fee)
    outputs[self_address] = float(amount+fee)
    self_rawtx = from_node.createrawtransaction(inputs, outputs)
    self_signresult = from_node.signrawtransaction(self_rawtx)
    self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
    vout = find_output(from_node, self_txid, amount+fee)
    # Now immediately spend the output to create a 1-input, 1-output
    # zero-priority transaction:
    inputs = [ { "txid" : self_txid, "vout" : vout } ]
    outputs = { to_node.getnewaddress() : float(amount) }
    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)
    return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random zero-priority transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    sender = random.choice(nodes)
    receiver = random.choice(nodes)
    fee = min_fee + fee_increment * random.randint(0, fee_variants)
    txid, txhex = send_zeropri_transaction(sender, receiver, amount, fee)
    return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    sender = random.choice(nodes)
    receiver = random.choice(nodes)
    fee = min_fee + fee_increment * random.randint(0, fee_variants)
    total_in, inputs = gather_inputs(sender, amount + fee)
    outputs = make_change(sender, total_in, amount, fee)
    outputs[receiver.getnewaddress()] = float(amount)
    raw_tx = sender.createrawtransaction(inputs, outputs)
    signed = sender.signrawtransaction(raw_tx)
    txid = sender.sendrawtransaction(signed["hex"], True)
    return (txid, signed["hex"], fee)
def assert_equal(expected, actual, message=""):
    """Raise AssertionError (with optional message) unless expected == actual."""
    if expected == actual:
        return
    suffix = "; %s" % message if message else ""
    raise AssertionError("(left == right)%s\n left: <%s>\n right: <%s>" % (suffix, str(expected), str(actual)))
def assert_true(condition, message = ""):
    """Raise AssertionError(message) when condition is falsy."""
    if condition:
        return
    raise AssertionError(message)
def assert_false(condition, message = ""):
    """Raise AssertionError(message) when condition is truthy."""
    # Inlined instead of delegating to assert_true(not condition, message);
    # behavior is identical.
    if condition:
        raise AssertionError(message)
def assert_greater_than(thing1, thing2):
    """Raise AssertionError unless thing1 > thing2."""
    if thing1 > thing2:
        return
    raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
    """Assert that fun(*args, **kwds) raises exc.

    Any other Exception, or no exception at all, fails the assertion.
    """
    try:
        fun(*args, **kwds)
    except exc:
        return
    except Exception as e:
        raise AssertionError("Unexpected exception raised: "+type(e).__name__)
    raise AssertionError("No exception raised")
def assert_raises_message(ExceptionType, errstr, func, *args, **kwargs):
    """
    Asserts that func throws and that the exception contains 'errstr'
    in its message.
    """
    try:
        func(*args, **kwargs)
    except ExceptionType as e:
        if errstr not in str(e):
            raise AssertionError("Invalid exception string: Couldn't find %r in %r" % (
                errstr, str(e)))
        return
    except Exception as e:
        raise AssertionError("Unexpected exception raised: " + type(e).__name__)
    raise AssertionError("No exception raised")
def fail(message=""):
    # Unconditionally fail the current test with the given message.
    raise AssertionError(message)
# Returns txid if operation was a success or None
def wait_and_assert_operationid_status(node, myopid, in_status='success', in_errormsg=None, timeout=300):
    """Poll node for async operation myopid and assert its terminal status.

    node         RPC proxy to poll via z_getoperationresult
    myopid       operation id returned by an async z_* call
    in_status    expected terminal status ('success' or 'failed')
    in_errormsg  substring expected in the error message when it failed
    timeout      maximum seconds to wait for a result
    """
    print('waiting for async operation {}'.format(myopid))
    result = None
    # Poll once per second until the operation reports a result or we time out.
    for _ in xrange(1, timeout):
        results = node.z_getoperationresult([myopid])
        if len(results) > 0:
            result = results[0]
            break
        time.sleep(1)
    assert_true(result is not None, "timeout occured")
    status = result['status']
    txid = None
    errormsg = None
    if status == "failed":
        errormsg = result['error']['message']
    elif status == "success":
        txid = result['result']['txid']
    if os.getenv("PYTHON_DEBUG", ""):
        print('...returned status: {}'.format(status))
        if errormsg is not None:
            print('...returned error: {}'.format(errormsg))
    assert_equal(in_status, status, "Operation returned mismatched status. Error Message: {}".format(errormsg))
    if errormsg is not None:
        assert_true(in_errormsg is not None, "No error retured. Expected: {}".format(errormsg))
        assert_true(in_errormsg in errormsg, "Error returned: {}. Error expected: {}".format(errormsg, in_errormsg))
        return result # if there was an error return the result
    else:
        return txid # otherwise return the txid
| [
"[email protected]"
] | |
a1ce721648437c692be680703e759af641f629a4 | 370eb3d601cba2d791a7c690896d3504b3204c48 | /car_speech_rec/CarAudio/settings.py | b2f19fe0df1b292619a84d3c306dbc7d86ec3033 | [] | no_license | marchboy/python_machine_learning | ba558efa2bc016964251c0daa0276d6115e39bc2 | 7d2cae69930f381fe2029f58e977b55affb5e88b | refs/heads/master | 2021-07-15T09:05:02.252006 | 2020-01-17T09:35:49 | 2020-01-17T09:35:49 | 132,924,711 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,504 | py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
import os
import time
import logging
# Database connection settings.
# NOTE(review): credentials are hard-coded in source; consider loading them
# from the environment or a secrets store instead.
MYSQL_INFO = {
    'host':'localhost',
    'user':'root',
    'passwd':'mariadb',
    'db':'test',
    'charset':'utf8'
}

# Alternative (non-root) connection settings, kept commented for reference.
# MYSQL_INFO = {
#     'host':'localhost',
#     'user':'testuser',
#     'passwd':'testpwd258*',
#     'db':'test_database',
#     'charset':'utf8'
# }

# Log files live in a "logs" directory next to this file; the directory is
# created as an import-time side effect.
cur_path = os.path.dirname(os.path.realpath(__file__))
# log_path = os.path.join(os.path.dirname(cur_path), "logs")
log_path = os.path.join(cur_path, 'logs')
if not os.path.exists(log_path):
    os.mkdir(log_path)
class Log(object):
    """Logger that writes each message to a dated file under log_path and to
    the console, attaching/detaching handlers per call."""

    def __init__(self, module_name):
        # One log file per day: car_audio_YYYYMMDD.log
        self.logname = os.path.join(log_path, 'car_audio_{}.log'.format(time.strftime("%Y%m%d")))
        self.logger = logging.getLogger(module_name)
        self.logger.setLevel(logging.DEBUG)
        self.formatter = logging.Formatter(
            # '[%(asctime)s] --- %(funcName)s --- %(name)s --- %(levelname)s: %(message)s',
            '[%(asctime)s] --- %(name)s --- %(levelname)s: %(message)s',
            datefmt="%Y-%m-%d %H:%M:%S"
        )

    def __console(self, level, message):
        # file_handler: writes to the local log file
        file_handler = logging.FileHandler(self.logname)
        file_handler.setFormatter(self.formatter)
        file_handler.setLevel(logging.DEBUG)
        self.logger.addHandler(file_handler)
        # stream_handler: writes to the console
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(self.formatter)
        stream_handler.setLevel(logging.DEBUG)
        self.logger.addHandler(stream_handler)
        if level == "info":
            self.logger.info(message)
        if level == "debug":
            self.logger.debug(message)
        if level == "warning":
            self.logger.warning(message)
        if level == "error":
            self.logger.error(message)
        # Handlers are removed after every call so repeated Log instances for
        # the same module name do not produce duplicate output.
        self.logger.removeHandler(stream_handler)
        self.logger.removeHandler(file_handler)
        file_handler.close()

    def debug(self, message):
        self.__console("debug", message)

    def info(self, message):
        self.__console("info", message)

    def warning(self, message):
        self.__console("warning", message)

    def error(self, message):
        self.__console("error", message)
if __name__ == '__main__':
    # Ad-hoc smoke test when the module is run directly.
    print(cur_path)
    print(os.path.dirname(cur_path))
    print(log_path)
    lg = Log("Test p")
    lg.info("葱爷最棒哦···")
| [
"[email protected]"
] | |
c16ff949874ec6e391680cf0a2e5becac995f1e8 | ef853c148a2bfc0def7587f316da1f84d93b1785 | /myproject/app/models.py | db390fa5e856bcaa841960fa403fd35fd65b365f | [] | no_license | gitget2322/goal_web | fd7391621d689e83075b55d715e8923b636d1f8d | 0544c9c6da29da387f92195b183c87456303a171 | refs/heads/master | 2021-01-20T07:28:05.059072 | 2017-08-27T15:37:26 | 2017-08-27T15:37:26 | 101,540,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,527 | py | #coding=utf-8
import hashlib
from datetime import datetime

from flask import current_app
from flask import request
from flask_login import UserMixin,AnonymousUserMixin
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from werkzeug.security import generate_password_hash,check_password_hash

from __init__ import db
from __init__ import login_manager
# Permission constants
class Permission:
    """Bit-flag permission constants, OR-ed together into a role's mask."""
    FOLLOW = 0x01
    COMMENT = 0x02
    # Canonical spelling; the misspelled WEITE_ARTICLES is kept as a
    # deprecated alias because existing code references it.
    WRITE_ARTICLES = 0x04
    WEITE_ARTICLES = 0x04
    MODERATE_COMMENT = 0x08
    # Deprecated alias for the misspelling referenced by Role.inset_roles.
    MODERATE_COMMNETS = 0x08
    ADMINISTER = 0x80
# Role model (the roles table)
class Role(db.Model):
    """A user role whose integer bitmask grants Permission flags."""
    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)
    # True for the single role assigned to newly registered users.
    default = db.Column(db.Boolean, default=False, index=True)
    permissions = db.Column(db.Integer)
    # One-to-many relationship to users holding this role.
    users = db.relationship('User', backref='role', lazy='dynamic')

    def __repr__(self):
        return '<Role %r>' % self.name

    # Create/refresh the built-in roles in the database (idempotent).
    @staticmethod
    def inset_roles():
        roles = {
            # Permission.WEITE_ARTICLES is the (misspelled) constant actually
            # defined on Permission; the original referenced the undefined
            # names WRITE_ARTICLES / MODERATE_COMMNETS and would NameError.
            'User': (Permission.FOLLOW |
                     Permission.COMMENT |
                     Permission.WEITE_ARTICLES, True),
            'Moderator': (Permission.FOLLOW |
                          Permission.COMMENT |
                          Permission.MODERATE_COMMENT, False),
            'Administrator': (0xff, False),
        }
        for r in roles:
            role = Role.query.filter_by(name=r).first()
            if role is None:
                role = Role(name=r)
            # Bug fix: the original read role[r][0] (indexing the Role
            # instance) instead of roles[r][0].
            role.permissions = roles[r][0]
            role.default = roles[r][1]
            db.session.add(role)
        db.session.commit()
# User model (the users table)
class User(UserMixin, db.Model):
    """Application user account: credentials, confirmation state, role,
    profile fields and Gravatar support."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(64), unique=True, index=True)
    username = db.Column(db.String(64), unique=True, index=True)
    password_hash = db.Column(db.String(128))
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
    confirmed = db.Column(db.Boolean, default=False)
    # One-to-many relationship to this user's goals.
    goals = db.relationship('Goal', backref='user', lazy='dynamic')
    # Profile fields.
    name = db.Column(db.String(64))
    location = db.Column(db.String(64))
    about_me = db.Column(db.Text())
    member_since = db.Column(db.DateTime(), default=datetime.utcnow)
    # Bug fixes: original wrote db.Datetime() (no such attribute; it is
    # DateTime) and called datetime.utcnow() so the default froze at import
    # time; the callable must be passed instead.
    last_seen = db.Column(db.DateTime(), default=datetime.utcnow)
    # Cached MD5 of the email, used to build Gravatar URLs.
    avatar_hash = db.Column(db.String(32))

    def __init__(self, **kwargs):
        super(User, self).__init__(**kwargs)
        # Assign a default role (administrator for the configured admin
        # address). Bug fix: this block sat inside ping() in the original,
        # where it re-ran on every request instead of once at creation.
        if self.role is None:
            if self.email == current_app.config['FLASKY_ADMIN']:
                self.role = Role.query.filter_by(permissions=0xff).first()
            if self.role is None:
                self.role = Role.query.filter_by(default=True).first()
        # Cache the MD5 hash used for the Gravatar URL.
        if self.email is not None and self.avatar_hash is None:
            self.avatar_hash = hashlib.md5(self.email.encode('utf-8')).hexdigest()

    def __repr__(self):
        return '<User %r>' % self.username

    @property
    def password(self):
        raise AttributeError('password is not a readable attribute')

    @password.setter
    def password(self, password):
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        return check_password_hash(self.password_hash, password)

    def generate_confirmation_token(self, expiration=3600):
        """Signed token for account confirmation, valid for `expiration` seconds."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'confirm': self.id})

    def confirm(self, token):
        """Validate a confirmation token and mark the account confirmed."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:
            return False
        if data.get('confirm') != self.id:
            return False
        self.confirmed = True
        db.session.add(self)
        return True

    def ping(self):
        """Refresh the user's last-seen timestamp."""
        self.last_seen = datetime.utcnow()
        db.session.add(self)

    def can(self, permissions):
        """True when the user's role grants every bit in `permissions`."""
        # Bug fix: original read self.role.permission; the column is
        # `permissions`.
        return self.role is not None and \
            (self.role.permissions & permissions) == permissions

    def is_administrator(self):
        return self.can(Permission.ADMINISTER)

    def change_email(self, token):
        """Validate an email-change token and apply the new address.

        Bug fix: the original assigned an undefined name `new_email` and
        would raise NameError. NOTE(review): assumes the token payload is
        {'change_email': user_id, 'new_email': ...} produced by a matching
        generate-token helper -- confirm against the calling view.
        """
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:
            return False
        if data.get('change_email') != self.id:
            return False
        new_email = data.get('new_email')
        if new_email is None:
            return False
        self.email = new_email
        # Keep the cached Gravatar hash in sync with the new address.
        self.avatar_hash = hashlib.md5(self.email.encode('utf-8')).hexdigest()
        db.session.add(self)
        return True

    def gravatar(self, size=100, default='identicon', rating='g'):
        """Gravatar URL for this user.

        The original defined gravatar twice; only the second (hash-caching)
        definition ever took effect, so the first has been removed.
        """
        if request.is_secure:
            url = 'http://secure.gravatar.com/avatar'
        else:
            url = 'http://www.gravatar.com/avatar'
        hash = self.avatar_hash or hashlib.md5(self.email.encode('utf-8')).hexdigest()
        return '{url}/{hash}?s={size}&d={default}&r={rating}'.format(
            url=url, hash=hash, size=size, default=default, rating=rating)
#建立匿名用户模型
class AnonymousUser(AnonymousUserMixin):
def can(self,permissions):
return False
def is_administrator(self):
return False
login_manager.anonymous_user=AnonymousUser
class Goal(db.Model):
__tablename__=='goals'
id=db.Column(db.Integer,primary_key=True)
author_id=db.Column(db.Integer,db.ForeignKey('users.id'))
create_time=db.Column(db.Dtaetime,index=True,default=datetime.utcnow)
category=db.Column(db.String(64))
tile=db.Column(db.String(64))
plans=db.Column(db.Text)
completion=db.Column(db.Boolean)
class Category(db.Model):
__tablename__='category'
id=db.Column(db.Integer,primary_key=True)
name=db.Column(db.String(64),unique=True)
@staticmethod
def inser_categorys():
categorys=['study','work','exercise')
for c in categorys:
category=Category.query.filter_by(name=c).first()
if not category:
category=Category(name=c)
db.session.add(category)
db.session.commit()
class Plan(db.Model):
__tablename__='plans'
id=db.Column(db.Integer,primary_key=True)
step=db.Column(db.String(64))
completion=db.Column(db.Boolean)
| [
"[email protected]"
] | |
38f5936465861ffb9a9e1b63f3e2f4926c1f27f6 | 89fd384acfd1b90d398fb41a1c515b42b5a43ccf | /snakes/Caliban.py | a0d456cb092d3de5370a02055ca74a8c135c7198 | [] | no_license | slippedandmissed/CodeSnake | 2fb4cbe091e20af9184db1ea41329db7e8c0ba84 | b771aa8bd240b4d1c9cefaf85acb738ac95f97c5 | refs/heads/master | 2022-03-21T05:38:52.710428 | 2019-09-10T18:05:30 | 2019-09-10T18:05:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,642 | py | from operator import add
import random
name = "Caliban"
myHistory = []
myLength = 1
oldPelletPos = [-1, -1]
def getDist(arg0, arg1):
vector = [arg1[i]-arg0[i] for i in range(len(arg0))]
distance = sum([abs(i) for i in vector])
return [vector, distance]
def isTurningBack(myPos, direction):
global myLength, myHistory
mapOutput = {"left": [-1, 0],
"right": [1, 0],
"up": [0, -1],
"down": [0, 1]}
newPos = list(map(add, myPos, mapOutput[direction]))
return newPos in myHistory[-myLength:]
def reset():
global myLength, myHistory, oldPelletPos
myLength = 1
myHistory = []
oldPelletPos = [-1, -1]
def move(myPos, enemyPos, pelletPos):
global myLength, oldPelletPos
toReturn = ""
enemyVector, enemyDistance = getDist(myPos, enemyPos)
pelletVector, pelletDistance = getDist(myPos, pelletPos)
if myPos == oldPelletPos:
myLength += 1
if enemyDistance < pelletDistance:
if enemyVector[0] < enemyVector[1]:
toReturn = "left" if enemyVector[0]>0 else "right"
else:
toReturn = "up" if enemyVector[1]>0 else "down"
else:
if pelletVector[0] < pelletVector[1]:
toReturn = "right" if pelletVector[0]>0 else "left"
else:
toReturn = "down" if pelletVector[1]>0 else "up"
attempts = 0
while isTurningBack(myPos, toReturn):
toReturn = random.choice(["left", "right", "up", "down"])
attempts += 1
if attempts > 100:
break
myHistory.append(myPos)
oldPelletPos = pelletPos
return toReturn
| [
"[email protected]"
] | |
b149655165dbfc3253e689f968488cd68f3e18c6 | 3e660e22783e62f19e9b41d28e843158df5bd6ef | /script.me.syncsmashingfromgithub/smashingfavourites/scripts/oldscripts/smashingtvextended.py | 23aa7191d67b111064220b6ce41ecbc4caa91859 | [] | no_license | monthou66/repository.smashingfavourites | a9603906236000d2424d2283b50130c7a6103966 | f712e2e4715a286ff6bff304ca30bf3ddfaa112f | refs/heads/master | 2020-04-09T12:14:34.470077 | 2018-12-04T10:56:45 | 2018-12-04T10:56:45 | 160,341,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,059 | py |
# -*- coding: utf-8 -*-
# opens tv channel or guide groups via smashingfavourites and / or keymap.
import os
import os.path
import xbmc
import sys
# make sure dvbviewer is running - enable and wait if necessary
def enable():
if not xbmc.getCondVisibility('System.HasAddon(pvr.dvbviewer)'):
xbmc.executeJSONRPC('{"jsonrpc":"2.0","method":"Addons.SetAddonEnabled","id":7,"params":{"addonid":"pvr.dvbviewer","enabled":true}}')
xbmc.sleep(200)
# make sure dvbviewer is not running - disable if necessary
def disable():
xbmc.executeJSONRPC('{"jsonrpc":"2.0","method":"Addons.SetAddonEnabled","id":8,"params":{"addonid":"pvr.dvbviewer","enabled":false}}')
# define terms... c = count
# f=0 for just pvr disabled f = 1 (value) if channels, f=2 (value) if guides, f=3 if radio, f=4 if recordings,
# f=5 if timers, f=6 if search, f=7 if recording / recorded files, f=8 for timeshift, f=9 for permanently enable,
# f=10 for remove enable check.
# g = group number (value)... g=3 for last channel group / guide group
# define f
a = sys.argv[1]
f = int(a)
def terms():
b = sys.argv[2]
c = 2
g = int(b)
# f=3
def radio():
xbmc.executebuiltin('ActivateWindow(Radio)')
exit()
# f=4
def recordings():
xbmc.executebuiltin('ActivateWindow(tvrecordings)')
exit()
# f=5
def timers():
xbmc.executebuiltin('ActivateWindow(tvtimers)')
exit()
# f=6
def search():
xbmc.executebuiltin('ActivateWindow(tvsearch)')
exit()
# pvr can be disabled for recorded files - f=7
def recordedfiles():
xbmc.executebuiltin('Videos,smb://SourceTVRecordings/,return')
exit()
# pvr can be disabled for timeshift files - f=8
def timeshift():
xbmc.executebuiltin('Videos,smb://SourceTVRecordings/,return')
exit()
# print stars to show up in log and error notification
def printstar():
print "****************************************************************************"
print "****************************************************************************"
def error():
xbmc.executebuiltin('Notification(Check, smashingtv)')
exit()
# open channel or guide windows - f = 1,2
def opengroups():
if f == 1:
xbmc.executebuiltin('ActivateWindow(TVChannels)')
elif f == 2:
xbmc.executebuiltin('ActivateWindow(TVGuide)')
else:
xbmc.executebuiltin('Notification(Check, smashingtv)'); exit()
xbmc.executebuiltin('SendClick(28)')
xbmc.executebuiltin( "XBMC.Action(FirstPage)" )
# loop move down to correct group (if necessary)
if g > 1:
while (c <= g):
c = c + 1
xbmc.executebuiltin( "XBMC.Action(Down)" )
# open group if not using 'choose' option.
if g >=1:
xbmc.executebuiltin( "XBMC.Action(Select)" )
xbmc.executebuiltin( "XBMC.Action(Right)" )
xbmc.executebuiltin( "ClearProperty(SideBladeOpen)" )
# define file locations
def files():
SOURCEFILE = os.path.join(xbmc.translatePath('special://userdata/favourites/smashingtv/enablefile'), "enablepvr.txt")
TARGET = os.path.join(xbmc.translatePath('special://userdata/favourites/smashingtv'), "enablepvr.txt")
# permanentenable:
# Copy pvrenable.txt to favourites/smashingtv folder as marker and enable pvr.dvbviewer - f=9
# check if SOURCEFILE exists - if not give an error message
# check if TARGET exists - if so give a notification 'already enabled'
# copy SOURCEFILE to TARGET, enable and close
def permanentenable():
if not os.path.isfile(SOURCEFILE):
printstar()
print "smashingtv problem - check userdata/favourites/smashingtv/enablefile folder for missing pvrenable.txt"
printstar()
error()
if os.path.isfile(TARGET):
xbmc.executebuiltin('Notification(PVR is, already enabled)')
enable()
exit()
else:
shutil.copy(SOURCEFILE, TARGET)
xbmc.executebuiltin('Notification(PVR is, permanently enabled)')
enable()
exit()
#removepermanentcheck
# Remove pvrenable.txt from favourites/smashingtv folder f=10
def removepermanentcheck():
if not os.path.isfile(TARGET):
xbmc.executebuiltin('Notification(No PVR, lock found)')
disable()
exit()
else:
os.remove(TARGET)
xbmc.executebuiltin('Notification(PVR, unlocked)')
disable()
exit()
# Get on with it...
# disable or enable pvr.dvbviewer, exit if necessary, exit and print message if f is out of range
if f == 0:
disable()
exit()
elif f == 7 or f == 8:
disable()
elif f > 10 or f < 0:
printstar()
print "smashingtv exited 'cos f is out of range"
print "f is ",f
printstar()
error()
else:
enable()
if f == 1 or f == 2:
terms()
opengroups()
elif f == 3:
radio()
elif f == 4:
recordings()
elif f == 5:
timers()
elif f == 6:
search()
elif f == 7:
recordedfiles()
elif f == 8:
timeshift()
elif f == 9:
permanentenable()
enable()
elif f == 10:
removepermanentcheck()
disable()
else:
printstar()
print "smashingtv exited 'cos sumfink went rong"
printstar()
error()
| [
"[email protected]"
] | |
434bebc7cbd0dff66c53188da6ab39dc4e50735f | 7ca7eb242d5c2211bead3039ef8d2271a473bf00 | /ce86.py | 345e2164b3e2be729d3feb48a3c597ec651290ec | [] | no_license | raman934/python-2019 | 613214da5f4215f1db507321d94ac9d29784c126 | ba5cf4e2204349e3bc716d8a7ef68a863d74b547 | refs/heads/master | 2020-06-25T04:12:56.717409 | 2019-10-02T07:33:56 | 2019-10-02T07:33:56 | 199,197,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | # RG
# 7.3 # todo learn
def is_palindrome(s):
n = len(s)
if n == 1:
return True
elif s[0] == s[n - 1] and is_palindrome(s[1:n - 1]):
return True
return False
print(is_palindrome('level'))
| [
"[email protected]"
] | |
edc5c3eb6a2016b78d59f991f388998fe03d5bae | 8b3f06622b801801db3587e74e2c8ccf7227c25b | /models.py | 96a74f922609920396615303689b8aeb73f207b8 | [] | no_license | mayuri-shah/warbler | 2197b491f6f40f02e75471b5b7bed6dc75bef7e0 | 3ab262a86609e56efafbb1d2486fa8afcf20bb52 | refs/heads/main | 2023-04-28T19:37:39.626844 | 2021-05-26T14:28:48 | 2021-05-26T14:28:48 | 371,063,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,578 | py | """SQLAlchemy models for Warbler."""
from datetime import datetime
from flask_bcrypt import Bcrypt
from flask_sqlalchemy import SQLAlchemy
bcrypt = Bcrypt()
db = SQLAlchemy()
class Follows(db.Model):
"""Connection of a follower <-> followed_user."""
__tablename__ = 'follows'
user_being_followed_id = db.Column(
db.Integer,
db.ForeignKey('users.id', ondelete="cascade"),
primary_key=True,
)
user_following_id = db.Column(
db.Integer,
db.ForeignKey('users.id', ondelete="cascade"),
primary_key=True,
)
class Likes(db.Model):
"""Mapping user likes to warbles."""
__tablename__ = 'likes'
id = db.Column(
db.Integer,
primary_key=True
)
user_id = db.Column(
db.Integer,
db.ForeignKey('users.id', ondelete='cascade')
)
message_id = db.Column(
db.Integer,
db.ForeignKey('messages.id', ondelete='cascade'),
)
class User(db.Model):
"""User in the system."""
__tablename__ = 'users'
id = db.Column(
db.Integer,
primary_key=True,
)
email = db.Column(
db.Text,
nullable=False,
unique=True,
)
username = db.Column(
db.Text,
nullable=False,
unique=True,
)
image_url = db.Column(
db.Text,
default="/static/images/default-pic.png",
)
header_image_url = db.Column(
db.Text,
default="/static/images/warbler-hero.jpg"
)
bio = db.Column(
db.Text,
)
location = db.Column(
db.Text,
)
password = db.Column(
db.Text,
nullable=False,
)
messages = db.relationship('Message')
followers = db.relationship(
"User",
secondary="follows",
primaryjoin=(Follows.user_being_followed_id == id),
secondaryjoin=(Follows.user_following_id == id)
)
following = db.relationship(
"User",
secondary="follows",
primaryjoin=(Follows.user_following_id == id),
secondaryjoin=(Follows.user_being_followed_id == id)
)
likes = db.relationship(
'Message',
secondary="likes",
backref="users"
)
def __repr__(self):
return f"<User #{self.id}: {self.username}, {self.email}>"
def is_followed_by(self, other_user):
"""Is this user followed by `other_user`?"""
found_user_list = [user for user in self.followers if user == other_user]
return len(found_user_list) == 1
def is_following(self, other_user):
"""Is this user following `other_use`?"""
found_user_list = [user for user in self.following if user == other_user]
return len(found_user_list) == 1
@classmethod
def signup(cls, username, email, password, image_url):
"""Sign up user.
Hashes password and adds user to system.
"""
hashed_pwd = bcrypt.generate_password_hash(password).decode('UTF-8')
user = User(
username=username,
email=email,
password=hashed_pwd,
image_url=image_url,
)
db.session.add(user)
return user
@classmethod
def authenticate(cls, username, password):
"""Find user with `username` and `password`.
This is a class method (call it on the class, not an individual user.)
It searches for a user whose password hash matches this password
and, if it finds such a user, returns that user object.
If can't find matching user (or if password is wrong), returns False.
"""
user = cls.query.filter_by(username=username).first()
if user:
is_auth = bcrypt.check_password_hash(user.password, password)
if is_auth:
return user
return False
class Message(db.Model):
"""An individual message ("warble")."""
__tablename__ = 'messages'
id = db.Column(
db.Integer,
primary_key=True,
)
text = db.Column(
db.String(140),
nullable=False,
)
timestamp = db.Column(
db.DateTime,
nullable=False,
default=datetime.utcnow(),
)
user_id = db.Column(
db.Integer,
db.ForeignKey('users.id', ondelete='CASCADE'),
nullable=False,
)
user = db.relationship('User')
def connect_db(app):
"""Connect this database to provided Flask app.
You should call this in your Flask app.
"""
db.app = app
db.init_app(app)
| [
"[email protected]"
] | |
e87d75c1630684e37d72f6c38d6cea9b33695223 | 2471c87c3338eec3121499b34cb5684fd2eb9390 | /blackjack/blackjack.py | d7820d1aa6d6285eaaccd23eb28d61d413ecb206 | [] | no_license | nums11/OpenAIGym | 0c893c9a086ea4ea6590bca7aa79e24103fada60 | 5df988497426645dc229cccce1ca35fe7d880377 | refs/heads/main | 2023-04-16T07:54:27.282401 | 2021-04-12T21:55:43 | 2021-04-12T21:55:43 | 346,795,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,927 | py | import gym
import numpy as np
from tqdm import tqdm
import random
from agents.simple_q_learning_agent import SimpleQLearningAgent
from agents.advanced_q_learning_agent import AdvancedQLearningjackAgent
from agents.simple_monte_carlo_agent import SimpleMonteCarloAgent
from agents.advanced_monte_carlo_agent import AdvancedMonteCarloAgent
from agents.simple_sarsa_agent import SimpleSarsaAgent
from agents.advanced_sarsa_agent import AdvancedSarsaAgent
def comparePolicies(p1, p2):
print('State P1 Action P2 Action Different?')
for state, action in p1.items():
diff_string = ''
if action != p2[state]:
diff_string = ' DIFFERENT'
print(f'{state}: {action} {p2[state]}'
+ diff_string)
# Actions: 0 - stick, 1 - hit
# Observation:
# 0 - Player's current sum
# 1 - The dealer's one showing card
# 2 - Whether or not the player holds a usable ace (0 or 1)
env = gym.make('Blackjack-v0')
state_space_size = 2
action_space_size = 2
num_episodes = 1000000
# simple_ql_agent = SimpleQLearningAgent()
# simple_ql_agent.train(env, num_episodes)
advanced_ql_agent = AdvancedQLearningjackAgent()
# advanced_ql_agent.train(env, num_episodes)
simple_monte_carlo_agent = SimpleMonteCarloAgent()
# simple_monte_carlo_agent.train(env, num_episodes)
advanced_monte_carlo_agent = AdvancedMonteCarloAgent()
# advanced_monte_carlo_agent.train(env,num_episodes)
# simple_sarsa_agent = SimpleSarsaAgent()
# simple_sarsa_agent.train(env, num_episodes)
advanced_sarsa_agent = AdvancedSarsaAgent()
# advanced_sarsa_agent.train(env, num_episodes)
simple_mc_policy_100 = np.load('policies/simple_mc_policy_100.npy',allow_pickle='TRUE').item()
simple_mc_policy_1k = np.load('policies/simple_mc_policy_1k.npy',allow_pickle='TRUE').item()
simple_mc_policy_1k_disc_09 = np.load('policies/simple_mc_policy_1k_disc_09.npy',allow_pickle='TRUE').item()
simple_mc_policy_1k_disc_095 = np.load('policies/simple_mc_policy_1k_disc_095.npy',allow_pickle='TRUE').item()
simple_mc_policy_10k = np.load('policies/simple_mc_policy_10k.npy',allow_pickle='TRUE').item()
simple_mc_policy_10k_disc_095 = np.load('policies/simple_mc_policy_10k_disc_095.npy',allow_pickle='TRUE').item()
simple_mc_policy_100k = np.load('policies/simple_mc_policy_100k.npy',allow_pickle='TRUE').item()
simple_mc_policy_1mil = np.load('policies/simple_mc_policy_1mil.npy',allow_pickle='TRUE').item()
advanced_mc_policy_100 = np.load('policies/advanced_mc_policy_100.npy',allow_pickle='TRUE').item()
advanced_mc_policy_1k = np.load('policies/advanced_mc_policy_1k.npy',allow_pickle='TRUE').item()
advanced_mc_policy_10k = np.load('policies/advanced_mc_policy_10k.npy',allow_pickle='TRUE').item()
advanced_mc_policy_100k = np.load('policies/advanced_mc_policy_100k.npy',allow_pickle='TRUE').item()
advanced_mc_policy_1mil = np.load('policies/advanced_mc_policy_1mil.npy',allow_pickle='TRUE').item()
custom_policy = np.load('policies/custom_policy.npy',allow_pickle='TRUE').item()
custom_policy_two = np.load('policies/custom_policy_two.npy',allow_pickle='TRUE').item()
custom_policy_three = np.load('policies/custom_policy_three.npy',allow_pickle='TRUE').item()
custom_policy_four = np.load('policies/custom_policy_four.npy',allow_pickle='TRUE').item()
articl_policy = np.load('policies/articl_policy.npy',allow_pickle='TRUE').item()
simple_ql_policy = np.load('policies/simple_ql_policy.npy',allow_pickle='TRUE').item()
simple_ql_policy_two = np.load('policies/simple_ql_policy_two.npy',allow_pickle='TRUE').item()
advanced_ql_policy_two = np.load('policies/advanced_ql_policy_two.npy',allow_pickle='TRUE').item()
advanced_ql_policy_three = np.load('policies/advanced_ql_policy_three.npy',allow_pickle='TRUE').item()
simple_sarsa_policy = np.load('policies/simple_sarsa_policy.npy',allow_pickle='TRUE').item()
advanced_sarsa_policy = np.load('policies/advanced_sarsa_policy.npy',allow_pickle='TRUE').item()
# print("custom_policy_four", custom_policy_four)
# simple_ql_policy = np.load('simple_ql_policy.npy',allow_pickle='TRUE').item()
# comparePolicies(advanced_ql_policy_two, advanced_ql_policy_three)
# simple_ql_agent.test(env, 100000, simple_ql_policy)
# advanced_ql_agent.test(env, 100000, advanced_ql_policy_three)
# simple_monte_carlo_agent.test(env, 1000000, simple_mc_policy_10k_disc_095)
# advanced_monte_carlo_agent.test(env, 1000000, articl_policy)
# simple_sarsa_agent.test(env, 100000, simple_sarsa_policy)
advanced_sarsa_agent.test(env, 100000, advanced_sarsa_policy)
# rewards = []
# with open('advanced_rewards') as f:
# rewards = f.read().splitlines()
# rewards = np.array(rewards)
# rewards = [float(i) for i in rewards]
# split = 100000
# rewards_per_split_episodes = np.split(np.array(rewards),num_episodes/split)
# count = split
# print(f"\n********Average reward per {split} episodes ********\n")
# for r in rewards_per_split_episodes:
# print(count, ": ", str(sum(r/split)))
# count += split | [
"[email protected]"
] | |
906e3e03a8bec34146c12f47b27c47b30a868b4b | 91a5afd451c999092838d6cf8c3b8478783750ad | /ch03_Neural_Network/ex03_MNIST_example_and_batch.py | 846049a0c6c2018337daf51e7c1e3345b7ef4610 | [] | no_license | DeokO/Deeplearning-from-scratch | eda7c66b53cdcaf43368bd26753bd5463f0c1e08 | a1c902871d243f6c510231d09e9c65decb75361d | refs/heads/master | 2021-06-24T14:58:08.168127 | 2017-09-13T06:03:24 | 2017-09-13T06:03:24 | 103,262,108 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,545 | py | # coding: utf-8
#MNIST : 28*28의 회색조 이미지이며, 각 픽셀은 0부터 255의 값을 가짐. 숫자 label이 붙어있음. 학습데이터 60,000장, 시험 데이터 10,000장
import sys, os
sys.path.append(os.pardir) #부모 디렉터리의 파일을 가져오기 위해 path를 지정해둔다.
from dataset.mnist import load_mnist
#처음 한번은 몇 분 정도 걸림
(x_train, t_train), (x_test, t_test) = load_mnist(flatten=True, normalize=False) #http://yann.lecun.com/exdb/mnist/ 이 사이트에서 데이터를 받아와야 하는데 이거 자체가 먹통인 상황.
#각 데이터의 형상 출력
print(x_train.shape) #(60000, 784)
print(t_train.shape) #(60000, )
print(x_test.shape) #(10000, 784)
print(t_test.shape) #(10000, )
import sys, os
sys.path.append(os.pardir)
import numpy as np
from dataset.mnist import load_mnist
from PIL import Image #python image library 모듈
def img_show(img):
pil_img = Image.fromarray(np.uint8(img))
pil_img.show()
img = x_train[0]
label = t_train[0]
print(label) #5
print(img.shape) # (784, )
img = img.reshape(28, 28) #원래 이미지 모양으로 변형
print(img.shape) # (28, 28)
img_show(img) #이미지 열기
#신경망의 추론 처리
#input : 784 차원 -> output : 10 차원
#은닉층 h1 : 50개 뉴런, h2 : 100개 뉴런
import pickle, os
#data load function
def get_data():
(x_train, t_train), (x_test, t_test) = \
load_mnist(flatten=True, normalize=True, one_hot_label=False)
return x_test, t_test
#network initialize
os.chdir('./ch03_Neural_Network') # 현재 dir 설정 (sample_weight.pkl 불러오기 위함)
def init_network():
with open("sample_weight.pkl", 'rb') as f:
network = pickle.load(f)
return network
#activation-sigmoid
def sigmoid(x):
return 1/(1+np.exp(-x))
#activation-softmax
def softmax(a):
c = np.max(a)
exp_a = np.exp(a-c)
sum_exp_a = np.sum(exp_a)
return exp_a/sum_exp_a
#예측된 y 산출
def predict(network, x):
W1, W2, W3 = network['W1'], network['W2'], network['W3']
b1, b2, b3 = network['b1'], network['b2'], network['b3']
a1 = np.dot(x, W1) + b1
z1 = sigmoid(a1)
a2 = np.dot(z1, W2) + b2
z2 = sigmoid(a2)
a3 = np.dot(z2, W3) + b3
y = softmax(a3)
return y
#정확도 평가
# x, t = get_data()
x, t = x_test, t_test
network = init_network()
accuracy_cnt = 0
for i in range(len(x)):
print(i)
y = predict(network, x[i])
p = np.argmax(y)
if p == t[i]:
accuracy_cnt += 1
print("Accuracy:" + str(float(accuracy_cnt)/len(x)))
#구조 확인
x, _ = get_data()
network = init_network()
W1, W2, W3 = network['W1'], network['W2'], network['W3']
x.shape
print(x[0].shape, W1.shape, W2.shape, W3.shape,)
#배치 처리
x, t = get_data()
network = init_network()
batch_size = 100
accuracy_cnt = 0
for i in range(0, len(x), batch_size): #0부터 데이터 개수번째까지, batch_size 단위로 커지면서 seq 생성
print(i)
x_batch = x[i:i+batch_size]
y_batch = predict(network, x_batch)
p = np.argmax(y_batch, axis=1) #y_batch.shape : (100, 10). 행단위로 최대 arg를 output으로 생각하자. 열 전체를 보고 argmax 구함: axis=1, 행 전체를 보고 argmax 구함: axis=0
accuracy_cnt += np.sum(p == t[i:i+batch_size]) #boolean을 np.sum 하면 int형으로 더해짐
print("Accuracy:" + str(float(accuracy_cnt)/len(x)))
| [
"[email protected]"
] | |
5d30285999bbb36f4f2e1fd387a252661c7caafe | 988c1009561153d0d7682c833b03babe4c2ce12d | /Qust/q3.py | cc2a39cc738dd387d0b426b38fc32855670f53b0 | [] | no_license | shibushimeiyoumingzile/week1 | f61c9da316b35db07236853de42e5aaf8c50561a | 6bd63c287f1a8e26141324e00668b65b997d702d | refs/heads/master | 2020-04-24T09:44:15.210920 | 2019-02-21T12:56:11 | 2019-02-21T12:56:11 | 171,871,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | import random
list = []
for i in range(1,21):
a = random.randint(1,10)
list.append(a)
print(list)
li = sorted(list[::2],reverse=True)
print(li) | [
"[email protected]"
] | |
9e72b47e9121173ec1189fe4f3ee46d3f7ef2635 | 4062ffcc0fdd4308aa75e4883a02b190a802759b | /backend/core/urls.py | d2de78f5f85479db5ae4933609eaee1a373314c6 | [] | no_license | 5000eliteprw/RestaurantReview | 89be210a6268855c9add0e47276b84e71b5df384 | 547e648eaaa11ee79b0a46b82f6203543d48920f | refs/heads/master | 2023-06-16T15:18:01.277155 | 2021-07-10T08:06:14 | 2021-07-10T08:06:14 | 384,647,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 966 | py | """core URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from .swagger import urlpatterns as swaggerpatterns
from backend.core import views
urlpatterns = [
path('admin', admin.site.urls),
path('api/', include('backend.api.urls')),
re_path(r'^', views.FrontendAppView.as_view()),
*swaggerpatterns
]
| [
"[email protected]"
] | |
b4648d3ef6b9bf7f0f856176fd2bb007ac18a41b | 181b482a7089bb891c8d2af63fd6ff366be6f703 | /passtable/table.py | 092c099c72d20447ef6575a05756ba85baa80a26 | [] | no_license | mferland/experiments | 9f30bed1fcdba07cba58e52f3577c76f52df22d6 | e760519f0e702371504fc683b873ad85b75d5595 | refs/heads/master | 2020-07-02T11:58:06.272116 | 2016-04-02T00:10:44 | 2016-04-02T00:10:44 | 52,111,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,972 | py | #!/usr/bin/python
def split_list(alist, chunks=1):
length = len(alist)
return [ alist[i*length // chunks: (i+1)*length // chunks]
for i in range(chunks) ]
def distribute(string, chunks):
if chunks == len(string):
# i.e.: 'abc' in 3 chunks ==> 'a', 'b', 'c'
return list(string)
if chunks == 1:
# i.e.: 'abc' in 1 chunk ==> 'abc'
return [string]
if (chunks < len(string)):
# i.e.: 'abcdef' in 3 chunks ==> 'ab', 'cd', 'ef'
return sorted(split_list(string, chunks))
else:
# i.e.: 'abc' in 4 chunks ==> 'a', 'a', 'b', 'c',
return sorted(list((string * (chunks // len(string) + 1))[:chunks]))
def recurse(pool, first_index, count, alist):
if count == 1:
return
start_point = distribute(pool, count)
i = first_index
for s in start_point:
alist[i].append(s)
i+=1
for s in sorted(set(start_point)):
recurse(pool, start_point.index(s), start_point.count(s), alist)
def generate_table(threads, pwlen, pool):
if threads > len(pool) ** pwlen or (threads > 1 and len(pool) == 1):
raise AttributeError('More threads than available passwords')
table=[]
start_point = distribute(pool, threads)
for s in sorted(set(start_point)):
count = start_point.count(s)
if count == 1:
row = [s] + [pool] * (pwlen - 1)
table.append(row)
else:
rows = [[] for x in xrange(count)]
recurse(pool, 0, count, rows)
for r in rows:
r = [s] + r
remaining = pwlen - len(r)
if remaining > 0:
r = r + [pool] * remaining
table.append(r)
return table
def find_nearest_match(table, firstpw):
nearest = []
for row in table:
tmp = ''
for i, e in enumerate(row):
if firstpw[i] in e:
tmp += firstpw[i]
elif abs(ord(firstpw[i]) - ord(e[0])) > abs(ord(firstpw[i]) - ord(e[-1])):
tmp += e[-1]
else:
tmp += e[0]
nearest.append(tmp)
print nearest
def print_table(table):
for row in table:
print row
# generate_table(3, 5, 'abcdefghij')
# happy path
# print_table(generate_table(1, 1, 'a'))
# print '===='
# print_table(generate_table(1, 2, 'a'))
# print '===='
# print_table(generate_table(1, 2, 'abc'))
# print '===='
print_table(generate_table(2, 2, 'abc'))
print '===='
print_table(generate_table(2, 3, 'abc'))
print '===='
print_table(generate_table(16, 5, 'abcde'))
find_nearest_match(generate_table(16, 5, 'abcde'), 'aabbc')
# print '===='
# print_table(generate_table(8, 2, 'abc'))
# print '===='
# print_table(generate_table(9, 2, 'abc'))
# print '===='
# print_table(generate_table(9, 3, 'abc'))
# print '===='
# print_table(generate_table(9, 5, 'ab'))
# print '===='
# print_table(generate_table(12, 5, 'ab'))
| [
"[email protected]"
] | |
019224f7f5c9f1c5399ec71d8eeed014832fc4f8 | 4f56670a3c4c8c2d3c269c0ee8ff47a4db31a43a | /craps_classes.py | c05cc83852655cf0754bf95ee35e6c3e8729e77b | [] | no_license | mejongetje/Craps | a663e1751f36a90f02c68d9478c55e767c42b5f2 | 9869b290b3ae197e3181208d7c6c96a859c86d70 | refs/heads/main | 2023-07-02T04:08:57.482165 | 2021-08-08T12:29:47 | 2021-08-08T12:29:47 | 393,954,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | import random
class Die:
def __init__(self, eyes=6):
self.eyes = eyes
def throw_die(self):
result = random.randint(1,self.eyes)
return result
class Player:
players = []
def __init__(self, name, position=0, balance=50):
self.name = name
self.balance = balance
self.position = position
__class__.players.append(self)
def __repr__(self):
return self.name
def positioning(self, other):
x = random.randint(1,2)
self.position = 1 if x == 1 else 2
other.position = 1 if x == 2 else 2
if self.position == 1:
shooter = self
else:
shooter = other
return shooter
| [
"[email protected]"
] | |
dd814c9e5a365e583776871999a9fa14e4d4e014 | 0e33389f026a7b249e46825dd0d7c192cee91666 | /db.py | 13d5717dabe0f8099aadc9472ac570b16aa993e5 | [] | no_license | sachinprajapati/paym | 59b17fc97dfaa279ddbc236c49822ef257fa7195 | f5f87525e2618e327930957d2209ced549db4b89 | refs/heads/master | 2023-02-16T06:48:48.513720 | 2021-01-18T12:09:04 | 2021-01-18T12:09:04 | 328,702,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,146 | py | from settings import *
class DB():
db_name = dir_path+'/demo.db'
conn = None
curr = None
table = None
schema = """CREATE TABLE Cards (
id integer PRIMARY KEY AUTOINCREMENT,
card_num int NOT NULL UNIQUE,
date text NOT NULL,
cvv int NOT NULL,
pin int NOT NULL
);
CREATE TABLE Transactions (
id integer PRIMARY KEY AUTOINCREMENT,
tid varchar(255) NOT NULL UNIQUE,
amount int NOT NULL,
status varchar(255) NOT NULL,
message varchar(255) NOT NULL,
card_id integer,
dt TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
FOREIGN KEY (card_id) REFERENCES Cards (card_id)
)"""
def __init__(self, table):
if not os.path.exists(self.db_name):
self.conn = connect(self.db_name)
self.curr = self.conn.cursor()
self.curr.executescript(self.schema)
self.conn.commit()
else:
self.conn = connect(self.db_name)
self.curr = self.conn.cursor()
self.table = table
def InsertOne(self, myDict):
placeholders = ', '.join(['?'] * len(myDict))
columns = ', '.join(myDict.keys())
sql = "INSERT INTO {0} ({1}) VALUES ({2})".format(self.table, columns, placeholders)
if myDict.get('date'):
if isinstance(myDict['date'], datetime):
myDict['date'] = myDict['date'].strftime("%Y-%m")
try:
print("sql is", sql, list(myDict.values()))
self.curr.execute(sql, list(myDict.values()))
self.conn.commit()
except IntegrityError as e:
print(e)
def getDetail(self, **kwargs):
sql = "select id from {0} where {1}".format(self.table, ' AND '.join('{} = {}'.format(key, value) for key, value in kwargs.items()))
self.curr.execute(sql)
return self.curr.fetchone()
def DateFilter(self, date):
print("date is", date.day, date.month, date.year)
sql = """select card_num, cvv, date, pin, count(case message when 'SUCCESS' then 1 else null end) as
success FROM Transactions left join Cards on Cards.id=Transactions.card_id where dt BETWEEN
'{0}-{1:02d}-{2:02d}' AND '{0}-{1:02d}-{3:02d}' group by Cards.id order by success;""".format(date.year, date.month, date.day, date.day+1)
print("sql is", sql)
self.curr.execute(sql)
return self.curr.fetchall() | [
"[email protected]"
] | |
bae738bc7e7a8bb92e7ac2548c71d0c31bd31a99 | 6197379b99f2de08bc01c495cee1666feec732eb | /rexster_rest/client.py | 79b6e132354103b339ae2ff8f7b4601f08ada24d | [] | no_license | windj007/python-rexster-rest | 8160e149c8afd465ac59697dacf6bccaa1f75831 | cd8119961315e890680ec965b6cb0be2b604da22 | refs/heads/master | 2020-03-28T18:52:30.104520 | 2015-09-01T22:25:02 | 2015-09-01T22:25:02 | 41,481,666 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,496 | py | import os, glob
from pyarc import ClientBase
from rexster_rest.query import Q, format_typed_value
# Bug fix: Python's star-import machinery honors ``__all__`` (lowercase);
# the original spelled it ``__ALL__``, which the interpreter ignores, so
# ``from rexster_rest.client import *`` leaked every module-level name.
# The old name is kept as an alias for any code that referenced it directly.
__ALL__ = __all__ = ['RexsterClient', 'Dir', 'Q']
class Urls:
    """URI templates for the Rexster REST API.

    Each attribute is a path template whose ``{placeholder}`` fields are
    filled in via ``str.format`` by the client (``graph`` is supplied as a
    default URL argument; the rest come from individual calls).
    """
    # graph-level
    GRAPHS       = '/graphs'
    GRAPH        = '/graphs/{graph}'
    # vertices and their neighborhoods
    VERTICES     = '/graphs/{graph}/vertices'
    VERTEX       = '/graphs/{graph}/vertices/{vertex_id}'
    INCIDENT     = '/graphs/{graph}/vertices/{vertex_id}/{direction}'
    INCIDENT_CNT = '/graphs/{graph}/vertices/{vertex_id}/{direction}Count'
    INCIDENT_IDS = '/graphs/{graph}/vertices/{vertex_id}/{direction}Ids'
    ADJACENT_E   = '/graphs/{graph}/vertices/{vertex_id}/{direction}E'
    # edges
    EDGES        = '/graphs/{graph}/edges'
    EDGE         = '/graphs/{graph}/edges/{edge_id}'
    # manual indices
    INDICES      = '/graphs/{graph}/indices'
    INDEX        = '/graphs/{graph}/indices/{index_id}'
    INDEX_CNT    = '/graphs/{graph}/indices/{index_id}/count'
    # key indices
    KEYS         = '/graphs/{graph}/keyindices'
    KEYS_V       = '/graphs/{graph}/keyindices/vertex'
    KEYS_E       = '/graphs/{graph}/keyindices/edge'
    KEY_V        = '/graphs/{graph}/keyindices/vertex/{key}'
    KEY_E        = '/graphs/{graph}/keyindices/edge/{key}'
    # Gremlin script execution endpoints
    GREMLIN_G    = '/graphs/{graph}/tp/gremlin'
    GREMLIN_V    = '/graphs/{graph}/vertices/{vertex_id}/tp/gremlin'
    GREMLIN_E    = '/graphs/{graph}/edges/{edge_id}/tp/gremlin'
class Dir:
    """Symbolic names for the traversal directions understood by Rexster."""
    IN = 'in'      # edges pointing into the vertex
    OUT = 'out'    # edges leaving the vertex
    BOTH = 'both'  # edges in either direction
# Immutable set of every valid direction, for membership checks.
_DIRS = frozenset((Dir.IN, Dir.OUT, Dir.BOTH))
class _ItemGetter(object):
def __init__(self, base_future, attrib, default = None):
self.base_future = base_future
self.attrib = attrib
self.default = None
def get(self):
return self.base_future.get().get(self.attrib, self.default)
class _FirstGetter(object):
def __init__(self, impl):
self.impl = impl
def get(self):
res = self.impl.get()
if len(res) > 0:
return res[0]
return None
class RexsterClient(ClientBase):
    def __init__(self, base_url, graph, async = False):
        """Create a client bound to a single named graph on a Rexster server.

        base_url -- root URL of the Rexster REST endpoint.
        graph    -- graph name substituted into every request URL template.
        async    -- forwarded to ClientBase; presumably toggles non-blocking
                    request handling -- TODO confirm against pyarc.
                    NOTE(review): ``async`` became a reserved keyword in
                    Python 3.7, so this signature only parses on older
                    interpreters; a rename would break keyword callers.
        """
        super(RexsterClient, self).__init__(base_url,
                          add_headers = { 'Content-Type' : 'application/json; charset=utf-8' },
                          default_url_args = { 'graph' : graph },
                          async = async)
        self.scripts = {}
        # Pre-load the Gremlin scripts bundled alongside this module.
        self.refresh_scripts(os.path.join(os.path.dirname(__file__), 'scripts'))
################################ GET operations ###########################
def vertices(self, key = None, value = None):
args = { 'key' : key, 'value' : format_typed_value(value) } if not key is None else {}
return self.get(Urls.VERTICES, query_args = args)
def vertex(self, _id):
return self.get(Urls.VERTEX, url_args = { 'vertex_id' : _id })
def incident(self, _id, direction, *query_args, **query_kwargs):
return self._neighbor_impl(Urls.INCIDENT, _id, direction, *query_args, **query_kwargs)
def count_incident(self, _id, direction, *query_args, **query_kwargs):
return self._neighbor_impl(Urls.INCIDENT_CNT, _id, direction, *query_args, **query_kwargs)
def incident_ids(self, _id, direction, *query_args, **query_kwargs):
return self._neighbor_impl(Urls.INCIDENT_IDS, _id, direction, *query_args, **query_kwargs)
def edges(self, key = None, value = None):
args = { 'key' : key, 'value' : format_typed_value(value) } if not key is None else {}
return self.get(Urls.EDGES, query_args = args)
def edge(self, _id):
return self.get(Urls.EDGE, url_args = { 'edge_id' : _id })
def adjacent_edges(self, _id, direction, *query_args, **query_kwargs):
return self._neighbor_impl(Urls.ADJACENT_E, _id, direction, *query_args, **query_kwargs)
def indices(self):
return self.get(Urls.INDICES)
def query_index(self, index, key, value):
return self._index_impl(Urls.INDEX, index, key, value)
def query_index_cnt(self, index, key, value):
return self._index_impl(Urls.INDEX_CNT, index, key, value)
def keys(self):
return self.get(Urls.KEYS)
def keys_vertex(self):
return self.get(Urls.KEYS_V)
def keys_edge(self):
return self.get(Urls.KEYS_E)
def _index_impl(self, url_template, index, key, value):
return self.get(url_template,
url_args = { 'index_id' : index },
query_args = { 'key' : key, 'value' : format_typed_value(value) })
def _neighbor_impl(self, url_template, _id, direction, *query_args, **query_kwargs):
if not direction in _DIRS:
raise ValueError('"%s" is not a valid direction (only %s allowed)' % (direction,
', '.join(_DIRS)))
return self.get(url_template,
url_args = { 'vertex_id' : _id, 'direction' : direction },
query_args = Q(*query_args, **query_kwargs).build())
############################### POST operations ###########################
def create_vertex(self, **properties):
return self.post(Urls.VERTICES, payload = properties)
def create_vertex_with_known_id(self, _id, **properties):
return self.post(Urls.VERTEX,
url_args = { 'vertex_id' : _id },
payload = properties)
def upsert_vertex(self, _id, **properties):
return self.post(Urls.VERTEX, url_args = { 'vertex_id' : _id }, payload = properties)
def create_edge(self, _outV, _inV, _label, **properties):
payload = {'_outV' : _outV,
'_inV' : _inV,
'_label' : _label}
payload.update(properties)
return self.post(Urls.EDGES, payload = payload)
def create_edge_with_known_id(self, _id, _outV, _inV, _label, **properties):
payload = {'_outV' : _outV,
'_inV' : _inV,
'_label' : _label}
payload.update(properties)
return self.post(Urls.EDGES, url_args = { 'edge_id' : _id }, payload = payload)
def update_edge(self, _id, **properties):
return self.post(Urls.EDGES, url_args = { 'edge_id' : _id }, payload = properties)
def create_index(self, index_id, **params):
params['class'] = 'vertex'
return self.post(Urls.INDEX, url_args = { 'index_id' : index_id }, payload = params)
def create_key_index_vertex(self, index_id, key):
return self.post(Urls.KEY_V, url_args = { 'index_id' : index_id, 'key' : key })
def create_key_index_edge(self, index_id, key):
return self.post(Urls.KEY_E, url_args = { 'index_id' : index_id, 'key' : key })
############################### PUT operations ###########################
def update_vertex_put(self, _id, **properties):
return self.put(Urls.VERTEX, url_args = { 'vertex_id' : _id }, payload = properties)
def update_edge_put(self, _id, **properties):
return self.put(Urls.EDGES, url_args = { 'edge_id' : _id }, payload = properties)
def index_vertex(self, index_id, vertex_id, key, value):
return self.put(Urls.INDEX,
url_args = { 'index_id' : index_id },
payload = {'id' : vertex_id,
'key' : key,
'value' : value })
############################## DELETE operations ##########################
def delete_vertex(self, _id):
return self.delete(Urls.VERTEX, url_args = { 'vertex_id' : _id })
def delete_vertex_properties(self, _id, *keys):
return self.delete(Urls.VERTEX, url_args = { 'vertex_id' : _id }, query_args = { k : '' for k in keys })
def delete_edge(self, _id):
return self.delete(Urls.EDGE, url_args = { 'edge_id' : _id })
def delete_edge_properties(self, _id, *keys):
return self.delete(Urls.EDGE, url_args = { 'edge_id' : _id }, query_args = { k : '' for k in keys })
def drop_index(self, _id):
return self.delete(Urls.INDEX, url_args = { 'index_id' : _id })
def remove_vertex_from_index(self, index_id, vertex_id, key, value):
return self.delete(Urls.INDEX,
url_args = { 'index_id' : index_id },
query_args = {'id' : vertex_id,
'key' : key,
'value' : format_typed_value(value),
'class' : 'vertex' })
############################### Scripts ###########################
def refresh_scripts(self, dirname):
for f in glob.glob(os.path.join(dirname, "*.groovy")):
self.load_script(f)
def load_script(self, filename):
if not os.path.isfile(filename):
return
script_name = os.path.splitext(os.path.basename(filename))[0]
with open(filename, 'r') as f:
self.scripts[script_name] = f.read()
def run_script_on_graph(self, script_code_or_name, **params):
return self.post(Urls.GREMLIN_G, payload = {
"params" : params,
"script" : self.scripts.get(script_code_or_name,
script_code_or_name)
})
def run_script_on_vertex(self, script_code_or_name, vertex_id, **params):
return self.post(Urls.GREMLIN_V,
url_args = { 'vertex_id' : vertex_id },
payload = {
"params" : params,
"script" : self.scripts.get(script_code_or_name,
script_code_or_name)
})
def run_script_on_edge(self, script_code_or_name, edge_id, **params):
return self.post(Urls.GREMLIN_E,
url_args = { 'edge_id' : edge_id },
payload = {
"params" : params,
"script" : self.scripts.get(script_code_or_name,
script_code_or_name)
})
def lookup_vertex(self, *query_args, **query_kwargs):
q_str = Q(*query_args, **query_kwargs).build_gremlin()
if q_str:
q_str = 'g.query().%s.vertices()' % q_str
return self.run_script_on_graph(q_str)
def get_unique_vertex(self, *query_args, **query_kwargs):
return _FirstGetter(self.lookup_vertex(*query_args, **query_kwargs))
def lookup_edge(self, *query_args, **query_kwargs):
q_str = Q(*query_args, **query_kwargs).build_gremlin()
if q_str:
q_str = 'g.E.%s.toList()' % q_str
return self.run_script_on_graph(q_str)
def upsert_vertex_custom_id(self, id_prop, id_value, label = None, **props):
return self.run_script_on_graph('upsert_vertex',
id_prop = id_prop,
id_value = id_value,
label = label,
properties = props)
def delete_vertices(self, *query_args, **query_kwargs):
q_str = Q(*query_args, **query_kwargs).build_gremlin()
if q_str:
q_str = 'g.query().%s.vertices().each { g.removeVertex(it) } ; g.commit()' % q_str
return self.run_script_on_graph(q_str)
############################## Overrides ##########################
def do_req(self, *args, **kwargs):
base_future = super(RexsterClient, self).do_req(*args, **kwargs)
return _ItemGetter(base_future, 'results')
| [
"[email protected]"
] | |
6d3a4a146ddc89f0e0c9dd1362b0945b5dffba41 | 1d2b1e1d2573f15ec964235fed2b76b3436735f1 | /agents/simple_agent.py | bf55f079a456dee0c413eb243fb2731d5c19713f | [] | no_license | hellovertex/hanabi | 8e7eeefd27b8237f0b393eb9690920b64755dcaa | e8aefac46a5814296a5499b99d1c07bf5a6e9964 | refs/heads/master | 2023-01-07T04:40:38.671048 | 2020-09-24T11:37:22 | 2020-09-24T11:37:22 | 184,106,764 | 2 | 1 | null | 2022-11-21T21:54:50 | 2019-04-29T16:33:17 | Python | UTF-8 | Python | false | false | 2,783 | py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
"""Simple Agent."""
from hanabi_learning_environment.rl_env import Agent
class SimpleAgent(Agent):
"""Agent that applies a simple heuristic."""
def __init__(self, config, *args, **kwargs):
"""Initialize the agent."""
self.config = config
self.max_information_tokens = config['max_information_tokens']
@staticmethod
def playable_card(card, fireworks):
"""A card is playable if it can be placed on the fireworks pile."""
return card['rank'] == fireworks[card['color']]
def act(self, observation):
"""Act based on an observation."""
if observation['current_player_offset'] != 0:
return None
# Check if there are any pending hints and play the card corresponding to
# the hint.
for card_index, hint in enumerate(observation['card_knowledge'][0]):
if hint['color'] is not None or hint['rank'] is not None:
return {'action_type': 'PLAY', 'card_index': card_index}
# Check if it's possible to hint a card to your colleagues.
fireworks = observation['fireworks']
if observation['information_tokens'] > 0:
# Check if there are any playable cards in the hands of the opponents.
for player_offset in range(1, observation['num_players']):
player_hand = observation['observed_hands'][player_offset]
player_hints = observation['card_knowledge'][player_offset]
# Check if the card in the hand of the opponent is playable.
for card, hint in zip(player_hand, player_hints):
if SimpleAgent.playable_card(card,
fireworks) and hint['color'] is None:
#return {
# 'action_type': 'REVEAL_COLOR',
# 'color': card['color'],
# 'target_offset': player_offset
# }
return {
'action_type': 'REVEAL_RANK',
'rank': card['rank'],
'target_offset': player_offset
}
# If no card is hintable then discard or play.
if observation['information_tokens'] < self.max_information_tokens:
return {'action_type': 'DISCARD', 'card_index': 0}
else:
return {'action_type': 'PLAY', 'card_index': 0}
| [
"[email protected]"
] | |
1e96507dae057ddf6c7da82b6d321afb5554140d | e2b1008be564227f4c87e23c66d7981dfcd61024 | /src/modules/pipe09_Hierarchical_Cluster.py | d1f1476f15546bdcf468da294127442864734c7f | [] | no_license | codeaudit/mia | 39092664f66d10d017ce41f17a67d115109297af | 2b82f9708f828faa2d5bf3a218b06a1db66599cc | refs/heads/master | 2021-01-22T23:10:35.574187 | 2015-12-11T18:22:32 | 2015-12-11T18:22:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,115 | py | #-*- coding: utf-8 -*-
'''
Created 08/08/2012
Updated 01/09/2014
Updated 11/10/2015 - get_params()
@author: Damian Kao
@url: http://blog.nextgenetics.net/?e=44
'''
import FileDialog
from scipy.cluster.hierarchy import linkage
from scipy.cluster.hierarchy import dendrogram
import classes.BarGraphic as graphPac
import matplotlib.pyplot as plt
import classes.Drosophila as dro
import os
import numpy as np
class Pipe():
def __init__(self, desk):
self.desk = desk
self.failure = True
self.error_msg = ''
try:
desk.get_params()
except:
self.error_msg = 'Could not get parameters.'
return
if desk.cluster_method == 'complete':
desk.cluster_method_desc = desk.cluster_method + '-max'
elif desk.cluster_method == 'single':
desk.cluster_method_desc = desk.cluster_method + '-min'
elif desk.cluster_method == 'weighted':
desk.cluster_method_desc = desk.cluster_method
else:
desk.cluster_method = 'centroid'
desk.cluster_method_desc = desk.cluster_method
desk.cluster_method_desc = desk.cluster_method_desc[0].upper() + desk.cluster_method_desc[1:]
if desk.organism == '':
self.error_msg = "Define the organism or write 'any'"
return
if desk.gene == '' and desk.title == '':
self.error_msg = 'Define at least one Gene or Title'
return
''' -----------------------------------------'''
if desk.each_all == 'each':
matLoopList = [ [desk.withCorrection, desk.minmax]]
else:
matLoopList = [ [kCorr, kMinMax] for kCorr in [False, True] for kMinMax in ['mincut','maxmer']]
for matLoop in matLoopList:
if not self.looping(desk, matLoop):
return
self.failure = False
self.error_msg = 'Task ended. All right.'
return
def looping(self, desk, opt):
plt.close("all")
plt.clf()
desk.withCorrection = opt[0]
desk.minmax = opt[1]
if desk.withCorrection:
self.str_correction = '-bias corr.'
self.filename_correction = '_bias_corr'
else:
self.str_correction = ''
self.filename_correction = ''
print("\n--->>>", desk.minmax, self.str_correction)
desk.colorThreshold = desk.colorThreshold_var.get()
if desk.mnat:
desk.unit = 'mnat'
desk.factor = 1000
desk.roundVal = 2
else:
desk.unit = 'nat'
desk.factor = 1
desk.roundVal = 4
if desk.vert_horiz == 'HMI':
xLabel = 'JSD(HMI) (%s)'%(desk.unit)
title = "Hierarchical Cluster Method=%s of JSD(HMI)- %s %s %s"\
%(desk.cluster_method_desc, desk.organism, desk.seqType, desk.gene)
if desk.frame > 0:
title += '\nJSD(HMI) %s%s, desk.frame %i, #letter %i, min(L)=%i, min(#seqs)=%i' % \
(desk.minmax, self.str_correction, desk.frame, desk.numOfLetters, desk.cutoffLength, desk.cutoffNumSeq)
else:
title += '\n%s%s, letter %i, min(L)=%i, min(#seqs)=%i' % \
(desk.minmax, self.str_correction, desk.numOfLetters, desk.cutoffLength, desk.cutoffNumSeq)
elif desk.vert_horiz == 'VMI':
xLabel = 'JSD(VMI) (%s)'%(desk.unit)
''' multidimensional distance '''
title = "Hierarchical Cluster Method=%s of JSD(VMI), %s %s %s"\
%(desk.cluster_method_desc, desk.organism, desk.seqType, desk.gene)
title += '\n%s%s, #letter %i, min(L)=%i, min(#seqs)=%i' % \
(desk.minmax, self.str_correction, desk.numOfLetters, desk.cutoffLength, desk.cutoffNumSeq)
else:
xLabel = 'JSD(VSH) (nat)'
''' multidimensional distance '''
title = "Hierarchical Cluster Method=%s of JSD(VSH), %s %s %s"\
%(desk.cluster_method_desc, desk.organism, desk.seqType, desk.gene)
title += '\n%s%s, #letter %i, min(L)=%i, min(#seqs)=%i' % \
(desk.minmax, self.str_correction, desk.numOfLetters, desk.cutoffLength, desk.cutoffNumSeq)
desk.set_cluster_filenames()
filename = desk.cluster_input_filename
ret, _, colHeaders, dataMatrix = self.open_distance_matrix_file(desk.rootTable + filename)
if not ret:
self.error_msg = 'Could not find %s'%(desk.rootTable + filename)
return False
pictureName = 'Cluster_' + filename.replace('.txt','')
''' desk.dr defined in pipe_desktop get_params() '''
if desk.dr:
rows = desk.dr.labels(colHeaders)
else:
rows = colHeaders
#convert native python array into a numpy array
# dataMatrix = log10(dataMatrix)
# print dataMatrix
dataMatrix = np.array(dataMatrix)
maxDist = 0
if desk.factor != 1:
for i in range(len(dataMatrix)):
for j in range(len(dataMatrix[i])):
dataMatrix[i][j] = dataMatrix[i][j] * desk.factor
if dataMatrix[i][j] > maxDist:
maxDist = dataMatrix[i][j]
else:
for i in range(len(dataMatrix)):
for j in range(len(dataMatrix[i])):
if dataMatrix[i][j] > maxDist:
maxDist = dataMatrix[i][j]
# single, weighted, average, co mplete
linkageMatrix = linkage(dataMatrix, method=desk.cluster_method, metric='euclidean')
''' finding maximum '''
maxLinkDist = 0
for i in range(len(linkageMatrix)):
for j in range(len(linkageMatrix[i])):
if linkageMatrix[i][j] > maxLinkDist:
maxLinkDist = linkageMatrix[i][j]
''' hierarchical cluster distorce distances
factor = maxDist/(2*maxLinkDist) '''
for i in range(len(linkageMatrix)):
linkageMatrix[i][2] = round(linkageMatrix[i][2]*.5, desk.roundVal)
fig = plt.figure(1, dpi=desk.dpi)
ax = fig.add_subplot('111')
plt.subplots_adjust(bottom=.1, left=.05, right=.84)
yLabel = 'species'
plt.rcParams['lines.linewidth'] = 2.5
fontsize = 26
plt.title(title, fontsize=fontsize)
ax.set_xlabel(xLabel, fontsize=fontsize)
ax.set_ylabel(yLabel, fontsize=fontsize)
# make colorbar labels bigger
leaf_font_size = 28
''' ddata = '''
try:
dendrogram(linkageMatrix,
color_threshold=desk.colorThreshold,
labels=rows, orientation='right') # show_leaf_counts=True , leaf_font_size=leaf_font_size
except:
print("Failed in printing dendrogram")
pass
plt.xticks(fontsize=leaf_font_size)
plt.yticks(fontsize=leaf_font_size)
'''
# print ddata
spList = ddata['ivl']
# print len(spList), spList
nickList = copy.deepcopy(spList)
nickList.sort()
dic = {}
for i in range(len(spList)):
sp = spList[i]
for j in range(len(nickList)):
if sp == nickList[j]:
dic[i] = j
#print i, spList[i], ' equal ',j, nickList[j]
break
count = 0
for i, d in zip(ddata['icoord'], ddata['dcoord']):
count += 1
# print i, d
# specie01 x specie02 - mean error distance
num = (i[0]-5)/10.
sp1a = int(num)
diff = num - sp1a
if diff == 0:
wei1a = 1
sp1b = sp1a
wei1b = 0
else:
sp1b = sp1a+1
wei1a = diff
wei1b = 1. - wei1a
#if num == 0:
# print '>>>> viri'
num = (i[2]-5)/10.
sp2a = int(num)
diff = num - sp2a
if diff == 0:
sp2b = sp2a
wei2a = 1
wei2b = 0
else:
sp2b = sp2a+1
wei2a = diff
wei2b = 1. - wei2a
#print sp1a, sp1b, sp2a, sp2b
#print wei1a, wei1b, wei2a, wei2b
ste = 0.
if wei1a>0 and wei2a>0:
ste += wei1a*wei2a*seMatrix[dic[sp1a]][dic[sp2a]]
if wei1a>0 and wei2b>0:
ste += wei1a*wei2b*seMatrix[dic[sp1a]][dic[sp2b]]
if wei1b>0 and wei2a>0:
ste += wei1b*wei2a*seMatrix[dic[sp1b]][dic[sp2a]]
if wei1b>0 and wei2b>0:
# print sp1b, sp2b
ste += wei1b*wei2b*seMatrix[dic[sp1b]][dic[sp2b]]
ste = round(ste,4)
dist = seMatrix[dic[sp1a]][dic[sp2a]]
dist = round(dist,4)
# print 'dist', dist, 'ste', ste
x = 0.5 * sum(i[1:3])
y = round(d[1],4)
stry = str(y) + '\nd='+str(dist) + '\nse='+str(ste)
plt.plot(x, y, 'ro')
stry = ''
if abs(y) > desk.colorThreshold:
plt.annotate(stry, (x, y), xytext=(0, -8),
textcoords='offset points',
va='top', ha='center')
'''
self.myPlot = graphPac.Plot()
self.myPlot.print_graph(desk, fig, pictureName=pictureName, frame=desk.tk_root, stay=True)
return True
def open_distance_matrix_file(self, filename):
#open the file assuming the data above is in a file called 'dataFile'
if not os.path.exists(filename):
return False, None, None, None
# print 'opening %s'%(filename)
try:
inFile = open(filename,'r')
#save the column/row headers (conditions/genes) into an array
colHeaders = inFile.next().strip().split()[1:]
rowHeaders = []
dataMatrix = []
for line in inFile:
data = line.strip().split('\t')
if data[0] == '':
break
rowHeaders.append(data[0])
dataMatrix.append([np.double(x) for x in data[1:]])
except:
return False, None, None, None
return True, rowHeaders, colHeaders, dataMatrix
| [
"[email protected]"
] | |
18bf65b91ede81fc160c195e1d93f35b3c373aeb | 2a8707d969bd5966a3249b5bfccc433ad9a229bb | /myblog/migrations/0005_goods_category.py | b3210bd1bb0b72fe26acf6c8017cafee77c6f3e8 | [] | no_license | alexsef/django_second | 9520d889b9cb83cd0f73aa5d38a338b0f7dac021 | 32972892d6d6c6b349ec84418d781de695619586 | refs/heads/master | 2021-01-01T03:45:53.394005 | 2016-05-14T09:12:26 | 2016-05-14T09:12:26 | 58,160,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myblog', '0004_auto_20160505_2207'),
]
operations = [
migrations.AddField(
model_name='goods',
name='category',
field=models.SlugField(default=1, verbose_name='Категория', max_length=255, choices=[('fiction', 'художественная'), ('technical', 'техническая'), ('science', 'научная'), ('detective', 'детектив')]),
preserve_default=False,
),
]
| [
"[email protected]"
] | |
dc4a83fcf617cf0e46d2d03846c23c224b9fb4b9 | 01d98b95044dd4a2c900d603e1809f20437e996c | /P1-6.py | 37bb1c7ea9520d443b5c2068d1f09a419dd069de | [] | no_license | Muhfa12/PROTEK-5 | 4bdf906cb5e2ccc681c1b3946ac1824a8998fe94 | 38f120e3cb95009c880152a6b7364ec6cff5bfb1 | refs/heads/main | 2023-01-06T23:09:17.469865 | 2020-11-02T05:19:00 | 2020-11-02T05:19:00 | 309,265,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | a = 8
b = 3
if (a > 0) and (b > 0):
print("Keduanya positif")
else:
print("Keduanya tidak positif")
#DAN
a = 8
b = 3
if (a > 0):
if (b > 0):
print("Keduanya positif")
else:
print("Keduanya tidak positif")
else:
print("Keduanya tidak positif")
| [
"[email protected]"
] | |
f8653a4732b84e5e401655ad1ec9bc50eee9b21a | dc1bb0c8359361344665b02a28d72236324cb7f2 | /bin/gen_fake.py | c91431238e67478015c9fae152883e94eb482656 | [] | no_license | shayanzare007/ZippySLS | c7afaf0ed1474327bef6dd3173c9c92cb3297a93 | 505aebf27e65dc0b3755fa52b18ef546d06628bd | refs/heads/master | 2021-05-31T06:23:16.278471 | 2015-09-09T16:29:27 | 2015-09-09T16:29:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | import random
for l in open('score.txt').readlines():
sum = 0
for i in range(0,9):
sum = sum + float(l.split()[0][i])
# sum = 1- sum/10
# sum = sum/100 + 0.9;
print l.split()[0] + ' ' + str(sum)
#print l.split()[0] + ' ' + str(random.randint(0,9))
#print '1' + l.strip('\n')
#for l in open('score.txt').readlines():
# print l.strip('\n')[0:14]
#for l in open('score.txt').readlines():
# print '0' + l.strip('\n')
| [
"[email protected]"
] | |
d6f22f480566e94dadadeba81e2d06eaf3c14708 | 5d97136e559b6f2f68726d1a77dc5a8408850a65 | /trydjango/settings.py | 62daf7b10e2ef5c5f83854af8b9ea44381119b38 | [] | no_license | msarar/django | cda9271a368621d9024cf20925590b5c67ee29c5 | 547cd00ee49ddbcb210c3dfb1460dd4d4dd1ab52 | refs/heads/master | 2023-05-23T05:20:57.263561 | 2020-05-06T01:55:46 | 2020-05-06T01:55:46 | 259,022,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,176 | py | """
Django settings for trydjango project.
Generated by 'django-admin startproject' using Django 2.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '()f0aflo335*p#+*1_vbnbikgj61du-@apcul-ft4@5#szj)(2n_jn'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'products',
'pages',
'blog'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'trydjango.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'trydjango.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
28f97ce5395cfba08ac1efa1858fd84709e57c68 | fe226faa1854e33bf06cd43fcfe409c5cb7c46e0 | /Notes/Basic Return.py | 35fcd30a207460b9a61e7c3ed1543b1b91432e09 | [] | no_license | Tom-Szendrey/Highschool-basic-notes | 385a73f6b0567e6c8a35df814314d02560971459 | fd3a5efda24486e8c6ce68941bf8c52ec266769e | refs/heads/master | 2021-01-20T01:27:57.200778 | 2017-04-24T20:01:34 | 2017-04-24T20:01:34 | 89,281,704 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | def square(n):
"""Returns the square of a number."""
squared = n**2
print "%s squared is %s." % (n, squared)
return squared
# Call the square function on line 9! Make sure to
# include the number 10 between the parentheses.
square(10)
| [
"[email protected]"
] | |
cc69c17eb918fd897c9f5b7af3a2c749094d41a7 | 0dbfea9dcbbdf7a329c9d0f61831973b3168e560 | /camera.py | a9b8a4f8902973ba7b1242f9bbea89e50609084c | [] | no_license | SmartPracticeschool/SPS-4035-Intelligent-Best-Safety-Max-Safety-Rating-Generator-for-Restaurant | 6e6be915bf0208c5a9ca1368f99d754051fd9fee | 47ba68197c24623fee757788e03cbb41e5fca921 | refs/heads/master | 2022-12-11T09:28:01.007804 | 2020-09-11T10:20:54 | 2020-09-11T10:20:54 | 292,284,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,508 | py | import cv2
import boto3
import datetime
import requests
face_cascade=cv2.CascadeClassifier("haarcascade_frontalface_alt2.xml")
ds_factor=0.6
count=0
class VideoCamera(object):
def __init__(self):
self.video = cv2.VideoCapture(0)
def __del__(self):
self.video.release()
def get_frame(self):
#count=0
global count
success, image = self.video.read()
is_success, im_buf_arr = cv2.imencode(".jpg", image)
image1 = im_buf_arr.tobytes()
client=boto3.client('rekognition',
aws_access_key_id="ASIA3JZX6DJK2IOZBPEV",
aws_secret_access_key="GQ4AuQs80d8r+gLfQCadeLY/vmll0SLFPQMF/x9P",
aws_session_token="FwoGZXIvYXdzEHoaDMK6+Vqt+bc4zxdiSyLKAe9iC6fIvoALw6dZuXTSz5Vb0GfE43zPfJTLsmHOA+pDUpGwlCEBfT6xXrgPq5XiGabwP/5ZFbp517LpM08a3f76c356zrXXYSVPazZogFUMc/qMDkEWly/SW66SeT9cgRirmZAj49GMGUBAFovwnWAUOmWEMJVOT+R7BCcRDs7qzlV8mrmhichmPsmSWqOcZsJY+2b99WyupvX8XorhsQepP0eQK0VkZVxU0FN1iFgijdC1FgZ51y0fKVfkXFbONQ2CXdn0EnAYOcAoqu3s+gUyLRhXqAddoXMzN2yXr8kKsDW9H2XiMzfy4lVX669OchDI696RMMVo3K66fvIdiA==",
region_name='us-east-1')
response = client.detect_custom_labels(
ProjectVersionArn='arn:aws:rekognition:us-east-1:776969525845:project/Mask-Detection2/version/Mask-Detection2.2020-09-07T23.02.02/1599499928143',Image={
'Bytes':image1})
print(response['CustomLabels'])
if not len(response['CustomLabels']):
count=count+1
date = str(datetime.datetime.now()).split(" ")[0]
#print(date)
url = " https://81ryisfwlc.execute-api.us-east-1.amazonaws.com/apiForMaskCount?date="+date+"&count="+str(count)
resp = requests.get(url)
f = open("countfile.txt", "w")
f.write(str(count))
f.close()
#print(count)
image=cv2.resize(image,None,fx=ds_factor,fy=ds_factor,interpolation=cv2.INTER_AREA)
gray=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
face_rects=face_cascade.detectMultiScale(gray,1.3,5)
for (x,y,w,h) in face_rects:
cv2.rectangle(image,(x,y),(x+w,y+h),(0,255,0),2)
break
ret, jpeg = cv2.imencode('.jpg', image)
#cv2.putText(image, text = str(count), org=(10,40), fontFace=cv2.FONT_HERSHEY_PLAIN, fontScale=1, color=(1,0,0))
cv2.imshow('image',image)
return jpeg.tobytes()
| [
"[email protected]"
] | |
0706d765576d572b9e51ca80a194e7ef9a3a0ae5 | 3270a22bdb2b3789639e2e67e8e784ffe08987c2 | /MiniProject1.py | 0cde198840b984b526cef982e033355055887760 | [] | no_license | ericli21/ec601-tweet-pulls | 06cda558ce8b117ac1c62260cfcc83cea94d9ac2 | b398087db67f5708b912ea7af8fbda8a0ef64ad4 | refs/heads/master | 2020-04-09T01:48:29.289507 | 2018-12-01T06:56:26 | 2018-12-01T06:56:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,125 | py | #Eric Li
#EC601: Mini-Project 1
#main script
import os
import twitterpull
import convertvid
import analyzeim
#Insert consumer and access information
#Twitter API credentials
consumer_key = "Insert consumer key"
consumer_secret = "Insert consumer secret"
access_key = "Insert access key"
access_secret = "Insert access secret"
#Google API credentials (filename.json)
json_file = "Insert path to filename.json"
def main(screen_name):
twitter_ob = twitterpull.tweet_processor()
twitter_ob.twitter_verify(consumer_key, consumer_secret, access_key, access_secret, screen_name)
twitter_ob.pull_tweets()
twitter_ob.remove_old_images()
twitter_ob.store_new_images()
print("Image process done!\n")
google_ob = analyzeim.google_processor()
google_ob.google_verify(json_file)
google_ob.get_annotations()
google_ob.remove_old_labels()
google_ob.store_new_labels()
print("Analysis process done!\n")
ff_ob = convertvid.ff_processor()
ff_ob.ff_framerate(0.5)
ff_ob.ff_endfile("foo")
ff_ob.delete_old_video()
ff_ob.create_new_video()
print("Video process done!\n")
if __name__ == "__main__":
main("@agameoftones_") | [
"[email protected]"
] | |
a9e0478108e5abb89730a9ef9663fb774c017015 | 6a43a12f82d51dd87c15ed097c99bbdcc5da44b8 | /custom_reg_form/old_forms.py | c6813094377e506e77ad1eafa4ea812fd123598f | [] | no_license | eol-virtuallabx/eol_custom_reg_form | 7c5d834460183fc08f6eb8aca2e4598eadea903e | bcc9233281392e916c789a6e244c933b928bf42b | refs/heads/main | 2023-08-01T14:21:24.654672 | 2021-08-31T20:07:24 | 2021-08-31T20:07:24 | 393,120,555 | 0 | 0 | null | 2021-08-10T16:38:37 | 2021-08-05T17:14:52 | Python | UTF-8 | Python | false | false | 3,271 | py | # -*- coding:utf-8 -*-
from .models import ExtraInfo
from django.forms import ModelForm
class ExtraInfoForm(ModelForm):
"""
The fields on this form are derived from the ExtraInfo model in models.py.
"""
def __init__(self, *args, **kwargs):
super(ExtraInfoForm, self).__init__(*args, **kwargs)
self.fields['labx_firstname'].error_messages = {
"required": u"Por favor ingresa Nombres.",
}
self.fields['labx_lastname'].error_messages = {
"required": u"Por favor ingresa Apellidos.",
}
self.fields['labx_rut'].error_messages = {
"required": u"Por favor ingresa RUT.",
}
self.fields['labx_birth_date'].error_messages = {
"required": u"Por favor ingresa Fecha Nacimiento.",
"invalid":u"Fecha no valida",
}
self.fields['labx_gender'].error_messages = {
"required": u"Por favor selecciona Género.",
}
self.fields['labx_phone'].error_messages = {
"required": u"Por favor ingresa Teléfono.",
}
self.fields['labx_country_nac'].error_messages = {
"required": u"Por favor selecciona País de Nacionalidad.",
}
self.fields['labx_part_address'].error_messages = {
"required": u"Por favor ingresa Dirección Particular.",
}
self.fields['labx_part_region'].error_messages = {
"required": u"Por favor selecciona Región Particular.",
}
self.fields['labx_part_provincia'].error_messages = {
"required": u"Por favor selecciona Provincia Particular.",
}
self.fields['labx_part_comuna'].error_messages = {
"required": u"Por favor selecciona Comuna Particular.",
}
self.fields['labx_lab_address'].error_messages = {
"required": u"Por favor ingresa Dirección Laboral.",
}
self.fields['labx_lab_region'].error_messages = {
"required": u"Por favor selecciona Región Laboral.",
}
self.fields['labx_lab_provincia'].error_messages = {
"required": u"Por favor selecciona Provincia Laboral.",
}
self.fields['labx_lab_comuna'].error_messages = {
"required": u"Por favor selecciona Comuna Laboral.",
}
self.fields['labx_work'].error_messages = {
"required": u"Por favor selecciona Profesión/Ocupación.",
}
self.fields['labx_educ_level'].error_messages = {
"required": u"Por favor selecciona Nivel de Estudios.",
}
self.fields['labx_lab_lugar'].error_messages = {
"required": u"Por favor ingresa Institución donde Trabajas Actualmente.",
}
self.fields['labx_lab_type'].error_messages = {
"required": u"Por favor selecciona Tipo Institución en la que Trabajas.",
}
self.fields['labx_lab_rubro'].error_messages = {
"required": u"Por favor selecciona Rubro de la Institución en la que Trabajas.",
}
self.fields['labx_lab_cargo'].error_messages = {
"required": u"Por favor ingresa Cargo.",
}
    class Meta(object):
        # ModelForm binding: validate and persist exactly the labx_*
        # registration fields against the ExtraInfo model.
        model = ExtraInfo
        fields = ('labx_firstname','labx_lastname','labx_rut','labx_birth_date','labx_gender','labx_phone','labx_country_nac','labx_part_address','labx_part_region','labx_part_provincia','labx_part_comuna','labx_lab_address','labx_lab_region','labx_lab_provincia','labx_lab_comuna','labx_work','labx_educ_level','labx_lab_lugar','labx_lab_type','labx_lab_rubro','labx_lab_cargo')
| [
"[email protected]"
] | |
f85a05335cbae03eecb345fa58a4e227ed7aeed9 | aa3e4c5a6e6dbc7af8a22af24906a1832a1651c7 | /train.py | fde4083e839b90dc785ed0bf4a26abbef73b34bf | [] | no_license | Chaudhary-Furqan/starGANv1-Pytorch | 3f02668f014e6413f4bdeda0c2780fa10b9bb95e | 7d0fd4074fd6fa92395bd4068b0166856b858a4d | refs/heads/main | 2023-01-09T16:34:41.853973 | 2020-11-09T10:33:00 | 2020-11-09T10:33:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,527 | py | import torch
import torchvision
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from torch.utils.data import Dataset
import torch.nn as nn
import tqdm
import os
import time
from torch.nn import init
from arch import generator
from arch import discriminator
import utils
import data_loader
class starGAN(nn.Module):
    """StarGAN trainer: owns the generator/discriminator pair, their Adam
    optimizers and a WGAN-GP style adversarial training loop with a
    domain-classification (BCE) term and a cycle-reconstruction (L1) term.
    """
    def __init__(self,args):
        super(starGAN, self).__init__()
        # Use the GPU selected by args.cuda_id when available, else CPU.
        self.device = torch.device("cuda:"+str(args.cuda_id)+"" if torch.cuda.is_available() else "cpu")
        self.G = generator.Generator().to(self.device)
        self.D = discriminator.Discriminator().to(self.device)
        # self.init_weights(self.G)
        # self.init_weights(self.D)
        utils.print_networks([self.G, self.D], ['G', 'D'])
        self.optim_G = torch.optim.Adam(params=self.G.parameters(),lr=args.g_lr,betas=(args.beta1,args.beta2))
        self.optim_D = torch.optim.Adam(params=self.D.parameters(), lr=args.d_lr, betas=(args.beta1,args.beta2))
        self.BCE = nn.BCELoss()  # domain-classification loss
        self.L1 = nn.L1Loss()  # cycle-reconstruction loss
        self.train_loader = data_loader.get_loader(args.img_path, args.attr_path, args.mode, args.batch_size, args.crop_size,
                                        args.img_size)
    # NOTE(review): this signature is missing `self`, so a call such as
    # self.init_weights(self.G) would bind net=self and gain=G. It is only
    # referenced from the commented-out lines above, so it never runs as-is.
    def init_weights(net, gain=0.02):
        # Initialize Conv/Linear weights from N(0, gain), BatchNorm2d weights
        # from N(1, gain); zero all biases. Uses the deprecated in-place
        # init.normal / init.constant spellings.
        def init_func(m):
            classname = m.__class__.__name__
            if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
                init.normal(m.weight.data, 0.0, gain)
                if hasattr(m, 'bias') and m.bias is not None:
                    init.constant(m.bias.data, 0.0)
            elif classname.find('BatchNorm2d') != -1:
                init.normal(m.weight.data, 1.0, gain)
                init.constant(m.bias.data, 0.0)
        print('Network initialized with weights sampled from N(0,0.02).')
        net.apply(init_func)
    def train_D(self,args):
        """One full pass over the loader updating D; returns per-batch losses."""
        D_loss = []
        for i, (x, y) in tqdm.tqdm(enumerate(self.train_loader)):
            x = x.to(self.device)
            y = y.to(self.device)
            # Target domains: a random permutation of the batch's own labels.
            rand_index = torch.randperm(y.size(0))
            random_label = y[rand_index].to(self.device)
            # random_label = torch.randint(0,2,size=(x.size(0),5)).float().to(device)
            fake = torch.zeros(x.size(0),1,2,2).to(self.device)
            valid = torch.ones(x.size(0),1,2,2).to(self.device)
            fake_img = self.G(x,random_label).detach()
            fake_src,fake_cls = self.D(fake_img) ### B*1*2*2 patch scores, B*5 domain logits
            # loss1 = criterion1(fake_src,fake)
            # Wasserstein critic terms: minimize fake score, maximize real score.
            loss1 = torch.mean(fake_src)
            real_src,real_cls = self.D(x)
            # loss2 = criterion1(real_src,valid)
            loss2 = -torch.mean(real_src)
            # loss3 =0
            # for j in range(5):
            #     loss3 = loss3+criterion1(real_cls[:,j],y[:,j])
            loss3 = self.BCE(real_cls,y)
            alpha = torch.rand(x.size(0),3,128,128).to(self.device) # random mix weights for the WGAN-GP gradient penalty
            x_ = alpha*x+(1-alpha)*fake_img
            x_.requires_grad_(True)
            y_, _ = self.D(x_)
            gradients = torch.autograd.grad(outputs=y_, inputs=x_,
                                            grad_outputs=torch.ones(y_.size()).to(self.device),
                                            create_graph=True, retain_graph=True, only_inputs=True)[0]
            gradients = gradients.view(gradients.size(0),-1)
            gradient_penalty = torch.pow((torch.sum(gradients**2, dim=1) - 1), 2).mean()
            loss_D = loss1+loss2+loss3*args.lambda_cls+gradient_penalty*args.lambda_gp
            D_loss.append(float(loss_D.item()))
            self.optim_D.zero_grad()
            loss_D.backward()
            self.optim_D.step()
        return D_loss
    def train_G(self,args):
        """One full pass over the loader updating G; returns per-batch losses."""
        G_loss = []
        for i, (x, y) in tqdm.tqdm(enumerate(self.train_loader)):
            x = x.to(self.device)
            y = y.to(self.device)
            rand_index = torch.randperm(y.size(0))
            random_label = y[rand_index].to(self.device)
            # random_label = torch.randint(0,2,size=(x.size(0),5)).float().to(device)
            fake = torch.zeros(x.size(0),1,2,2).to(self.device)
            valid = torch.ones(x.size(0),1,2,2).to(self.device)
            fake_img = self.G(x, random_label)
            fake_src, fake_cls = self.D(fake_img) ### B*1*2*2 patch scores, B*5 domain logits
            # loss1 = criterion1(fake_src, valid)
            loss1 = -torch.mean(fake_src)
            # loss2 = 0
            # for j in range(5):
            #     loss2 = loss2+criterion1(fake_cls[:,j], random_label[:,j])
            loss2 = self.BCE(fake_cls,random_label)
            # Cycle consistency: translating back to the original domain
            # should reconstruct the input image.
            loss3 = self.L1(x, self.G(fake_img, y))
            loss_G = loss1 + args.lambda_cls*loss2 + loss3*args.lambda_rec
            G_loss.append(float(loss_G.item()))
            self.optim_G.zero_grad()
            loss_G.backward()
            self.optim_G.step()
        return G_loss
    def updata_lr(self):
        # Decay both learning rates by 1/10000 of their current value.
        for param_group in self.optim_G.param_groups:
            param_group['lr'] -=param_group['lr']/10000
        for param_group in self.optim_D.param_groups:
            param_group['lr'] -=param_group['lr']/10000
    def train(self,args):
        """Main loop: n_critic D passes per G pass, LR decay every 1000
        epochs, periodic checkpointing of both networks."""
        for epoch in range(args.train_epoch):
            if (epoch + 1) % 1000 == 0:
                self.updata_lr()
            D_loss = []
            G_loss = []
            for j in range(args.n_critic):
                D_loss = self.train_D(args)
            G_loss = self.train_G(args)
            print("epoch:", epoch + 1, "D_loss:", torch.mean(torch.FloatTensor(D_loss)))
            print("epoch:", epoch + 1, "G_loss:", torch.mean(torch.FloatTensor(G_loss)))
            if (epoch+1)%args.model_save_epoch == 0:
                utils.mkdir(args.model_save_dir)
                utils.save_checkpoint({'epoch': epoch + 1,
                                       'D': self.D.state_dict(),
                                       'G': self.G.state_dict()},
                                      '%s/latest.ckpt' % (args.model_save_dir))
                # utils.save_checkpoint(self.G.state_dict(), os.path.join(args.model_save_dir, 'G_' + str(epoch + 1) + '.pkl'))
                # utils.save_checkpoint(self.D.state_dict(), os.path.join(args.model_save_dir, 'D_' + str(epoch + 1) + '.pkl'))
| [
"[email protected]"
] | |
e25a6759957ae1539cb7b8b7a14e01fb8a89ffc5 | 016a7d2dca5dd90987733b1303c61f8e7c4a943d | /launchpad_node1.py | 7786f419b9f4ad57af6df1c8baca28cb77c1099d | [] | no_license | huyrua291996/ros_serial_stm32 | eb4272e69e8d7777c728772da33d7d5e50d3e670 | 31e5e8ca96a9f6dd3d7fb5823dfca04ac22090e4 | refs/heads/master | 2020-04-19T15:18:57.473736 | 2019-05-01T16:53:21 | 2019-05-01T16:53:21 | 168,270,061 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,572 | py | #!/usr/bin/env python
'''
launchpad_node.py - Receive sensor values from Launchpad board and publish as topics
Created September 2014
Copyright(c) 2014 Lentin Joseph
Some portion borrowed from Rainer Hessmer blog
http://www.hessmer.org/blog/
'''
# Python client library for ROS
import rospy
import sys
import time
import math
# This module helps to receive values from serial port
from SerialDataGateway import SerialDataGateway
# Importing ROS data types
from std_msgs.msg import Int16, Int32, Int64, Float32, String, Header, UInt64
# Importing ROS data type for IMU
from sensor_msgs.msg import Imu
from geometry_msgs.msg import Twist
from nav_msgs.msg import Path,Odometry
from tf.transformations import euler_from_quaternion, quaternion_from_euler
# Class to handle serial data from Launchpad and converted to ROS topics
# Class to handle serial data from Launchpad and converted to ROS topics
class Launchpad_Class(object):
    """Bridge between the Launchpad (Tiva C / STM32) serial link and ROS.

    Publishes board telemetry (dxy, dth, encoders, battery, ultrasonic) as
    topics and relays velocity / path commands back over the serial line
    using short keyword-prefixed messages (VES, WHS, CDN, PAS, POI, ...).
    Written for Python 2 (print statements) and ROS1 (rospy).
    """
    def __init__(self):
        print "Initializing Launchpad Class"
        #######################################################################################################################
        # Sensor variables
        self._Counter = 0
        self._dxy = 0
        self._dth = 0
        self._left_encoder_value = 0
        self._right_encoder_value = 0
        self._battery_value = 0
        self._ultrasonic_value = 0
        self._qx = 0
        self._qy = 0
        self._qz = 0
        self._qw = 0
        self._left_wheel_speed_ = 0
        self._right_wheel_speed_ = 0
        self._LastUpdate_Microsec = 0
        self._Second_Since_Last_Update = 0
        self.robot_heading = 0
        #######################################################################################################################
        # Get serial port and baud rate of Tiva C Launchpad
        port = rospy.get_param("~port", "/dev/ttyS0")
        baudRate = int(rospy.get_param("~baudRate", 115200))
        #######################################################################################################################
        rospy.loginfo("Starting with serial port: " +
                      port + ", baud rate: " + str(baudRate))
        # Initializing SerialDataGateway with port, baudrate and callback function to handle serial data
        self._SerialDataGateway = SerialDataGateway(
            port, baudRate, self._HandleReceivedLine)
        rospy.loginfo("Started serial communication")
        #######################################################################################################################
        #Subscribers and Publishers
        self.dxy = rospy.Publisher('dxy', Float32, queue_size=10)
        self.dth = rospy.Publisher('dth', Float32, queue_size=10)
        # Publisher for left and right wheel encoder values
        self._Left_Encoder = rospy.Publisher('lwheel', Int64, queue_size=10)
        self._Right_Encoder = rospy.Publisher('rwheel', Int64, queue_size=10)
        # Publisher for Battery level(for upgrade purpose)
        self._Battery_Level = rospy.Publisher(
            'battery_level', Float32, queue_size=10)
        # Publisher for Ultrasonic distance sensor
        self._Ultrasonic_Value = rospy.Publisher(
            'ultrasonic_distance', Float32, queue_size=10)
        # Publisher for IMU rotation quaternion values
        #self._qx_ = rospy.Publisher('qx', Float32, queue_size=10)
        #self._qy_ = rospy.Publisher('qy', Float32, queue_size=10)
        #self._qz_ = rospy.Publisher('qz', Float32, queue_size=10)
        #self._qw_ = rospy.Publisher('qw', Float32, queue_size=10)
        # Publisher for entire serial data
        #self._SerialPublisher = rospy.Publisher(
        #    'serial', String, queue_size=10)
        #######################################################################################################################
        # Subscribers and Publishers of IMU data topic
        self.frame_id = '/base_footprint'
        self.reach = 0
        self.cal_offset = 0.0
        self.orientation = 0.0
        self.cal_buffer = []
        self.cal_buffer_length = 1000
        self.imu_data = Imu(header=rospy.Header(frame_id="base_footprint"))
        self.imu_data.orientation_covariance = [
            1e6, 0, 0, 0, 1e6, 0, 0, 0, 1e-6]
        self.imu_data.angular_velocity_covariance = [
            1e6, 0, 0, 0, 1e6, 0, 0, 0, 1e-6]
        self.imu_data.linear_acceleration_covariance = [
            -1, 0, 0, 0, 0, 0, 0, 0, 0]
        self.gyro_measurement_range = 150.0
        self.gyro_scale_correction = 1.35
        #self.imu_pub = rospy.Publisher('imu/data', Imu, queue_size=10)
        self.deltat = 0
        self.lastUpdate = 0
        # New addon for computing quaternion
        self.pi = 3.14159
        self.GyroMeasError = float(self.pi * (40 / 180))
        self.beta = float(math.sqrt(3 / 4) * self.GyroMeasError)
        self.GyroMeasDrift = float(self.pi * (2 / 180))
        self.zeta = float(math.sqrt(3 / 4) * self.GyroMeasDrift)
        self.beta = math.sqrt(3 / 4) * self.GyroMeasError
        self.q = [1, 0, 0, 0]
        #######################################################################################################################
        # Speed subscriber
        #self._left_motor_speed = rospy.Subscriber(
        #    'left_wheel_speed', Float32, self._Update_Left_Speed)
        #self._right_motor_speed = rospy.Subscriber(
        #    'right_wheel_speed', Float32, self._Update_Right_Speed)
        rospy.Subscriber('cmd_vel', Twist, self._handle_cmd_vel)
        #self.sub = rospy.Subscriber('/move_base/DWAPlannerROS/global_plan', Path, self.clbk_path)
        #rospy.Subscriber('/odom', Odometry, self.clbk_odom)
        #rospy.Timer(rospy.Duration(0.1), self.timerCB)
        #rospy.Subscriber('/pure', String, self.clbk_pure)
        #self.odom = Odometry()
    def clbk_pure(self, msg):
        # Pass-through: forward a raw protocol string to the serial port.
        self._WriteSerial(msg.data)
    def timerCB(self, event):
        # Periodically stream the current odometry pose to the board as a
        # "CDN,x,y,yaw" line; positions appear scaled to mm and yaw by 10
        # (inferred from the *1000 / *10 factors) — confirm against firmware.
        odom_msg = "CDN"
        odom_msg += "," + str(int(self.odom.pose.pose.position.x * 1000))
        odom_msg += "," + str(int(self.odom.pose.pose.position.y * 1000))
        ori_li = [self.odom.pose.pose.orientation.x,self.odom.pose.pose.orientation.y,self.odom.pose.pose.orientation.z,
        self.odom.pose.pose.orientation.w]
        (roll, pitch, yaw) = euler_from_quaternion(ori_li)
        odom_msg += "," + str(int(yaw * 10)) + '\n'
        self._WriteSerial(odom_msg)
        if (self.reach == 1):
            # The board reported "REA" (goal reached): resubscribe to the
            # planner so the next global plan is forwarded.
            self.reach = 0
            self.sub = rospy.Subscriber('/move_base/DWAPlannerROS/global_plan',Path, self.clbk_path)
    def clbk_odom(self, msg):
        # Cache the latest odometry pose for timerCB.
        self.odom.pose.pose.position.x = msg.pose.pose.position.x
        self.odom.pose.pose.position.y = msg.pose.pose.position.y
        self.odom.pose.pose.position.z = msg.pose.pose.position.z
        self.odom.pose.pose.orientation.x = msg.pose.pose.orientation.x
        self.odom.pose.pose.orientation.y = msg.pose.pose.orientation.y
        self.odom.pose.pose.orientation.z = msg.pose.pose.orientation.z
        self.odom.pose.pose.orientation.w = msg.pose.pose.orientation.w
    def clbk_path(self, msg):
        # Forward a global plan to the board: first the pose count ("PAS,n"),
        # then one long "POI,x1,y1,x2,y2,..." line (coordinates in mm).
        size_msg = "PAS,"+str(len(msg.poses))+'\n'
        self._WriteSerial(size_msg)
        poi_msg = "POI"
        #pay_msg = "PAY"
        #paa_msg = "PAA"
        for pose in msg.poses:
            ori_q = pose.pose.orientation
            ori_list = [ori_q.x, ori_q.y, ori_q.z, ori_q.w]
            (roll, pitch, yaw) = euler_from_quaternion(ori_list)
            #path_msg = "CDN,"+str(int(pose.pose.position.x*1000))+","+str(int(pose.pose.position.y*1000))+","+str(int(yaw*10))+'\n'
            #self._WriteSerial(path_msg)
            poi_msg += ","+str(int(pose.pose.position.x * 1000))
            poi_msg += ","+str(int(pose.pose.position.y * 1000))
            #paa_msg += ","+str(int(yaw * 10))
        poi_msg += '\n'
        #pay_msg += '\n'
        #paa_msg += '\n'
        #time.sleep(2)
        self._WriteSerial(poi_msg)
        #time.sleep(1)
        #self._WriteSerial(pay_msg)
        #self.sub.unregister()
        #time.sleep(1)
        #self._WriteSerial(paa_msg)
        #time.sleep(1)
    def _handle_cmd_vel(self, msg):
        # Split a Twist into linear ("VES", mm/s presumably) and angular
        # ("WHS", rad/s * 10) serial commands — verify units with firmware.
        cmd_message = "VES," + str(int(msg.linear.x*1000))+'\n'
        self._WriteSerial(cmd_message)
        cmd_message = "WHS," + str(int(msg.angular.z*10))+'\n'
        self._WriteSerial(cmd_message)
    #######################################################################################################################
    def _Update_Left_Speed(self, left_speed):
        # Legacy per-wheel speed interface: cache and resend both speeds.
        self._left_wheel_speed_ = left_speed.data
        rospy.loginfo(left_speed.data)
        speed_message = 's %d %d\r' % (
            int(self._left_wheel_speed_), int(self._right_wheel_speed_))
        self._WriteSerial(speed_message)
    # 3
    def _Update_Right_Speed(self, right_speed):
        # Legacy per-wheel speed interface: cache and resend both speeds.
        self._right_wheel_speed_ = right_speed.data
        rospy.loginfo(right_speed.data)
        speed_message = 's %d %d\r' % (
            int(self._left_wheel_speed_), int(self._right_wheel_speed_))
        self._WriteSerial(speed_message)
    #######################################################################################################################
    # Calculate orientation from accelerometer and gyrometer
    def _HandleReceivedLine(self, line):
        # Serial RX callback: parse comma-separated "KEY,value" telemetry.
        # DXY/DTH are republished as Float32 topics; REA flags goal reached.
        #rospy.loginfo(line)
        #self._Counter = self._Counter + 1
        #self._SerialPublisher.publish(
        #    String(str(self._Counter) + ", in: " + line))
        if(len(line) > 0):
            lineParts = line.split(',')
            try:
                if(lineParts[0] == 'DXY'):
                    self._dxy = float(lineParts[1])
                    rospy.loginfo("DXY:" + str(self._dxy))
                    self.dxy.publish(self._dxy)
                if(lineParts[0] == 'DTH'):
                    self._dth = float(lineParts[1])
                    rospy.loginfo("DTH:" + str(self._dth))
                    self.dth.publish(self._dth)
                if(lineParts[0] == 'REA'):
                    self.reach = 1
            except:
                # NOTE(review): bare except silently hides malformed frames.
                rospy.logwarn("Error in Sensor values")
                rospy.logwarn(lineParts)
                pass
    #######################################################################################################################
    def _WriteSerial(self, message):
        # Single funnel for all serial TX.
        # self._SerialPublisher.publish(
        #     String(str(self._Counter) + ", out: " + message))
        self._SerialDataGateway.Write(message)
    #######################################################################################################################
    def Start(self):
        # Open the serial gateway and start its reader thread.
        rospy.logdebug("Starting")
        self._SerialDataGateway.Start()
    #######################################################################################################################
    def Stop(self):
        # Close the serial gateway.
        rospy.logdebug("Stopping")
        self._SerialDataGateway.Stop()
    #######################################################################################################################
    def Subscribe_Speed(self):
        # Placeholder (no-op).
        a = 1
        # print "Subscribe speed"
    #######################################################################################################################
    def Reset_Launchpad(self):
        # Send the reset command twice with settling delays.
        print "Reset"
        reset = 'r\r'
        self._WriteSerial(reset)
        time.sleep(1)
        self._WriteSerial(reset)
        time.sleep(2)
    #######################################################################################################################
    def Send_Speed(self):
        # Placeholder (no-op).
        # print "Set speed"
        a = 3
if __name__ == '__main__':
    # Node entry point: start the serial bridge and hand control to ROS.
    rospy.init_node('launchpad_ros', anonymous=True)
    launchpad = Launchpad_Class()
    try:
        launchpad.Start()
        rospy.spin()
    except rospy.ROSInterruptException:
        # On shutdown/interrupt, reset the board and close the serial port.
        rospy.logwarn("Error in main function")
        launchpad.Reset_Launchpad()
        launchpad.Stop()
#######################################################################################################################
| [
"[email protected]"
] | |
fa26e61098686a4c42aded2420c936db123a2682 | 2f4bfb038e99f4bf0d8394c27ace357f82e1c82f | /focal_loss_ae/trainer.py | 63160c14d8967dfe13086521a67d395ef4e8063f | [] | no_license | NoListen/DeepWhat | b7b67c54669b8cdb10fafb9131cb0eb6d5f70a3d | 11697841dd1cf7b26ad4da24cdeb97b5a849213e | refs/heads/master | 2023-03-12T19:12:19.093738 | 2021-03-03T14:46:48 | 2021-03-03T14:46:48 | 102,484,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,073 | py | import torch
from torch import nn
from collections import namedtuple
from enum import IntEnum
from utils import focal_loss
opt_params = namedtuple("opt_params", ["lr"])
class TrainerPhase(IntEnum):
    """Trainer mode flag: gradient updates happen only in the `train` phase."""
    train = 0
    test = 1
class VAELoss(nn.Module):
    """VAE objective: reconstruction term plus a floored ("free bits") KL term.

    The reconstruction term is either a per-sample Bernoulli NLL summed over
    (C, H, W) or `focal_loss` when `use_focal_loss` is set.

    Args:
        kl_loss_threshold: lower clamp applied to each sample's KL before
            averaging (free-bits trick; 0 disables it).
        use_focal_loss: use `focal_loss(output, x)` instead of BCE.
        dev: device the threshold tensor lives on; must match the inputs.
    """
    def __init__(self, kl_loss_threshold=0, use_focal_loss=False, dev="cuda:0"):
        super(VAELoss, self).__init__()
        # Bug fix: this tensor previously ignored `dev` and was hard-coded to
        # "cuda", which crashed on CPU-only hosts and broke multi-GPU selection.
        self.kl_loss_threshold = torch.tensor(kl_loss_threshold).to(dev)
        self.use_focal_loss = use_focal_loss

    def forward(self, x, output, mu, logvar) -> torch.Tensor:
        """Return the scalar loss for a batch.

        Args:
            x: target images, values in [0, 1], shape (B, C, H, W).
            output: decoder probabilities in (0, 1), same shape as `x`.
            mu, logvar: posterior parameters, shape (B, z_size).
        """
        if self.use_focal_loss:
            r_loss = focal_loss(output, x)
        else:
            # Bernoulli NLL per sample; the 1e-8 epsilon guards log(0).
            r_loss = -torch.sum(x * torch.log(output + 1e-8) +
                                (1 - x) * torch.log(1 - output + 1e-8), dim=[1, 2, 3])
            r_loss = torch.mean(r_loss)
        # KL(q(z|x) || N(0, I)) per sample, clamped below by the threshold.
        kl_loss = -0.5 * \
            torch.sum(1 + logvar - torch.square(mu) - torch.exp(logvar), dim=1)
        kl_loss = torch.max(kl_loss, self.kl_loss_threshold)
        kl_loss = torch.mean(kl_loss)
        return kl_loss + r_loss
class AETrainer:
    """Drives (V)AE optimization: owns the model, the loss callable, an Adam
    optimizer built from `opt_params.lr`, and a summary writer for logging.
    """

    def __init__(self, network, loss, opt_params, writer):
        self.network = network
        self.loss = loss
        self.opt_params = opt_params
        self.writer = writer
        self.optimizer = None
        self.phase = TrainerPhase.train
        self.train_step = 0
        self._build_optimizer()

    def set_phase(self, phase: TrainerPhase):
        """Switch between train (updates applied) and test (forward only)."""
        self.phase = phase

    def _build_optimizer(self):
        """(Re)create the Adam optimizer over the network parameters."""
        params = self.network.parameters()
        self.optimizer = torch.optim.Adam(params, self.opt_params.lr)

    def train(self, batch):
        """Run one forward/loss pass; apply a gradient step when training.

        The network is expected to return (mu, logvar, reconstruction).
        """
        mu, logvar, reconstruction = self.network(batch)
        step_loss = self.loss(batch, reconstruction, mu, logvar)
        if self.phase != TrainerPhase.train:
            return
        self.optimizer.zero_grad()
        step_loss.backward()
        self.optimizer.step()
        self.train_step += 1
        self.writer.add_scalar("Loss/train", step_loss, self.train_step)
| [
"[email protected]"
] | |
e0d78fa37ce6d5e30c4f900d0b3f441cc7154c52 | 0647dc7794126ef03e0a78b31c1ed64cc74e7c50 | /script2.py | d4cc094833fe27a8dce25a39d0338ac67fb62968 | [] | no_license | panserbjorn/ControllerV-REP | 71a8fc9f7abd807852f869fc9d2e030fa3db09c3 | d3c1f8adfcbb3fa06279f9e8642126a95b58f7ff | refs/heads/master | 2021-09-11T19:58:02.216214 | 2018-04-11T20:03:50 | 2018-04-11T20:03:50 | 109,732,983 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,565 | py | #Python code for de controller of my thesis
#Author Joaquín Silveira
import vrep
import time
import math
import numpy as np
import matplotlib.pyplot as plt
import secuenceGenerator as sg
#Funciones auxiliares:
def recoverRobotParts(clientID):
    """Look up simulation handles for the four leg motors and the head.

    Returns the tuple (LUM, LLM, RUM, RLM, head); the V-REP return codes
    from each lookup are discarded, matching the original behavior.
    """
    handles = {}
    for part in ("LUM", "LLM", "RUM", "RLM", "Head"):
        _, handles[part] = vrep.simxGetObjectHandle(
            clientID, part, vrep.simx_opmode_blocking)
    return (handles["LUM"], handles["LLM"], handles["RUM"],
            handles["RLM"], handles["Head"])
def setVelocity(clientID, motorHandle, targetVelocity):
    """Send a fire-and-forget target-velocity command to one joint motor."""
    opmode = vrep.simx_opmode_oneshot
    vrep.simxSetJointTargetVelocity(clientID, motorHandle, targetVelocity, opmode)
def readInstructions(fileName):
    """Parse a movement-sequence file into lists of (movement, duration) pairs.

    Each non-empty line holds comma-separated "movement-duration" tokens,
    e.g. "1-5,10-3" -> [(1, 5), (10, 3)]. Returns one list per line.

    Improvements over the original: the file is closed via a context manager,
    line endings are stripped robustly (handles \r\n too), and blank lines
    are skipped instead of raising ValueError.
    """
    parsedInstructions = []
    with open(fileName, "r") as archivo:
        for line in archivo:
            line = line.strip()
            if not line:
                continue
            velocitySeries = []
            for token in line.split(','):
                parts = token.split('-')
                velocitySeries.append((int(parts[0]), int(parts[1])))
            parsedInstructions.append(velocitySeries)
    return parsedInstructions
def moveRobot(clientID, robotMovement, LUM, LLM, RUM, RLM):
    """Translate a movement code (1-10) into leg-motor target velocities.

    Codes 1-4 and 10 act on the right leg (RUM/RLM); codes 5-9 act on the
    left leg (LUM/LLM). Unknown codes are ignored. As in the original, the
    upper motor is always commanded before the lower one.
    """
    # code -> (upper speed, lower speed, acts on right leg)
    moves = {
        1: (1, -1, True),    # Right_contract
        2: (-1, 1, True),    # Right_stretch
        3: (1, 0, True),     # Right_ahead
        4: (-1, 0, True),    # Right_back
        5: (1, -1, False),   # Left_contract
        6: (-1, 1, False),   # Left_stretch
        7: (1, 0, False),    # Left_ahead
        8: (-1, 0, False),   # Left_back
        9: (0, 0, False),    # Stop_left
        10: (0, 0, True),    # Stop_right
    }
    if robotMovement not in moves:
        return
    upperSpeed, lowerSpeed, rightLeg = moves[robotMovement]
    upperMotor, lowerMotor = (RUM, RLM) if rightLeg else (LUM, LLM)
    vrep.simxSetJointTargetVelocity(clientID, upperMotor, upperSpeed, vrep.simx_opmode_oneshot)
    vrep.simxSetJointTargetVelocity(clientID, lowerMotor, lowerSpeed, vrep.simx_opmode_oneshot)
def distancia(puntoA, puntoB):
    """Manhattan distance restricted to the x (index 0) and z (index 2) axes."""
    delta_x = abs(puntoA[0] - puntoB[0])
    delta_z = abs(puntoA[2] - puntoB[2])
    return delta_x + delta_z
def puntoMovil(tiempo):
    """Position of the moving target at simulation time `tiempo` (seconds).

    The point slides along x at 0.088 units/s starting from x = -1.5, at a
    fixed height y = 0.03 and depth z = 0.8.
    """
    x = tiempo * 0.088 - 1.5
    return (x, 0.03, 0.8)
#Código del controlador
'''
TODO Debería mejorar el modo de ejecución por parámetros por consola
TODO Debería agregar un modo más solo para ver las corridas que se encuentran en archivo.txt, porque muchas veces estoy viendo esas para compararlas con las generadas.
'''
def mainLoop(mode):
    """Run every movement sequence from file on the V-REP robot and score it.

    mode == 'incr': read "nuevo.txt", score each sequence, then breed a new
    generation from the 10 best and write it back to "nuevo.txt".
    Any other mode: read "archivo.txt" and plot the head x-position of the
    10 best runs against the moving target point.
    """
    nombreArchivo = ""
    if mode == 'incr':
        print("El programa se ejecutará de la manera tradicional")
        nombreArchivo = "nuevo.txt"
    else:
        print("El programa se ejecutará para visualizar las mejores corridas")
        nombreArchivo = "archivo.txt"
    vrep.simxFinish(-1)
    portNumb = 19997
    clientID = vrep.simxStart('127.0.0.1', portNumb, True, True, 5000, 5)
    if clientID != -1 :
        print ("se pudo establecer la conexión con la api del simulador")
        #Recover handlers for robot parts
        LUM,LLM,RUM,RLM,head = recoverRobotParts(clientID)
        #Set Initial Target Velocity to 0
        LUMSpeed = 0
        LLMSpeed = 0
        RUMSpeed = 0
        RLMSpeed = 0
        setVelocity(clientID,LUM,LUMSpeed)
        setVelocity(clientID,LLM,LLMSpeed)
        setVelocity(clientID,RUM,RUMSpeed)
        setVelocity(clientID,RLM,RLMSpeed)
        #Read Instructions from file
        instructions = readInstructions(nombreArchivo)
        #Set simulation to be Synchonous instead of Asynchronous
        vrep.simxSynchronous(clientID, True)
        #Setting Time Step to 50 ms (miliseconds)
        dt = 0.05
        #WARNING!!! - Time step should NEVER be set to custom because it messes up the simulation behavior!!!!
        #vrep.simxSetFloatingParameter(clientID, vrep.sim_floatparam_simulation_time_step, dt, vrep.simx_opmode_blocking)
        #Start simulation if it didn't start
        vrep.simxStartSimulation(clientID,vrep.simx_opmode_blocking)
        #This are for controlling where I'm in the instructions while simulation is running
        secuenceIndex = 0
        runInfo = []
        headSecuenceTrace = []
        lowerSpeed, upperSpeed = 0, 0
        secuenceTimes = []
        for secuence in instructions:
            instructionIndex = 0
            headTrace = []
            extraPoints = 0
            runtime = 0
            for instruction in secuence:
                instructionIndex+=1
                moveRobot(clientID,instruction[0], LUM, LLM, RUM, RLM)
                #This is what makes the simulation Synchronous
                initialTime = 0.0
                actualTime = initialTime + dt
                runtime += dt
                #Condition to stop simulation
                hasFallen = False
                vrep.simxSynchronousTrigger(clientID)
                #Retrieve head position
                headPosition = vrep.simxGetObjectPosition(clientID, head, -1, vrep.simx_opmode_oneshot)
                #headTrace.append((headPosition,runtime))
                # instruction[1] is the duration in tenths of a second.
                while((actualTime - initialTime) < (instruction[1]/10)):
                    #Make the simulation run one step (dt determines how many seconds pass by between step and step)
                    vrep.simxSynchronousTrigger(clientID)
                    #Advance time in my internal counter
                    actualTime = actualTime + dt
                    runtime += dt
                    #TODO do I still need the extra points for time?
                    extraPoints += dt
                    #Retrieve head position
                    headPosition = vrep.simxGetObjectPosition(clientID, head, -1, vrep.simx_opmode_oneshot)
                    headTrace.append((headPosition,runtime))
                    #Verify that the model hasn't fallen
                    if(headPosition[0] == 0 and headPosition[1][2]<0.65):
                        print("Posición de la cabeza:", headPosition[1][2])
                        print("tiempo: ", runtime)
                        hasFallen = True
                        break
                if(hasFallen):
                    break
            if (hasFallen):
                print ("Secuence: ", secuenceIndex, " has fallen!!")
            else:
                print("Secuence: ", secuenceIndex, " has finished without falling")
                print(secuence)
            secuenceTimes.append(extraPoints)
            #Here I collect the data for the whole secuence
            #filter not valid positions
            headTrace = list(filter(lambda x: x[0][0] == 0,headTrace))
            #add to whole run trace info
            headSecuenceTrace.append(headTrace)
            # Falling carries a flat -50 score penalty.
            fallenFactor = 0
            if hasFallen:
                fallenFactor = -50
            #format: (index, score, headtrace((valid,(x,y,z),time))
            # Score: sum of log(1/distance) between the head and the moving
            # target at each sampled instant — closer tracking scores higher.
            runInfo.append((secuenceIndex, sum(map(lambda x:math.log(1/distancia(x[0][1],puntoMovil(x[1]))),headTrace))+fallenFactor,headTrace))
            print("puntaje obtenido",runInfo[-1][1])
            secuenceIndex+=1
            #Stop_Start_Simulation
            vrep.simxStopSimulation(clientID, vrep.simx_opmode_blocking)
            #This sleep is necesary for the simulation to finish stopping before starting again
            time.sleep(2)
            vrep.simxStartSimulation(clientID, vrep.simx_opmode_blocking)
        #Should always end by finishing connetions
        vrep.simxStopSimulation(clientID, vrep.simx_opmode_blocking)
        vrep.simxFinish(clientID)
        '''
        TODO Ahora tengo que graficar la posición x de la cabez en las mejores 10 corridas y también el punto móvil
        '''
        #Visualization of the info collected
        sortedScore = sorted(runInfo, key=lambda x:x[1], reverse=True)
        filteredBestSec = []
        for i in range(0,10):
            filteredBestSec.append(instructions[sortedScore[i][0]])
        sg.recordSecuences(filteredBestSec, "mejores.txt")
        # print(runInfo)
        sg.recordRunOutput(runInfo, "salida.txt")
        if mode == 'incr':
            # Breed 10 mutated variants from each of the best sequences and
            # persist the next generation.
            newLot = []
            for x in filteredBestSec:
                newLot = newLot + sg.generateNewSec(x,10)
            sg.recordSecuences(filteredBestSec + newLot, "nuevo.txt")
        else:
            # Plot only the 10 best runs plus the moving target point.
            for h in range(0,10):
                # print ("esto debería ser el tiempo: ", sortedScore[h][2][0])
                # print ("Que es esto?: ", sortedScore[h][0], " ---- ", sortedScore[h][1])
                # # print ("esto debería ser el valor de x: ", sortedScore[h][2][0][1])
                # print (list(map(lambda x:x[2][1],sortedScore[h])))
                # print (list(map(lambda x:x[2][0][1][0],sortedScore[h])))
                plt.plot(list(map(lambda x:x[1],sortedScore[h][2])),list(map(lambda x:x[0][1][0],sortedScore[h][2])))
                #plt.plot(list(map(lambda x:x[1],h)),list(map(lambda x:x[0][1][1],sortedScore[h])))
                #plt.plot(list(map(lambda x:x[1],h)),list(map(lambda x:x[0][1][2],sortedScore[h])))
            timeList = list(map(lambda x:x[1],sortedScore[0][2]))
            plt.plot(timeList,list(map(lambda x: puntoMovil(x)[0],timeList)))
            plt.show()
    else:
        print ("No se pudo establecer conexión con la api del simulador")
        print ("Verificar que el simulador está abierto")
# for x in range(0,25):
# print("Vuelta número: ",x)
# mainLoop('incr')
mainLoop('visual')
| [
"[email protected]"
] | |
b9f4c1ee05705264d5c2b945f6b48035a79bbcbf | 1f0e94ab15f00dc02f94420e374a8328723f95bb | /Sorting/mergesort.py | 4166122c89fa0b5c6eb011241c94d80bb26101b3 | [] | no_license | fdelaros/Lab2_202010 | 5bdf5a7a75ddad5de384be43779c7b8ff12b5909 | 7a4ba7edd52f61ec98b405d33f63fbb38a1c2947 | refs/heads/master | 2022-11-23T17:15:06.525186 | 2020-07-28T19:12:32 | 2020-07-28T19:12:32 | 280,005,240 | 0 | 0 | null | 2020-07-15T23:35:41 | 2020-07-15T23:35:40 | null | UTF-8 | Python | false | false | 2,350 | py | import config as cf
from ADT import list as lt
from DataStructures import listnode as node
def mergeSort(lst, lessfunction):
    """Sort the 1-indexed ADT list `lst` in place via top-down merge sort.

    `lessfunction(a, b)` must return True when `a` orders before `b`.
    """
    # Scratch list of identical size/content, reused by every merge step.
    scratch = lt.subList(lst, 1, lt.size(lst))
    sort(lst, scratch, lessfunction, 1, lt.size(lst))
def sort(lst, auxlst, lessfunction, lo, hi):
    """Recursively sort the 1-indexed range [lo, hi] of `lst`.

    `auxlst` is a same-size scratch list threaded through to merge();
    `lessfunction` is the element comparator.
    """
    if lo >= hi:
        # Ranges of size 0 or 1 are already sorted.
        return
    mid = (lo + hi) // 2
    # Sort each half, then merge them back together.
    sort(lst, auxlst, lessfunction, lo, mid)
    sort(lst, auxlst, lessfunction, mid + 1, hi)
    merge(lst, auxlst, lessfunction, lo, mid, hi)
def merge(lst, auxlst, lessfunction, lo, mid, hi):
    """
    Sort the range [lo, hi] of lst by merging its two already-sorted halves,
    held in the subranges [lo, mid] and [mid+1, hi]. Lists are 1-indexed ADTs.
    """
    if hi <= lo:
        return
    lt.copy(lst, auxlst, lo, hi) # copy the [lo, hi] slice of lst into auxlst over the same range
    i = lo # scans the sorted half of auxlst in [lo, mid]
    j = mid+1 # scans the sorted half of auxlst in [mid+1, hi]
    for k in range(lo, hi+1):
        if i > mid: # left half exhausted: take from the right half
            lt.changeInfo(lst, k, lt.getElement(auxlst,j))
            j += 1
        elif j > hi: # right half exhausted: take from the left half
            lt.changeInfo(lst, k, lt.getElement(auxlst,i))
            i += 1
        elif lessfunction(lt.getElement(auxlst,j), lt.getElement(auxlst,i)): # auxlst[j] < auxlst[i]
            lt.changeInfo(lst, k, lt.getElement(auxlst,j))
            j += 1
        else: # auxlst[i] <= auxlst[j] (keeps the sort stable)
            lt.changeInfo(lst, k, lt.getElement(auxlst,i))
            i += 1
i += 1 | [
"[email protected]"
] | |
dd9e1a964765428a5202a1ba8da704daab76d517 | 6fc5050ee0f983edeed08e7ec6a3949c5425194a | /problem_003/problem_003.py | 72cd273036f43e45318bcebf030bf8db70ff378c | [] | no_license | HKervadec/Project-Euler | 474d8d840475b19e7955a8324fbe90138143a02a | a2a537904596599459e2cbc265834045b9182067 | refs/heads/master | 2020-12-25T18:31:59.100141 | 2017-08-25T11:35:36 | 2017-08-25T11:35:36 | 9,522,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 964 | py | #!/usr/bin/env python3
# https://projecteuler.net/
# The prime factors of 13195 are 5, 7, 13 and 29.
# What is the largest prime factor of the number 600851475143 ?
from time import time
from tools.utils import measure_func
from tools.prime import prime
def solve_2():
    """Largest prime factor of 600851475143 via divisor pairing.

    Walks odd candidate divisors up to sqrt(number); for each divisor i it
    also considers the paired cofactor number // i, keeping the largest
    value that is prime.  (2 is never tried: the target is odd.)
    """
    number = 600851475143
    i = 3
    maxPrimeFactor = 1  # 1 is not prime; acts as "none found yet"
    lim = number**0.5
    while i < lim:
        if not (number % i):
            if prime(i):
                maxPrimeFactor = max(maxPrimeFactor, i)
            # Bug fix: `number / i` is a float under Python 3, which was
            # passed to prime() and could become the returned "factor".
            cofactor = number // i
            if prime(cofactor):
                maxPrimeFactor = max(maxPrimeFactor, cofactor)
        i += 2
    return maxPrimeFactor
def solve():
    """Largest prime factor of 600851475143 by trial division.

    Divides out the smallest remaining factor repeatedly.  Because factors
    are stripped in increasing order, every factor found is prime; once
    factor * factor exceeds the remainder, the remainder is itself the
    largest prime factor.  (The original reset the candidate to 2 after
    every division and looped until i == number, which is correct but
    needlessly O(number) for a prime remainder.)
    """
    number = 600851475143
    factor = 2
    while factor * factor <= number:
        if number % factor == 0:
            number //= factor  # strip this prime completely before moving on
        else:
            factor += 1
    return number
# **************************************
if __name__ == "__main__":
    # Time both implementations for comparison.
    measure_func(solve)
    measure_func(solve_2)
| [
"[email protected]"
] | |
165c12e14da405727a906d3e419de0a1d7201897 | 37279a0ac59daab785f1d1583851eb3f5dad30e1 | /workflow/oer_analysis/oer_scaling/oer_scaling.py | 4e2df538b6004a0966af53cec1e2bb3ef03c7538 | [
"MIT"
] | permissive | flash-jaehyun/PROJ_IrOx_OER | 50269e34f428e4c54b34afe3e07aae77e6ff82fc | e0b3ef8e69deeb41d62059a92f466477238efbed | refs/heads/master | 2023-02-09T05:45:11.969160 | 2020-12-30T23:18:54 | 2020-12-30T23:18:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,594 | py | # -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python [conda env:PROJ_irox_oer] *
# language: python
# name: conda-env-PROJ_irox_oer-py
# ---
# # Creating OER scaling plot from raw data, not my modules
# ---
# ### Import Modules
# + jupyter={"source_hidden": true}
import os
print(os.getcwd())
import sys
import time; ti = time.time()
import copy
import numpy as np
from sklearn.metrics import mean_squared_error
import plotly.graph_objs as go
from plotly.subplots import make_subplots
# #########################################################
from proj_data import layout_shared as layout_shared_main
from proj_data import scatter_shared_props as scatter_shared_props_main
from proj_data import stoich_color_dict
# #########################################################
from methods import get_df_features_targets
# #########################################################
from layout import layout
# -
from methods import isnotebook
isnotebook_i = isnotebook()
if isnotebook_i:
from tqdm.notebook import tqdm
verbose = True
show_plot = True
else:
from tqdm import tqdm
verbose = False
show_plot = False
# ### Read Data
df_features_targets = get_df_features_targets()
# + active=""
#
#
# +
df_features_targets = df_features_targets.dropna(subset=[
("targets", "g_o", ""),
("targets", "g_oh", ""),
])
# df_targets = df_features_targets["targets"].dropna()
df_targets = df_features_targets["targets"]
x_array = df_targets["g_oh"]
y_array = df_targets["g_o"]
color_array = df_features_targets["format"]["color"]["stoich"]
# +
# print(111 * "TEMP | ")
# print("")
# df_features_targets.columns.tolist()
# df_tmp = df_features_targets.loc[:,
# [
# ('format', 'color', 'stoich'),
# ('data', 'stoich', ''),
# ]
# ]
# for index_i, row_i in df_tmp.iterrows():
# tmp = 42
# color_i = row_i["format"]["color"]["stoich"]
# stoich_i = row_i["data"]["stoich"][""]
# # print("# ", stoich_i, " '", color_i, "'", sep="")
# if stoich_i == "AB2":
# if color_i == "#46cf44":
# tmp = 42
# # print("AB2 Good")
# else:
# print("AB2 Bad")
# if stoich_i == "AB3":
# if color_i == "#42e3e3":
# tmp = 42
# # print("AB3 Good")
# else:
# print("AB3 Bad")
# -
# ### Fitting data

# Dense ΔG_OH grid extending slightly past the data range, used to draw the fits.
x_poly = np.linspace(x_array.min() - 0.2, x_array.max() + 0.2, num=50)

# +
# First-order (linear) scaling fit: ΔG_O ≈ a·ΔG_OH + b
z_1 = np.polyfit(
    x_array, y_array,
    1,
    )
p_1 = np.poly1d(z_1)

print(
    "Polynomial Fit (1st order): ",
    "\n",
    [np.round(i, 3) for i in list(z_1)],
    sep="")

# RMSE of the linear fit evaluated on the training points themselves.
rmse_i = mean_squared_error(
    y_array,
    [p_1(i) for i in x_array],
    squared=False)

print(
    "RMSE (1st order): ",
    rmse_i,
    sep="")

y_poly_1 = [p_1(i) for i in x_poly]

# +
# Second-order (quadratic) fit, to probe curvature in the scaling relation.
z_2 = np.polyfit(
    x_array, y_array,
    2,
    )
p_2 = np.poly1d(z_2)

print(
    "Polynomial Fit (2nd order): ",
    "\n",
    [np.round(i, 3) for i in list(z_2)],
    sep="")

rmse_i = mean_squared_error(
    y_array,
    [p_2(i) for i in x_array],
    squared=False)

print(
    "RMSE (2nd order): ",
    rmse_i,
    sep="")

y_poly_2 = [p_2(i) for i in x_poly]
# -
# ### Layout
# +
layout_shared = copy.deepcopy(layout_shared_main)
layout_master = layout_shared.update(
layout
)
layout_master["xaxis"]["range"] = [x_array.min() - 0.2, x_array.max() + 0.2]
layout_master["title"] = "*O vs *OH Scaling Plot (1st and 2nd order fits)"
# -
# ### Instantiate scatter plots
# +
trace_poly_1 = go.Scatter(
x=x_poly, y=y_poly_1,
mode="lines",
line_color="grey",
name="poly_fit (1st order)",
)
trace_poly_2 = go.Scatter(
x=x_poly, y=y_poly_2,
mode="lines",
line_color="black",
name="poly_fit (2nd order)",
)
# +
trace = go.Scatter(
x=x_array, y=y_array,
mode="markers",
# marker_color=color_i,
marker_color=color_array,
name="main",
)
scatter_shared_props = copy.deepcopy(scatter_shared_props_main)
trace = trace.update(
scatter_shared_props,
overwrite=False,
)
# -
# ### Instantiate figure
# +
fig = go.Figure(
data=[
trace_poly_1,
trace_poly_2,
trace,
],
layout=layout_master,
)
fig.write_json(
os.path.join(
os.environ["PROJ_irox_oer"],
"workflow/oer_analysis/oer_scaling",
"out_plot/oer_scaling__O_vs_OH_plot.json"))
# -
if show_plot:
fig.show()
# + active=""
# There seems to be some nonlinearities at weak bonding energies
# +
# assert False
# + active=""
#
#
#
#
#
#
#
#
#
#
#
# -
# ## Plotting Histogram
df_ab2 = df_features_targets[df_features_targets["data"]["stoich"] == "AB2"]
df_ab3 = df_features_targets[df_features_targets["data"]["stoich"] == "AB3"]
print(
# "\n",
"AB2 ΔG_O Mean: ",
df_ab2["targets"]["g_o"].mean(),
"\n",
"AB3 ΔG_O Mean: ",
df_ab3["targets"]["g_o"].mean(),
"\n",
"diff: ",
df_ab3["targets"]["g_o"].mean() - df_ab2["targets"]["g_o"].mean(),
"\n",
40 * "-",
"\n",
"AB2 ΔG_OH Mean: ",
df_ab2["targets"]["g_oh"].mean(),
"\n",
"AB3 ΔG_OH Mean: ",
df_ab3["targets"]["g_oh"].mean(),
"\n",
"diff: ",
df_ab3["targets"]["g_oh"].mean() - df_ab2["targets"]["g_oh"].mean(),
sep="")
# +
shared_layout_hist = go.Layout(
yaxis_title="N",
barmode="overlay",
)
shared_trace_hist = dict(
opacity=0.55,
nbinsx=15,
)
# -
# ### Trying to get the number of data in bins to set y-axis range (NOT WORKING SO FAR)
# +
# y_targets_list = [
# df_ab2.targets.g_oh,
# # df_ab3.targets.g_oh,
# # df_ab2.targets.g_o,
# # df_ab3.targets.g_o,
# ]
# max_num_data_list = []
# for y_target_i in y_targets_list:
# width = (y_target_i.max() - y_target_i.min()) / shared_trace_hist["nbinsx"]
# num_data_in_sliver_list = []
# for i in np.linspace(y_target_i.min(), y_target_i.max(), 200):
# i_upper = i + width / 2
# i_lower = i - width / 2
# print(i_upper, i_lower)
# y_in_sliver = y_target_i[
# (y_target_i < i_upper) & \
# (y_target_i > i_lower)
# ]
# num_data_in_sliver = y_in_sliver.shape[0]
# #print(num_data_in_sliver)
# num_data_in_sliver_list.append(num_data_in_sliver)
# max_num_data_in_sliver_i = np.max(num_data_in_sliver_list)
# print(max_num_data_in_sliver_i)
# print("")
# max_num_data_list.append(max_num_data_in_sliver_i)
# max_max_num_in_sliver = np.max(max_num_data_list)
# max_max_num_in_sliver
# # width =
# (y_target_i.max() - y_target_i.min()) / shared_trace_hist["nbinsx"]
# # y_targets_list[0]
# # y_in_sliver =
# y_target_i[
# (y_target_i < 0.6) & \
# (y_target_i > 0.4)
# ]
# -
# ### Instantiate *OH plots
# +
# %%capture
fig_oh = go.Figure()
fig_oh.add_trace(
go.Histogram(
x=df_ab2.targets.g_oh,
marker_color=stoich_color_dict["AB2"],
name="AB2",
).update(dict1=shared_trace_hist)
)
fig_oh.add_trace(
go.Histogram(
x=df_ab3.targets.g_oh,
marker_color=stoich_color_dict["AB3"],
name="AB3",
).update(dict1=shared_trace_hist)
)
# #########################################################
# Layout manipulation
layout_shared = copy.deepcopy(layout_shared_main)
layout_shared.update(
go.Layout(
# title="TEMP01",
xaxis=go.layout.XAxis(
title="ΔG<sub>*OH</sub>",
),
),
overwrite=False,
)
layout_shared.update(shared_layout_hist)
fig_oh.update_layout(dict1=layout_shared)
# -
# ### Instantiate *O plots
# +
# %%capture
fig_o = go.Figure()
fig_o.add_trace(
go.Histogram(
x=df_ab2.targets.g_o,
marker_color=stoich_color_dict["AB2"],
name="AB2",
).update(dict1=shared_trace_hist)
)
fig_o.add_trace(
go.Histogram(
x=df_ab3.targets.g_o,
marker_color=stoich_color_dict["AB3"],
name="AB3",
).update(dict1=shared_trace_hist)
)
# #########################################################
# Layout manipulation
layout_shared = copy.deepcopy(layout_shared_main)
layout_shared.update(
go.Layout(
# title="",
xaxis=go.layout.XAxis(
title="ΔG<sub>*O</sub>",
),
),
overwrite=False,
)
layout_shared.update(shared_layout_hist)
fig_o.update_layout(dict1=layout_shared)
# -
# ### Instantiate subplot
# +
# %%capture
fig = make_subplots(rows=1, cols=2)
for trace_i in fig_o.data:
fig.add_trace(
trace_i,
row=1, col=1,
)
for trace_i in fig_oh.data:
fig.add_trace(
trace_i,
row=1, col=2,
)
fig.update_layout(
height=600,
width=1000,
title_text="ΔG<sub>*O</sub> and ΔG<sub>*OH</sub> Histograms (eV)",
)
fig.update_layout(layout_shared_main)
fig.update_layout(shared_layout_hist)
fig.update_xaxes(
fig_o.layout["xaxis"],
row=1, col=1,
overwrite=False,
)
fig.update_xaxes(
fig_oh.layout["xaxis"],
row=1, col=2,
overwrite=False,
)
y_range_ub = 45
fig.update_yaxes(
fig_o.layout["yaxis"].update(
range=[0, y_range_ub],
),
row=1, col=1,
overwrite=False,
)
fig.update_yaxes(
fig_oh.layout["yaxis"].update(
range=[0, y_range_ub],
),
row=1, col=2,
overwrite=False,
)
# -
# ### Saving plot to json
fig.write_json(
os.path.join(
os.environ["PROJ_irox_oer"],
"workflow/oer_analysis/oer_scaling",
"out_plot/oer_scaling__O_OH_histogram.json"))
if show_plot:
fig.show()
# #########################################################
print(20 * "# # ")
print("All done!")
print("Run time:", np.round((time.time() - ti) / 60, 3), "min")
print("oer_scaling.ipynb")
print(20 * "# # ")
# #########################################################
# + active=""
#
#
#
# + jupyter={"source_hidden": true}
# stoich_color_dict["AB2"]
# # go.Histogram?
# + jupyter={"source_hidden": true}
# df_features_targets.head()
# df_features_targets.columns.tolist()
# + jupyter={"source_hidden": true}
# color_i
# + jupyter={"source_hidden": true}
# print(len(x_array))
# print(len(y_array))
# print(len(color_i))
# + jupyter={"source_hidden": true}
# df_targets.sort_values("g_oh")
| [
"[email protected]"
] | |
0c766cc55b91d47cac770418a24cf7240e8dc7e0 | b61994fee5c28b1e2d0c25c61e90af1f8bbbfd85 | /mymodules/myclass/augmentations/mydataAugmentation_difSpe.py | 4a779b281c7aae0aed9236c9d3d4b1947615054e | [] | no_license | ZhaoJinyu96/DNN_RGBStokes | f083bde8559303b3112dfc1b62c35575031018df | 31c1d7cd963632a9b1aacd14dd46bb360b02c75f | refs/heads/master | 2020-07-17T07:11:05.493759 | 2019-09-06T00:42:05 | 2019-09-06T00:42:05 | 205,968,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,499 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 7 15:38:09 2019
@author: 0000145046
"""
# add path
import sys
import pathlib
current_dir = pathlib.Path(__file__).resolve().parent
sys.path.append(str(current_dir.joinpath("../../")))
import cv2
import numpy as np
import mymodules.myclass.augmentations.dataAugmentationBaseClass as dataAugmentationBaseClass
from mymodules.myutils.polarutils import normalize_s0s1s2
from mymodules.myutils.polarutils import de_normalize_s0s1s2
from mymodules.myutils.polarutils import calc_fourPolar_from_stokes
from mymodules.myutils.polarutils import calc_s0s1s2_from_fourPolar
class DataAugmentation_difSpe_Base(dataAugmentationBaseClass.DataAugmentationBaseClass):
    """Augmentation base for datasets carrying diffuse/specular ground truth.

    On top of the parent's train/ (s0, s1, s2) and gt/ images, each dataset
    directory has a gt_difspe/ subtree with "<name>_spe" (specular) and
    "<name>_dif" (diffuse) variants of the Stokes images; loading and
    saving here keep all three image sets in sync.

    # NOTE(review): self.max and self.bit are assumed to be set by the
    # parent class constructor -- confirm there.
    """

    def __init__(self, path_list, bit, operationName):
        super().__init__(path_list, bit, operationName)

    def make_savedir(self, path):
        """Create the output directory tree for this augmentation run."""
        # "./hoge/hoge" -> "./hoge/hoge_noise"
        basePath = pathlib.Path(str(path) + "_" + self.operationName)
        basePath.joinpath("gt/").mkdir(parents=True)
        basePath.joinpath("gt_difspe/").mkdir(parents=True)
        basePath.joinpath("train/").mkdir(parents=True)
        # "./hoge/hoge_noise/gt_difspe/hoge_noise_spe"
        path_spe = basePath.joinpath("gt_difspe/", basePath.name+"_spe")
        path_dif = basePath.joinpath("gt_difspe/", basePath.name+"_dif")
        path_spe.joinpath("train/").mkdir(parents=True)
        path_dif.joinpath("train/").mkdir(parents=True)

    def load_s0s1s2(self, path, names):
        """Load one s0/s1/s2 triple from path/train and normalize by self.max.

        names[0..2] are the s0, s1 and s2 file names.
        """
        s0_name = path.joinpath("train/",names[0])
        s1_name = path.joinpath("train/",names[1])
        s2_name = path.joinpath("train/",names[2])
        s0_img = cv2.imread(str(s0_name), -1).astype(np.float32)
        s1_img = cv2.imread(str(s1_name), -1).astype(np.float32)
        s2_img = cv2.imread(str(s2_name), -1).astype(np.float32)
        s0_img, s1_img, s2_img = normalize_s0s1s2(s0_img, s1_img, s2_img, self.max)
        return s0_img, s1_img, s2_img

    def load_image(self, path, names):
        """Return (s0, s1, s2, gt, s0spe, s1spe, s2spe, s0dif, s1dif, s2dif).

        Stokes components are normalized; gt (names[3]) is loaded raw.
        """
        s0_img, s1_img, s2_img = self.load_s0s1s2(path, names) # NOTICE, they are normalized!!
        gt_name = path.joinpath("gt/",names[3])
        gt_img = cv2.imread(str(gt_name), -1).astype(np.float32)
        path_spe = path.joinpath("gt_difspe/", path.name+"_spe")
        path_dif = path.joinpath("gt_difspe/", path.name+"_dif")
        s0spe_img, s1spe_img, s2spe_img = self.load_s0s1s2(path_spe, names)
        s0dif_img, s1dif_img, s2dif_img = self.load_s0s1s2(path_dif, names)
        return (s0_img, s1_img, s2_img,
                gt_img,
                s0spe_img, s1spe_img, s2spe_img,
                s0dif_img, s1dif_img, s2dif_img)

    def save_image(self, savepath, names, imgs):
        """Write all ten images back under savepath, cast to self.bit."""
        s0_img, s1_img, s2_img, gt_img, s0spe_img, s1spe_img, s2spe_img, s0dif_img, s1dif_img, s2dif_img = imgs
        cv2.imwrite(
            str(savepath.joinpath("train/",names[0])),
            s0_img.astype(self.bit))
        cv2.imwrite(
            str(savepath.joinpath("train/",names[1])),
            s1_img.astype(self.bit))
        cv2.imwrite(
            str(savepath.joinpath("train/",names[2])),
            s2_img.astype(self.bit))
        cv2.imwrite(
            str(savepath.joinpath("gt/",names[3])),
            gt_img.astype(self.bit))
        path_spe = savepath.joinpath("gt_difspe/", savepath.name+"_spe")
        path_dif = savepath.joinpath("gt_difspe/", savepath.name+"_dif")
        cv2.imwrite(
            str(path_spe.joinpath("train/",names[0])),
            s0spe_img.astype(self.bit))
        cv2.imwrite(
            str(path_spe.joinpath("train/",names[1])),
            s1spe_img.astype(self.bit))
        # NOTE(review): string concatenation here vs two-arg joinpath
        # elsewhere -- both build the same "train/<name>" path.
        cv2.imwrite(
            str(path_spe.joinpath("train/" + names[2])),
            s2spe_img.astype(self.bit))
        cv2.imwrite(
            str(path_dif.joinpath("train/",names[0])),
            s0dif_img.astype(self.bit))
        cv2.imwrite(
            str(path_dif.joinpath("train/",names[1])),
            s1dif_img.astype(self.bit))
        cv2.imwrite(
            str(path_dif.joinpath("train/",names[2])),
            s2dif_img.astype(self.bit))
# augmentations
class FlipImage_Horizontal(DataAugmentation_difSpe_Base):
    """Horizontal mirror of all Stokes images and the ground truth.

    A left-right flip negates s2 (defined elsewhere in this file as
    i_45 - i_135) and mirrors gt channel 2, which the original comments
    treat as a coordinate with 0 meaning background.
    """

    def __init__(self, path_list, bit, operationName):
        super().__init__(path_list, bit, operationName)

    def augmentation(self, imgs):
        """Return the horizontally flipped, de-normalized image tuple."""
        s0, s1, s2, gt, s0spe, s1spe, s2spe, s0dif, s1dif, s2dif = imgs
        # flip coordinate of gt image
        # Mirror channel 2 about the centre of the [0, self.max] range.
        gt[:,:,2] = gt[:,:,2] * (-1) + self.max
        gt[:,:,2][gt[:,:,0]==0] = 0 # z=0 means background area
        # flip images
        s0 = s0[:,::-1,:]
        s1 = s1[:,::-1,:]
        s2 = s2[:,::-1,:] * -1
        s0, s1, s2 = de_normalize_s0s1s2(s0, s1, s2, self.max)
        gt = gt[:,::-1,:]
        s0spe = s0spe[:,::-1,:]
        s1spe = s1spe[:,::-1,:]
        s2spe = s2spe[:,::-1,:] * -1
        s0spe, s1spe, s2spe = de_normalize_s0s1s2(s0spe, s1spe, s2spe, self.max)
        s0dif = s0dif[:,::-1,:]
        s1dif = s1dif[:,::-1,:]
        s2dif = s2dif[:,::-1,:] * -1
        s0dif, s1dif, s2dif = de_normalize_s0s1s2(s0dif, s1dif, s2dif, self.max)
        return (s0, s1, s2,
                gt,
                s0spe, s1spe, s2spe,
                s0dif, s1dif, s2dif)
class Blur(DataAugmentation_difSpe_Base):
    """Box-blur of the input Stokes images.

    `square` is the kernel-size tuple passed to cv2.blur.  Only the input
    s0/s1/s2 are blurred; the spe/dif ground-truth copies are merely
    de-normalized -- presumably intentional so targets stay sharp (confirm).
    """

    def __init__(self, path_list, bit, operationName, square):
        super().__init__(path_list, bit, operationName)
        self.square = square  # cv2.blur kernel size

    def augmentation(self, imgs):
        """Blur s0/s1/s2, clip to valid Stokes ranges, and de-normalize all."""
        s0, s1, s2, gt, s0spe, s1spe, s2spe, s0dif, s1dif, s2dif = imgs
        s0 = np.clip(cv2.blur(s0, self.square), 0, 1)
        s1 = np.clip(cv2.blur(s1, self.square), -1, 1)
        s2 = np.clip(cv2.blur(s2, self.square), -1, 1)
        s0, s1, s2 = de_normalize_s0s1s2(s0, s1, s2, self.max)
        s0spe, s1spe, s2spe = de_normalize_s0s1s2(s0spe, s1spe, s2spe, self.max)
        s0dif, s1dif, s2dif = de_normalize_s0s1s2(s0dif, s1dif, s2dif, self.max)
        return (s0, s1, s2,
                gt,
                s0spe, s1spe, s2spe,
                s0dif, s1dif, s2dif)
class Noise(DataAugmentation_difSpe_Base):
    """Additive Gaussian noise applied in the four-polarizer domain.

    The normalized Stokes images are converted to the four polarizer
    intensities, zero-mean noise with std `sigma` is added to each, and
    the result is converted back and clipped to valid Stokes ranges.
    The spe/dif ground-truth copies are left noise-free.
    """

    def __init__(self, path_list, bit, operationName, sigma):
        super().__init__(path_list, bit, operationName)
        self.sigma = sigma  # std of the Gaussian noise in intensity units

    def augmentation(self, imgs):
        """Return the noisy, de-normalized image tuple."""
        s0, s1, s2, gt, s0spe, s1spe, s2spe, s0dif, s1dif, s2dif = imgs
        # reconstruct cosine curve
        i_0, i_45, i_90, i_135 = calc_fourPolar_from_stokes(s0, s1, s2)
        i_list = np.array([i_0, i_45, i_90, i_135])
        # add gaussian noise
        row, col, ch = s0.shape
        for inum in range(i_list.shape[0]):
            gauss = np.random.normal(0, self.sigma, (row, col, ch))
            gauss = gauss.reshape(row, col, ch)
            i_list[inum] += gauss
        # calc new stokes images
        i_0 = i_list[0]
        i_45 = i_list[1]
        i_90 = i_list[2]
        i_135 = i_list[3]
        s0, s1, s2 = calc_s0s1s2_from_fourPolar(i_0, i_45, i_90, i_135)
        s0 = np.clip(s0, 0, 1)
        s1 = np.clip(s1, -1, 1)
        s2 = np.clip(s2, -1, 1)
        s0, s1, s2 = de_normalize_s0s1s2(s0, s1, s2, self.max)
        s0spe, s1spe, s2spe = de_normalize_s0s1s2(s0spe, s1spe, s2spe, self.max)
        s0dif, s1dif, s2dif = de_normalize_s0s1s2(s0dif, s1dif, s2dif, self.max)
        return (s0, s1, s2,
                gt,
                s0spe, s1spe, s2spe,
                s0dif, s1dif, s2dif)
class Intensity(DataAugmentation_difSpe_Base):
    """Random global gain applied in the four-polarizer domain.

    A gain drawn uniformly from [0.5, 1.5) multiplies the polarizer
    intensities of the input AND of the spe/dif ground-truth copies,
    keeping all three image sets photometrically consistent.
    """

    def __init__(self, path_list, bit, operationName):
        super().__init__(path_list, bit, operationName)

    def augmentation(self, imgs):
        """Return the gain-scaled, clipped, de-normalized image tuple."""
        s0, s1, s2, gt, s0spe, s1spe, s2spe, s0dif, s1dif, s2dif = imgs
        # Leftover of an earlier, mean-based gain scheme, kept as an inert string:
        """mn = np.mean(s0) * self.max
        newmn = np.random.normal(mn, mn/4)
        gain = newmn / mn
        if gain < 0:
            raise ValueError("gain should be larger than 0!")"""
        # random number [a, b)
        a, b = 0.5, 1.5
        gain = (b - a) * np.random.rand() + a
        # reconstruct cosine curve
        i_0, i_45, i_90, i_135 = calc_fourPolar_from_stokes(s0, s1, s2)
        i_0spe, i_45spe, i_90spe, i_135spe = calc_fourPolar_from_stokes(
            s0spe, s1spe, s2spe)
        i_0dif, i_45dif, i_90dif, i_135dif = calc_fourPolar_from_stokes(
            s0dif, s1dif, s2dif)
        # multiply gain
        i_0 *= gain
        i_45 *= gain
        i_90 *= gain
        i_135 *= gain
        i_0spe *= gain
        i_45spe *= gain
        i_90spe *= gain
        i_135spe *= gain
        i_0dif *= gain
        i_45dif *= gain
        i_90dif *= gain
        i_135dif *= gain
        # return back to s0s1s2
        s0 = i_0 + i_90
        s1 = i_0 - i_90
        s2 = i_45 - i_135
        s0spe = i_0spe + i_90spe
        s1spe = i_0spe - i_90spe
        s2spe = i_45spe - i_135spe
        s0dif = i_0dif + i_90dif
        s1dif = i_0dif - i_90dif
        s2dif = i_45dif - i_135dif
        # clipping
        s0 = np.clip(s0, 0, 1)
        s1 = np.clip(s1, -1, 1)
        s2 = np.clip(s2, -1, 1)
        s0spe = np.clip(s0spe, 0, 1)
        s1spe = np.clip(s1spe, -1, 1)
        s2spe = np.clip(s2spe, -1, 1)
        s0dif = np.clip(s0dif, 0, 1)
        s1dif = np.clip(s1dif, -1, 1)
        s2dif = np.clip(s2dif, -1, 1)
        # denormalize
        s0, s1, s2 = de_normalize_s0s1s2(s0, s1, s2, self.max)
        s0spe, s1spe, s2spe = de_normalize_s0s1s2(s0spe, s1spe, s2spe, self.max)
        s0dif, s1dif, s2dif = de_normalize_s0s1s2(s0dif, s1dif, s2dif, self.max)
        return (s0, s1, s2,
                gt,
                s0spe, s1spe, s2spe,
                s0dif, s1dif, s2dif)
| [
"[email protected]"
] | |
0231a213452a73d2fdd8c4b936d9e3e39bf3e38a | b31939b7a0f0ecd5beedf9923a7801d320d59006 | /flathon/fbs_base.py | 858948e5cb92f98b91e3603a80bac1004fc85e6e | [
"MIT"
] | permissive | fish-ken/flathon | a0816e3dbd0ef72b3b9fac53aa563ce5b00e9a4a | ba4b580f9777fca977746c530988c3053392ce21 | refs/heads/main | 2023-07-31T16:13:54.503553 | 2021-09-30T09:38:08 | 2021-09-30T09:38:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | import abc
class FbsBase(metaclass=abc.ABCMeta):
    """Abstract base class: concrete subclasses must implement make()."""

    @abc.abstractmethod
    def make(self):
        """Build and return this object's payload (subclass responsibility)."""
| [
"[email protected]"
] | |
5f2f23462b6afd7196c6acdafcd0d84f0fddcde9 | 78113982a16e56537cd11c64cd3034abc9d595d5 | /polindromeornot.py | 3b5f3fbfc5ed64fa3ceee625d18ff970800a18a5 | [] | no_license | rarnjithkumar/pro2 | ff623039c606abec47b37c6b8ef4691888b8d6f3 | 59f1e4b344218ac1e061fcd8d32cede1e4e76bf2 | refs/heads/master | 2022-02-06T07:23:28.957031 | 2019-08-01T09:38:22 | 2019-08-01T09:38:22 | 197,951,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | n=int(input())
# `n` was read from stdin above.  Reverse its decimal digits and compare
# with the original value: a palindrome reads the same both ways.
# (Original shadowed the builtin `sum` and used opaque names f/iyps.)
original = n
reversed_digits = 0
while n > 0:
    reversed_digits = reversed_digits * 10 + n % 10
    n = n // 10
print('yes' if original == reversed_digits else 'no')
| [
"[email protected]"
] | |
e7cbf7f5f296c665afe04f9b2bdb65a83f0e22d5 | 217493eca509bd7ae2280e1aaa1ac0fa9e0fc491 | /project/project/urls.py | 64a272d6daca90086d366ac3624e0dbf3a3f416b | [] | no_license | sonnynomnom/myrepo | 6792702a91547fed11edbacb502d249751c84336 | ea09c52e18a198307972d188078c40eb301d528e | refs/heads/master | 2021-01-21T08:24:47.648353 | 2017-05-17T22:48:15 | 2017-05-17T22:48:15 | 91,626,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,041 | py | """project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
# Route admin/ to the Django admin and everything else to the records app;
# additionally serve media and static files from their configured roots.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^', include('records.urls', namespace = "records")),
]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| [
"[email protected]"
] | |
790b490a8f37ce4083ad561170267da2bb515c8c | decc8806df35aa8519166b04abc8cb44f046da05 | /Round 1/9.GlobalsAndBooleans/startscript.py | c1066034ad751d5d366d52937e28b9612da4d74e | [
"Unlicense"
] | permissive | beetlesoup/udemy-python-scripting-a-car | 2b24925769c918141bba03430c8768d2c2439ffc | ae41491161821a8f4e63fc86c368a71bb3d6cc15 | refs/heads/main | 2023-01-19T10:37:36.727543 | 2020-11-28T10:57:22 | 2020-11-28T10:57:22 | 316,712,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,224 | py |
from browser import document, window, alert, aio
print("hello world!")
# Grab the car-simulation environment exposed by the page and take one
# initial straight step (step(0) = straight; see Turn below for -1/1).
env = window.env
env.step(0)
async def sleep():
    """Pause ~0.2 s between car actions."""
    await aio.sleep(0.2)
def isRoadAheadAvailable():
    """True while the lidar row directly ahead of the car shows open road.

    An all-zero row means we are about to hit something.
    """
    scan = env.getState().lidar
    scan.reverse()
    blocked_row = [0, 0, 0, 0, 0]
    return scan[2] != blocked_row
async def Turn(direction):
    """Steer one step: 'RIGHT' -> env.step(-1), 'LEFT' -> env.step(1).

    Unknown directions do not steer, but the short pause still happens.
    """
    steering = {'RIGHT': -1, 'LEFT': 1}
    if direction in steering:
        env.step(steering[direction])
    await sleep()
######################
# Start Learning Here
######################
def MoveManyStepsForward(numberOfSteps):
    """Drive straight ahead for the given number of simulation steps."""
    for _ in range(numberOfSteps):
        env.step(0)
async def MoveForwardAndThenTurn():
    """Advance in 30-step bursts while the road ahead is clear, then turn left.

    (Idiom fix: the truthy result is used directly instead of `== True`.)
    """
    while isRoadAheadAvailable():
        MoveManyStepsForward(30)
        await sleep()
    await Turn('LEFT')
async def main():
    """Drive forever: go straight until blocked, turn left, repeat."""
    while (True):
        await MoveForwardAndThenTurn()
    print("Program End!")  # unreachable: the loop above never exits
#######################
## End Learning Here
#######################
aio.run(main()) | [
"[email protected]"
] | |
37c1014dc5c50ae71f64d41027a663cb77c4d721 | 7be8fcccbc566df92f49253964d2889ce1d4768c | /clients/face_detect_client.py | a8f4464997d5e174b9f6ea875e5e21a7a5989377 | [
"MIT"
] | permissive | miwaliu/face-services | 69ff97e8fc03668346f46847277de602fe409b5c | 1db24e0da2ce984a7c7f4bb6aa8917b31265198a | refs/heads/master | 2020-04-25T15:26:17.069879 | 2019-02-26T03:37:04 | 2019-02-26T03:37:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,754 | py | import grpc
import sys
import argparse
from getpass import getpass
import services.grpc.face_detect_pb2
import services.grpc.face_detect_pb2_grpc
from services.grpc.face_common_pb2 import ImageRGB
from snet_sdk import Snet
def make_request(filename, request_cls=None):
    """Read an image file and wrap its raw bytes in a request message.

    Args:
        filename: path of the image file to load.
        request_cls: message class instantiated with a `content` kwarg;
            defaults to ImageRGB.

    Bug fix: the parameter was named `filenamem` while the body read
    `filename`, so every call raised NameError.
    """
    if request_cls is None:
        request_cls = ImageRGB
    with open(filename, 'rb') as infile:
        data = infile.read()
    return request_cls(content=data)
def find_faces(stub, image_fn):
    """Send the image at `image_fn` through the stub's FindFace RPC."""
    return stub.FindFace(make_request(image_fn))
def run_snet(image_fn, private_key):
    """Call the face-detect service through the SingularityNET SDK."""
    snet = Snet(private_key=private_key)
    client = snet.client("snet", "face-detect")
    stub = client.grpc.face_detect_pb2_grpc.FaceDetectStub(client.grpc_channel)
    # NOTE(review): translate_pb2.Request looks copy-pasted from another
    # service's client; an image request type may be intended -- confirm.
    request = make_request(image_fn, client.grpc.translate_pb2.Request)
    return stub.FindFace(request)
def run_local(image_fn, endpoint):
    """Call FindFace on a locally running gRPC service at `endpoint`."""
    grpc_channel = grpc.insecure_channel(endpoint)
    face_stub = services.grpc.face_detect_pb2_grpc.FaceDetectStub(grpc_channel)
    print("-------------- FindFaces --------------")
    return find_faces(face_stub, image_fn)
def main():
    """CLI entry point: parse args, call the service locally or via SNet,
    print the response, and optionally render bounding boxes to a file."""
    script_name = sys.argv[0]
    parser = argparse.ArgumentParser(prog=script_name)
    subcommand = parser.add_subparsers(dest='subcommand')  # NOTE(review): no subcommands are ever added
    default_endpoint = 'http://localhost:50051'
    parser.add_argument("--endpoint", help="jsonrpc server to connect to", default=default_endpoint,
                        type=str, required=False)
    parser.add_argument("--snet", help="call service on SingularityNet - requires configured snet CLI",
                        action='store_true')
    parser.add_argument("--image", help="path to image to apply face detection on",
                        type=str, required=True)
    # NOTE(review): --algorithm is parsed but never attached to the request.
    parser.add_argument("--algorithm", help="face detection algorithm to request",
                        type=str, default="dlib_cnn", action='store',
                        choices=['dlib_cnn','dlib_hog','haar_cascade'])
    parser.add_argument("--out-image", help="Render bounding box on image and save",
                        type=str, required=False)
    args = parser.parse_args(sys.argv[1:])
    endpoint = args.endpoint  # NOTE(review): unused; run_local receives args.endpoint directly
    if args.snet:
        private_key = getpass("Enter private key: ")
        response = run_snet(args.image, private_key)
    else:
        response = run_local(args.image, args.endpoint)
    print(response)
    if args.out_image:
        print("Rendering bounding box and saving to {}".format(args.out_image))
        import cv2  # imported lazily so cv2 is only needed when rendering
        image = cv2.imread(args.image)
        # NOTE(review): if `response` is a protobuf message, item access like
        # response['faces'] and d['x'] raises TypeError; attribute access
        # (response.faces, d.x) may be intended -- confirm against the stubs.
        for d in response['faces']:
            cv2.rectangle(image, (d['x'], d['y']), (d['x'] + d['w'], d['y'] + d['h']), (0, 255, 0), 2)
        cv2.imwrite(args.out_image, image)
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
6904be9b78af333bc697a4294e9e16882663dcdb | c4a752b1291b0ac9c45928f5ebe6c46c9cbd0d5a | /django_policies/exceptions.py | 61c8ad3e083c2a8adad562894befad70dc477595 | [
"MIT"
] | permissive | fedenko/django-policies | f3947afa848450e52bf04c9581a566c1c5521ef1 | 97a2ed378af20d4c9cf2638893d3f319003e173a | refs/heads/main | 2023-04-13T14:33:35.992518 | 2021-04-25T16:03:57 | 2021-04-25T16:03:57 | 361,409,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | class PolicyAlreadyRegistered(Exception):
pass
class PolicyNotRegistered(KeyError):
    """Raised when a requested policy is not in the registry."""
    pass
| [
"[email protected]"
] | |
91201306752bc20bb7b45c0efd9d597c69cd071d | 2c8c41953db9b515cea9378a2c2117e47758c80f | /env/bin/wheel | bed604f5c711ce0ac26c53882804f15b154e1759 | [] | no_license | gaozhuolu/Python-Flask-AngularJS-Demo | 4625936409111033db4b259e83f7dc60bbb196a1 | 1738e7c3494d13cfddeddea204815ce9484ef751 | refs/heads/master | 2021-01-15T22:34:45.610871 | 2017-08-14T18:27:41 | 2017-08-14T18:27:41 | 99,902,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | #!/opt/Python-Flask-AngularJS-Demo/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
0860d276052bc12fd05143c2aadb63ae15dca322 | c83e0cb3cccd664e2596ce608534c848f5e1aa6c | /TestGeo.py | af6cffdb8006526ff787136e0a282027de1d9fb2 | [] | no_license | MirsadHTX/Temp | 6509fbbf9931d6230d02c044b0acf04b093a4abb | 9190916c7689365f791eaaa5c999d03d44c006d8 | refs/heads/master | 2020-04-11T14:24:57.504751 | 2019-01-13T15:38:47 | 2019-01-13T15:38:47 | 161,854,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | import geopandas
# Load geopandas' bundled NYC boroughs ('nybb') sample as a GeoDataFrame.
df = geopandas.read_file(geopandas.datasets.get_path('nybb'))
ax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k') | [
"[email protected]"
] | |
aa0f46a7562108f2618e94a48fd65c03e0153c46 | 6ea1c11d8546bb5ac531a56120d0e29e9da25e3a | /examinator/scratch/test_joblib.py | d70f360cfff4a1365d84464e5ce5197d6ed74f14 | [
"MIT"
] | permissive | brl0/examinator | 2bd6ad62187a0283eaa7e4213c3a0c1ec4226148 | 52706108eea65b854f056fd53e17fef5c8dccbe8 | refs/heads/master | 2023-01-13T03:43:55.292714 | 2019-09-29T17:03:36 | 2019-09-29T17:03:36 | 211,695,739 | 0 | 0 | MIT | 2022-12-26T20:48:00 | 2019-09-29T16:56:59 | Jupyter Notebook | UTF-8 | Python | false | false | 1,352 | py | import sys
import os
import time
from pathlib import Path
from examinator import *
from multiprocessing import Queue
from joblib import Parallel, delayed
import asyncio
WORKERS = 8           # joblib worker count
LOGURU_ENQ = True     # enqueue log records (multiprocess-safe logging)
LOG_ON = True
LOG_LEVEL = "DEBUG"
basepaths = ['..']
# NOTE(review): map() yields a one-shot iterator -- basepaths can only be
# traversed once (main() does exactly one pass over it).
basepaths = map(Path, basepaths)
file_q = Queue()      # shared queue of files discovered by joblib_proc
def joblib_proc(path):
    """Recursively enqueue every file under `path` onto file_q.

    Files in a directory are queued before its subdirectories are visited.
    """
    if path.is_dir():
        entries = list(path.iterdir())
        for entry in entries:
            if entry.is_file():
                file_q.put(entry)
        for entry in entries:
            if entry.is_dir():
                joblib_proc(entry)
    elif path.is_file():
        file_q.put(path)
    else:
        dbg(f"Item is not a file or dir: {str(path)}")
def get_item():
    """Yield queued files until file_q is drained."""
    while not file_q.empty():
        yield file_q.get()
def proc_item(item):
    """Process a single file with proc_file (run in an event loop) and
    return its result."""
    log.debug(f"pid: {os.getpid()}")
    log.debug(f"Processing item: {str(item)}")
    result = asyncio.run(proc_file(item))
    log.debug(f"Result: {result}")
    return result
def main():
    """Walk basepaths, process every discovered file in parallel, report
    the results and elapsed time.  Returns 0 for sys.exit."""
    start = time.perf_counter()
    # Fill file_q by walking each base path (the list unpack drives the lazy map).
    [*map(joblib_proc, basepaths)]
    results = Parallel(n_jobs=WORKERS, require='sharedmem')(map(delayed(proc_item), get_item()))
    pp(results)
    elapsed = time.perf_counter() - start
    # The f-string is already fully formatted; the old trailing .format() was a no-op.
    log.info(f"{__file__} executed in {elapsed:0.2f} seconds.")
    dbg('\n\nFIN\n\n')
    return 0
if __name__ == "__main__":
    # Configure logging (and the module-level dbg shortcut) before running.
    log = start_log(LOG_ON, LOG_LEVEL, mp=LOGURU_ENQ)
    dbg = log.debug
    sys.exit(main())  # pragma: no cover
| [
"[email protected]"
] | |
899b81818d8f74f3bee4c7e2334d4d9090a0628c | 96eaab9d51545ca68441da29979779339b939233 | /tree/cart.py | 8aced3f42850c42394d74f636d536f01e8def698 | [] | no_license | wangkainlp/MachineLearning | c85ad605f21656313deaa20394152109534a2700 | efeaf4b15ab3e32ff4df8567679b8f55258d36cd | refs/heads/master | 2021-04-15T03:08:18.478331 | 2019-11-25T11:04:26 | 2019-11-25T11:04:26 | 126,273,787 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,782 | py | #coding:utf-8
import sys
import numpy as np
# from tree_node import TreeNode
from tree_node import *
from ccp import *
def variance(numList):
    """Sum of squared deviations from the mean (0. for fewer than 2 values).

    Despite the name this is the SSE, not the variance -- there is no
    division by the count; it is used as a regression-split impurity.
    """
    if len(numList) <= 1:
        return 0.
    mean = 1.0 * sum(numList) / len(numList)
    return sum((v - mean) ** 2 for v in numList)
def dataVariance(data, fIdx, sp):
    """Total label SSE after splitting `data` on feature fIdx at threshold sp.

    Each row of data is [features..., label]; rows with feature <= sp go
    left, the rest go right.
    """
    leftLabels, rightLabels = [], []
    for item in data:
        bucket = leftLabels if item[fIdx] <= sp else rightLabels
        bucket.append(item[-1])
    return variance(leftLabels) + variance(rightLabels)
def findBestSpFea(data, fIdx):
    """Best split point for feature fIdx.

    Returns (split, SSE); (inf, inf) when the feature takes a single value
    and therefore cannot be split.
    """
    distinct = sorted({item[fIdx] for item in data})
    bestSp, bestVar = np.inf, np.inf
    if len(distinct) <= 1:
        return bestSp, bestVar
    # Candidate thresholds: midpoints between consecutive distinct values.
    for lower, upper in zip(distinct, distinct[1:]):
        sp = (lower + upper) / 2.0
        var = dataVariance(data, fIdx, sp)
        if var < bestVar:
            bestVar, bestSp = var, sp
    return bestSp, bestVar
def findBestSp(data, feaSize):
    """Scan all feaSize features; return (feature index, split point, SSE)
    of the best split, or (inf, inf, inf) when nothing can be split.

    (Cleanup: removed the unused local `curFeaList`.)
    """
    minFea, minSp, minVar = np.inf, np.inf, np.inf
    for fIdx in range(feaSize):
        sp, var = findBestSpFea(data, fIdx)
        if var < minVar:
            minFea = fIdx
            minVar = var
            minSp = sp
    return minFea, minSp, minVar
# Minimum number of samples a node must cover before it may be split.
minCoverNum = 3
maxDepth = 5
maxLeaves = 30  # NOTE(review): defined but not referenced in this file
def cart_loop(data, root, feaSize):
    """Recursively grow a regression subtree over `data` (Python 2 file).

    A node stops splitting when it covers <= minCoverNum samples, no
    feature admits a split, or maxDepth is reached; leaves record the
    item ids (column -2) of the samples they cover.
    """
    newNode = TreeNode(root, None, None)
    newNode.size = len(data)
    # Node prediction: mean of the covered labels (column -1).
    newNode.value = 1.0 * sum(map(lambda x : x[-1], data)) / len(data)
    # Minimum number of samples a node must cover to be split further.
    if len(data) <= minCoverNum:
        newNode.itemIds = map(lambda x : x[-2], data)
        return newNode
    minFea, minSp, minVar = findBestSp(data, feaSize)
    if minFea == np.inf:
        newNode.itemIds = map(lambda x : x[-2], data)
        return newNode
    newNode.depth = root.depth + 1
    if newNode.depth >= maxDepth:
        newNode.itemIds = map(lambda x : x[-2], data)
        return newNode
    newNode.idx = minFea
    newNode.split = minSp
    print minFea, minSp, newNode.depth
    # Partition the data on the chosen feature/threshold and recurse.
    leftData = []
    rightData = []
    for it in data:
        if it[minFea] <= minSp:
            leftData.append(it)
        else:
            rightData.append(it)
    newNode.left = cart_loop(leftData, newNode, feaSize)
    newNode.right = cart_loop(rightData, newNode, feaSize)
    return newNode
def cart_main(data):
    """Build a CART tree over `data` and return the pruned-tree candidates.

    `data` rows are [features..., item_id, label], hence
    feaSize = ncols - 2.
    NOTE(review): relies on TreeNode and ccp defined elsewhere in this
    file; Python 2 print syntax throughout.
    """
    feaSize = len(data[0]) - 2
    itemSize = len(data)
    print "feaSize:%d dataSize:%d" % (feaSize, itemSize)
    tmpRoot = TreeNode(None, None, None)
    tmpRoot.depth = 0
    root = cart_loop(data, tmpRoot, feaSize)
    root.father = None
    # Assign breadth-first ids to every node.
    level = [root]
    nodeId = 0
    while len(level) > 0:
        newLevel = []
        for node in level:
            if node.left:
                newLevel.append(node.left)
            if node.right:
                newLevel.append(node.right)
            #visit
            node.id = nodeId
            nodeId += 1
        print nodeId
        level = newLevel
    # Cost-complexity pruning yields the sequence of candidate subtrees.
    treeList = ccp(data, root)
    '''
    g_t = getCost(data, root)
    treeList = []
    treeList.append(root)
    tmpTree = root
    it = 0
    while tmpTree.left and tmpTree.right:
        print "iteratror:", it
        g_t = getCost(data, tmpTree)
        newTree = cloneTree(tmpTree)
        ccp(data, newTree)
        treeList.append(newTree)
        tmpTree = newTree
        it += 1
    # return root, newTree
    '''
    return treeList
if __name__ == '__main__':
    # Placeholder entry point; the module is meant to be imported.
    pass
| [
"[email protected]"
] | |
15f29ca53b049d9f402631c910ef1bf5da7b06bd | a2b4021104477a791e6e813fecfae76b739b9e0e | /firstapp/forms.py | 4df704f2bdccd49c97f9f283495340c94b278b9e | [] | no_license | mainuddinrussel73/Flea_Mart | 5ada456c536376d084f6937099ae821f76a736f8 | 47382715a4ca4228fba87073bd9c5fe22e0e3bae | refs/heads/master | 2021-05-12T03:41:33.893609 | 2019-04-30T03:42:09 | 2019-04-30T03:42:09 | 117,624,397 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | from django.contrib.auth.models import User
from django import forms
from firstapp.models import UserProfileInfo
from django.views.generic.edit import UpdateView
class UserForm(forms.ModelForm):
    """Sign-up form for Django's built-in User model.

    `password` is declared explicitly so it renders as a masked
    password input instead of plain text.
    """
    password = forms.CharField(widget=forms.PasswordInput())
    class Meta():
        # Expose only these User fields on the form.
        model = User
        fields = ('username','email','password')
class UserProfileInform(forms.ModelForm):
    """Companion form for the extra profile data (UserProfileInfo)."""
    class Meta():
        model = UserProfileInfo
        fields = ('address','portfolio','profilepic','coverpic')
| [
"[email protected]"
] | |
6d47fc113aa0e87777b1fe5c74c0a6126631db97 | 218cdd10062f7c1c859c9a2e51c048884d5b1c4f | /4/item.py | 767f84af4d3e7b3a4d2505c4570356412da806ff | [] | no_license | Jecosine/python-couse | b13c32fe86e29d6cba5af771a6e2d16ebb7388c8 | 4ddabc3b47df8c8cc01c56da87682259545553b3 | refs/heads/master | 2020-07-25T05:43:35.840681 | 2019-12-10T05:12:25 | 2019-12-10T05:12:25 | 208,184,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | s = [10, 12, 15, 11, 14]
w = [210, 240, 270, 230, 300]
# Greedy fractional knapsack: `s` (defined above) holds available grams,
# `w` holds total values; fill 48g one gram at a time from the highest
# value-per-gram item.
items = {}
for i, j in zip(s, w):
    v = j / i
    # NOTE(review): the dict is keyed by value density -- two items with
    # equal density would collide and one would be lost.  Distinct for
    # the hard-coded data above, but fragile.
    items[v] = i
sorted_value = sorted(items.keys(), key = lambda k:k, reverse = True)
method = {}
for i in sorted_value:
    # method[density] = [grams taken so far, total value of that item]
    method[i] = [0, int(i*items[i])]
# print(sorted_value)
# print(items)
result = 0
total = 48  # capacity in grams; must not exceed sum(s) or the loop spins
while(total):
    # Take one gram from the densest item that still has stock.
    for i in sorted_value:
        if items[i]:
            method[i][0] += 1
            items[i] -= 1
            result += i
            break
    total -= 1
print("Method:")
for i in method.keys():
    print("Get {}g of value {}".format(method[i][0], method[i][1]))
# print(result)
print("Finally: {}".format(result))
"[email protected]"
] | |
46401c4b722d15dc36268249a4d8ee52d0eed3aa | a94c7b5e47d0880858b422d5d6dd3ed32f6c4ca7 | /Temp_UI/cpu_ui.py | a8306df705219088341bee454aeaf41c85df7af6 | [] | no_license | YuriyGoryachkin/prototype | 22a540d56cfad43c65f5b110e49875795fc4f319 | e65e4caa5c67c2d0e5c75cfd540fd6f3bacab377 | refs/heads/master | 2020-04-23T08:11:50.577624 | 2019-02-17T16:00:42 | 2019-02-17T16:00:42 | 170,882,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | import cpuinfo
import psutil
# Quick manual sanity check: advertised clock (cpuinfo) vs measured (psutil).
print(cpuinfo.get_cpu_info()['hz_actual'])
print(psutil.cpu_freq())
| [
"[email protected]"
] | |
a93d3736a5ccc63da05905c27982a9691a5a88c9 | 251b9ca6e4bab3e55146aad9376dbdac878b0bfd | /solr/app.py | 1bf018290933fffb6fe9a201d6446dd0c83bc1f9 | [
"MIT"
] | permissive | romanchyla/solr-service | 1637bea95b3bdb3976de33c8d583a5bb77d37e41 | 5df2f8a0afcc191b94ee1917053a5b3155df181c | refs/heads/master | 2021-01-18T00:57:32.681050 | 2015-01-26T14:39:19 | 2015-01-26T14:39:19 | 29,889,261 | 0 | 0 | null | 2015-01-27T00:23:25 | 2015-01-27T00:23:25 | null | UTF-8 | Python | false | false | 914 | py | import os
from flask import Blueprint
from flask import Flask, g
from views import StatusView, Resources, Tvrh, Search, Qtree
from flask.ext.restful import Api
def _create_blueprint_():
    """Build the Flask blueprint that hosts every solr-service endpoint."""
    return Blueprint('solr', __name__, static_folder=None)
def create_app(blueprint_only=False):
    """Application factory for the solr service.

    Loads config.py, overlays an optional local_config.py, wires the
    REST resources onto a 'solr' blueprint and returns the Flask app --
    or only the blueprint when blueprint_only is True, for embedding in
    a larger application.
    """
    app = Flask(__name__, static_folder=None)
    app.url_map.strict_slashes = False
    # Base config is mandatory; the local override is best-effort.
    app.config.from_pyfile('config.py')
    try:
        app.config.from_pyfile('local_config.py')
    except IOError:
        pass
    blueprint = _create_blueprint_()
    api = Api(blueprint)
    api.add_resource(StatusView,'/status')
    api.add_resource(Resources,'/resources')
    api.add_resource(Tvrh,'/tvrh')
    api.add_resource(Search,'/query')
    api.add_resource(Qtree,'/qtree')
    if blueprint_only:
        return blueprint
    app.register_blueprint(blueprint)
    return app
if __name__ == "__main__":
app = create_app()
app.run(debug=True)
| [
"[email protected]"
] | |
dbb97f0cbc36f2bfd81ed8cc7c03df74b429d7e7 | dfe2a52a1c36a28a8bf85af7efd42380d980b773 | /virtual/lib/python3.6/site-packages/social/tests/backends/test_skyrock.py | 6c9bc03d42c8bfa986caebbc4d6173b6d3d79df5 | [
"MIT"
] | permissive | virginiah894/Instagram-clone | 2c2a15d89fcdb25b22bd60428cf84a01f3bd553c | 4d8abe7bafefae06a0e462e6a47631c2f8a1d361 | refs/heads/master | 2022-12-10T06:56:21.105357 | 2020-01-07T14:14:50 | 2020-01-07T14:14:50 | 229,394,540 | 3 | 0 | MIT | 2022-12-08T03:23:40 | 2019-12-21T07:41:19 | Python | UTF-8 | Python | false | false | 1,343 | py | import json
from social.p3 import urlencode
from social.tests.backends.oauth import OAuth1Test
class SkyrockOAuth1Test(OAuth1Test):
    """OAuth1 login test for the Skyrock backend.

    The class attributes below are fixtures consumed by the OAuth1Test
    base class: canned HTTP bodies for the token exchange and the
    user-data endpoint, plus the username the pipeline should extract.
    """
    backend_path = 'social.backends.skyrock.SkyrockOAuth'
    user_data_url = 'https://api.skyrock.com/v2/user/get.json'
    expected_username = 'foobar'
    # Mocked access-token response.
    access_token_body = json.dumps({
        'access_token': 'foobar',
        'token_type': 'bearer'
    })
    # Mocked request-token response (form-encoded, as OAuth1 requires).
    request_token_body = urlencode({
        'oauth_token_secret': 'foobar-secret',
        'oauth_token': 'foobar',
    })
    # Mocked Skyrock profile payload.
    user_data_body = json.dumps({
        'locale': 'en_US',
        'city': '',
        'has_blog': False,
        'web_messager_enabled': True,
        'email': '[email protected]',
        'username': 'foobar',
        'firstname': 'Foo',
        'user_url': '',
        'address1': '',
        'address2': '',
        'has_profile': False,
        'allow_messages_from': 'everybody',
        'is_online': False,
        'postalcode': '',
        'lang': 'en',
        'id_user': 10101010,
        'name': 'Bar',
        'gender': 0,
        'avatar_url': 'http://www.skyrock.com/img/avatars/default-0.jpg',
        'nb_friends': 0,
        'country': 'US',
        'birth_date': '1980-06-10'
    })

    def test_login(self):
        """Full login flow against the mocked endpoints."""
        self.do_login()

    def test_partial_pipeline(self):
        """Login flow that pauses and resumes the pipeline."""
        self.do_partial_pipeline()
| [
"[email protected]"
] | |
78345d7f47403a964273439346fc6f8a74a0103e | 8e42bbd69a3ce337b09bb9691e8f10889c2568a4 | /fimpute2geno.py | 97c6bb4615affc75c646efee8ee086f3ffb24f75 | [] | no_license | haraldgrove/snptranslate | c348ebdee9744767574ad486d5dc43d9f91bd984 | 79323a846ce0007ebd8a71f654a96caa97169b07 | refs/heads/master | 2020-04-06T06:56:26.603323 | 2016-09-05T00:39:24 | 2016-09-05T00:39:24 | 20,091,416 | 1 | 1 | null | 2016-07-14T00:55:49 | 2014-05-23T07:56:02 | Python | UTF-8 | Python | false | false | 3,509 | py | #!/usr/bin/env python
# Version 1.0
# from __future__ import division, print_function
import sys
import argparse
def readMarkers(markerfile):
    """Read a Plink-style MAP file and return a marker dictionary.

    Expected whitespace-separated columns per line:
        chrom, name, gendist, position, allele1, allele2 [, alias]
    Lines starting with '#' and blank lines are skipped.

    Returns a dict mapping each marker name to its details (chrom, pos,
    numeric allele codes, rank, alias) plus a 'marklist' entry holding
    the marker names in file order.

    Raises:
        Exception: if a data line has fewer than six columns.
    """
    def trans(s):
        # Normalise allele letters/codes to numeric codes 1-4 ('0' = unknown).
        if s in ['A','1']: return '1'
        if s in ['C','2']: return '2'
        if s in ['G','3']: return '3'
        if s in ['T','4']: return '4'
        return '0'
    with open(markerfile,'r') as fin:
        mark = {'marklist':[]}
        count = 0
        for line in fin:
            if line.startswith('#'): continue
            l = line.strip().split()
            if len(l) == 0: continue
            alias = None
            if len(l) >= 7: # Plink MAP plus reference/alternative alleles and an alias
                chrom,name,gendist,position,a1,a2,alias = l[:7]
            elif len(l) == 6: # Plink MAP plus major/minor allele columns
                chrom,name,gendist,position,a1,a2 = l
            else:
                raise Exception('Map file requires columns 5 and 6 to be marker alleles\n')
            # Reject a marker named like the bookkeeping key.  The original
            # code performed this check before `name` was parsed from the
            # line, so it could never trigger.
            if name == 'marklist':
                sys.stderr.write('"marklist" is not a legal markername\n')
                sys.exit(1)
            if name not in mark:
                mark[name] = {'chrom':chrom,
                              'pos':position,
                              'a1':trans(a1),
                              'a1x':0,
                              'a2':trans(a2),
                              'a2x':0,
                              'rank':count,
                              'alias': alias}
                count += 1
                mark['marklist'].append(name)
    return mark
def convertFile(args):
    """Convert a FImpute genotype file to 'Genos' format.

    Writes a header line listing the markers on chromosome args.chrom,
    then one line per animal: id, sire, dam (both '0'; FImpute files
    carry no pedigree) followed by the two alleles of every genotype.
    """
    def trans(a,m1,m2):
        # Decode one FImpute genotype digit into a two-allele string:
        # 0/2 homozygous, 1 unphased het, 3/4 phased het, else missing.
        if a == '0': return m1+m1
        if a == '1': return m1+m2
        if a == '2': return m2+m2
        if a == '3': return m1+m2
        if a == '4': return m2+m1
        return '00'
    mark = readMarkers(args.mapfile)
    with open(args.infile,'r') as fin, open(args.output,'w') as fout:
        fout.write('#\t%s\n' % ('\t'.join([m for m in mark['marklist'] if mark[m]['chrom']==args.chrom])))
        # Skip the FImpute header line.  next(fin) works on Python 2.6+
        # and Python 3; the original fin.next() breaks under Python 3.
        next(fin)
        for line in fin:
            l = line.strip().split()
            if len(l) < 1: continue
            animal,chip,geno = l
            father,mother = '0','0'
            fout.write('%s\t%s\t%s' % (animal,father,mother))
            for i,m in enumerate(mark['marklist']):
                if mark[m]['chrom'] != args.chrom: continue
                g = trans(geno[i],mark[m]['a1'],mark[m]['a2'])
                fout.write('\t%s\t%s' % (g[0],g[1]))
            fout.write('\n')
def main():
    """Parse the command line and run the FImpute -> Genos conversion."""
    option_specs = [
        (('-i', '--infile'), 'FImpute file'),
        (('-o', '--output'), 'Output file'),
        (('-v', '--verbose'), 'Prints runtime info'),
        (('-m', '--mapfile'), 'Map file'),
        (('-p', '--ped'), 'Pedigree'),
        (('-c', '--chrom'), 'Chromosome'),
    ]
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description='Converts from FImpute to Genos')
    for flags, text in option_specs:
        parser.add_argument(*flags, help=text)
    convertFile(parser.parse_args())
if __name__ == '__main__':
    # Script entry point.
    main()
| [
"[email protected]"
] | |
af30bd9609124f4e5b5d8875b39026c3d7f66fae | b0691f14d2c03435beec9ac7f9d045eceac01f93 | /ML-Research/Archives/download_lung_data.py | 414ede9c75f82b3fd5a2f841f643300cda8d7738 | [
"Apache-2.0"
] | permissive | haichandar/MachineLearning | 148b04014897aa6c556ada071ae1ca8efcdb40a5 | 658e65ea0a4bc5bd98fd1db5f8b513d9e2e84e2b | refs/heads/master | 2022-01-07T17:38:00.262584 | 2019-05-18T16:46:05 | 2019-05-18T16:46:05 | 166,527,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 578 | py | from urllib.request import urlopen
import os
urls = {'pneumothorax_test':'https://www.dropbox.com/s/x74ykyivipwnozs/pneumothorax_test.h5?dl=1',
'pneumothorax_train':'https://www.dropbox.com/s/pnwf67qzztd1slc/pneumothorax_train.h5?dl=1'}
data_dir = '.\Lung_Data\\'
for (name,url) in urls.items():
if not os.path.isfile(data_dir+name+'.h5'):
print('Downloading '+name+'...')
u = urlopen(url)
data = u.read()
u.close()
with open(data_dir+name+'.h5', "wb") as f :
f.write(data)
print('Files have been downloaded.') | [
"[email protected]"
] | |
54c4dc43a68b2bd716177955582c0550c3a14108 | a9cb9599d6114186ce8b07c234e9fe561f47d933 | /start_docker_agent.py | 2cf5597c36c1eacdaade46b01f07e7e553585ed0 | [] | no_license | mstefferson/prefect-demo | 9a91a65b40225fd67259571d17182109713c957f | d10cfbdf5ad03c09486e6b7235964ee5bdc30060 | refs/heads/master | 2023-07-19T07:10:34.121458 | 2021-08-30T00:26:44 | 2021-08-30T00:26:44 | 400,655,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | import os
from prefect.agent.docker import DockerAgent

# Start a Prefect Docker agent that picks up flows labelled "docker".
# The commented volume mount shows how to expose the working directory
# inside the run containers.
volumes = []
# volumes = [f"{os.getcwd()}:/mnt"]
DockerAgent(labels=["docker"], volumes=volumes, show_flow_logs=True).start()
| [
"[email protected]"
] | |
0739b43c2a2b79b8090050a8acbd7aadc5e93df2 | 341e9638416d6687344ed2ef2ebb70cf107ea429 | /python/recommendation/mmoe.py | 56821d878dc5464ae4cc6589ca676d1cc5c8e314 | [] | no_license | xiaoSUM/PyTorch-On-Angel | 2825d9afb750d710e4b63afa69eca0a8c83857f9 | cbb7f09deb4e3ee390afc1f04321eae6dc1a1022 | refs/heads/master | 2023-07-27T17:07:47.598975 | 2021-09-11T06:59:22 | 2021-09-11T06:59:22 | 385,549,679 | 0 | 0 | null | 2021-07-13T09:31:20 | 2021-07-13T09:31:19 | null | UTF-8 | Python | false | false | 6,142 | py | from __future__ import print_function
import argparse
from torch import Tensor
from typing import List
import torch
import torch.nn.functional as F
class MMOE(torch.nn.Module):
    """Multi-gate Mixture-of-Experts (MMoE) for multi-task CTR-style models.

    Sparse input is given as (index, feats, values) triples; expert and
    gate layers are embedding lookups accumulated per sample with
    scatter_add_.  The model is exported via torch.jit.script (see main()),
    hence the TorchScript `# type:` comment on forward_.

    NOTE(review): `[param, ...] * n` repeats *references*, so every expert
    shares the same four Parameter tensors (and every task shares one gate
    and one tower) -- the experts are therefore identical.  Also, plain
    Python lists of Parameters are not registered with the Module, so they
    will not appear in parameters().  Both look unintended -- confirm.
    """
    def __init__(self, input_dim, experts_num, experts_out, experts_hidden, towers_hidden, tasks):
        super(MMOE, self).__init__()
        # params
        self.loss_fn = torch.nn.BCELoss()
        self.input_dim = input_dim
        self.experts_num = experts_num
        self.experts_out = experts_out
        self.experts_hidden = experts_hidden
        self.towers_hidden = towers_hidden
        self.tasks = tasks
        self.softmax = torch.nn.Softmax(dim=1)
        """input layers embedding"""
        # bias
        self.bias = torch.nn.Parameter(torch.zeros(1, 1))
        # weights
        self.weights = torch.nn.Parameter(torch.zeros(1, 1))
        # expert_weight: [W0, b0, W1, b1] per expert (shared -- see NOTE above)
        self.w_expert = [torch.nn.Parameter(torch.zeros(input_dim, experts_hidden)),torch.nn.Parameter(torch.zeros(1, experts_hidden)), \
                         torch.nn.Parameter(torch.zeros(experts_hidden,experts_out)),torch.nn.Parameter(torch.zeros(1, experts_out))]*experts_num
        # gates_weight: one embedding matrix per task (shared -- see NOTE above)
        self.w_gates = [torch.nn.Parameter(torch.zeros(input_dim, experts_num))]*tasks
        # tower_weight: [W0, b0, W1, b1] per task (shared -- see NOTE above)
        self.w_towers = [torch.nn.Parameter(torch.zeros(experts_out, towers_hidden)),torch.nn.Parameter(torch.zeros(1, towers_hidden)),\
                         torch.nn.Parameter(torch.zeros(towers_hidden,1)),torch.nn.Parameter(torch.zeros(1, 1))]*tasks
        # mats: flat parameter list consumed by forward_
        self.mats = self.w_expert + self.w_gates+self.w_towers
        # init (duplicated references are simply re-initialised)
        for i in self.mats:
            torch.nn.init.xavier_uniform_(i)

    def forward_(self,batch_size, index, feats, values,mats):
        # type: (int, Tensor, Tensor, Tensor,List[Tensor]) -> Tensor
        index = index.view(-1)
        values = values.view(1, -1)
        # get the experts output: embed features, weight by values and
        # scatter-accumulate into one hidden vector per sample.
        w_expert = mats[0:self.experts_num*4]
        expers_outs = []
        for w_expert0,b_expert0,w_expert1,b_expert1 in zip(w_expert[0::4], w_expert[1::4],w_expert[2::4],w_expert[3::4]):
            w_expert0 = F.embedding(feats, w_expert0)
            srcs = w_expert0.mul(values.view(-1,1)).transpose_(0,1)
            expert_out = torch.zeros(self.experts_hidden,batch_size, dtype=torch.float32)
            index_expert = index.repeat(self.experts_hidden).view(self.experts_hidden, -1)
            expert_out.scatter_add_(1, index_expert, srcs).transpose_(0,1)
            expert_out = expert_out + b_expert0
            expert_out = torch.relu(expert_out)
            expert_out = expert_out @ w_expert1
            expert_out = expert_out + b_expert1
            expers_outs.append(expert_out)
        expers_out_tensor = torch.stack(expers_outs)
        #get the gates output
        # w_gates = mats.pop(self.tasks)
        w_gates = mats[self.experts_num*4:self.experts_num*4+self.tasks]
        srcs = [F.embedding(feats,w_gate).mul(values.view(-1,1)).transpose_(0,1) for w_gate in w_gates]
        index_gate = index.repeat(self.experts_num).view(self.experts_num, -1)
        gates_out = torch.zeros(self.experts_num,batch_size, dtype=torch.float32)
        # NOTE(review): scatter_add_ mutates the single shared gates_out
        # tensor in place, so every task's entry accumulates all tasks'
        # contributions and aliases the same tensor -- confirm intended.
        gates_outs = [gates_out.scatter_add_(1,index_gate,src) for src in srcs]
        gates_outs = [self.softmax(gates_out)for gates_out in gates_outs]
        # towers_input = []
        # Mix expert outputs with the per-task gate weights.
        towers_input = [(g.unsqueeze(2).expand(-1, -1, self.experts_out)) * expers_out_tensor for g in gates_outs]
        towers_input = [torch.sum(ti, dim=0) for ti in towers_input]
        # get the final output from the towers
        w_towers = mats[self.experts_num*4+self.tasks:]
        final_output = []
        for w_tower0,b_tower0, w_tower1,b_tower1,i in zip(w_towers[0::4], w_towers[1::4],w_towers[2::4], w_towers[3::4],range(self.tasks)):
            tower_out = towers_input[i] @ w_tower0
            tower_out = tower_out + b_tower0
            tower_out = torch.relu(tower_out)
            tower_out = tower_out @ w_tower1
            tower_out = tower_out + b_tower1
            tower_out = torch.sigmoid(tower_out)
            final_output.append(tower_out)
        # get the output of the towers, and stack them
        final_output = torch.stack(final_output, dim=1)
        return final_output

    def forward(self, batch_size: int, index, feats, values):
        # Thin wrapper binding the registered parameter list.
        return self.forward_(batch_size, index, feats, values, self.mats)

    @torch.jit.export
    def loss(self, output, targets):
        # Binary cross-entropy over the sigmoid tower outputs.
        return self.loss_fn(output, targets)

    @torch.jit.export
    def get_type(self):
        # Parameter-layout tag consumed by the serving side.
        return "BIAS_WEIGHT_EMBEDDING_MATS"

    @torch.jit.export
    def get_name(self):
        return "mmoe"
def main():
    """Build an MMOE from the command-line FLAGS and save it as TorchScript."""
    mmoe = MMOE(FLAGS.input_dim, FLAGS.experts_num, FLAGS.experts_out, FLAGS.experts_hidden, FLAGS.towers_hidden, FLAGS.tasks)
    # mmoe = MMOE(input_dim=5, experts_num=3, experts_out=4, experts_hidden=2, towers_hidden=2, tasks=2)
    mmoe_script_module = torch.jit.script(mmoe)
    mmoe_script_module.save("mmoe.pt")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--input_dim",
type=int,
default=-1,
help="data input dim."
)
parser.add_argument(
"--experts_num",
type=int,
default=-1,
help="experts num"
)
parser.add_argument(
"--experts_out",
type=int,
default=-1,
help="experts out dim"
)
parser.add_argument(
"--experts_hidden",
type=int,
default=-1,
help="experts hidden dim"
)
parser.add_argument(
"--towers_hidden",
type=int,
default=-1,
help="towers hidden dim"
)
parser.add_argument(
"--tasks",
type=int,
default=-1,
help="tasks num"
)
FLAGS, unparsed = parser.parse_known_args()
main()
# python mmoe.py --input_dim 5 --experts_num 3 --experts_out 4 --experts_hidden 2 --towers_hidden 2 --tasks 2
# train.py model = mmoe.MMOE(dim,3,4,2,2,1) | [
"[email protected]"
] | |
c296bcf5d763803370519dbc7b0cfa134d9b4fc7 | fd3f0fdc6af4d0b0205a70b7706caccab2c46dc0 | /0x08-python-more_classes/1-rectangle.py | 89807a014a51f03ba7255d4d66673efba41e72ac | [] | no_license | Maynot2/holbertonschool-higher_level_programming | b41c0454a1d27fe34596fe4aacadf6fc8612cd23 | 230c3df96413cd22771d1c1b4c344961b4886a61 | refs/heads/main | 2023-05-04T05:43:19.457819 | 2021-05-12T14:51:56 | 2021-05-12T14:51:56 | 319,291,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,042 | py | #!/usr/bin/python3
"""This module contains geometric shape classe(s)"""
class Rectangle:
    """Models a real-world rectangle with validated dimensions."""

    def __init__(self, width=0, height=0):
        """Create a rectangle; both dimensions go through the setters."""
        self.width = width
        self.height = height

    @staticmethod
    def _checked(value, name):
        """Return value if it is a non-negative int, else raise."""
        if not isinstance(value, int):
            raise TypeError(name + ' must be an integer')
        if value < 0:
            raise ValueError(name + ' must be >= 0')
        return value

    @property
    def width(self):
        """Width of the rectangle."""
        return self.__width

    @width.setter
    def width(self, size):
        """Validate and store the width."""
        self.__width = self._checked(size, 'width')

    @property
    def height(self):
        """Height of the rectangle."""
        return self.__height

    @height.setter
    def height(self, size):
        """Validate and store the height."""
        self.__height = self._checked(size, 'height')
| [
"[email protected]"
] | |
c692de65698326ab9e089d44a441e077f6fc5116 | 358d21d74a5a58c68e06b08226d293e155cc0d7b | /pyless/pyless/settings.py | 2e79eaa8bcebc44eea762656db93c6188e417b3c | [] | no_license | nymoral/pyless | bfd8a5b3c77e67759c13cb606d082d119bf26f1a | 8abbb0bb80ae59d66cadf5ac5f6b9e31ecb62856 | refs/heads/master | 2021-01-17T17:20:15.940478 | 2016-06-16T18:37:18 | 2016-06-16T18:37:18 | 60,605,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,357 | py | import os
# Django settings for the pyless project.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# NOTE(review): secret key committed to version control -- rotate it and
# load from the environment before any real deployment.
SECRET_KEY = 't5vd^#!f30ihkqd3a#uu^*a+9n^v+)ud67iebkg$#u*!kh8c4a'

# NOTE(review): DEBUG is enabled; must be False in production.
DEBUG = True

ALLOWED_HOSTS = ['localhost']

INSTALLED_APPS = [
    'football.apps.FootballConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware'
]

ROOT_URLCONF = 'pyless.urls'

# APP_DIRS follows DEBUG: in production the block below switches to an
# explicit cached template loader instead.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': DEBUG,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
            ],
        },
    },
]

if not DEBUG:
    temp = TEMPLATES[0]
    temp['DIRS'] = [os.path.join(BASE_DIR, 'football/templates')]
    temp['OPTIONS']['loaders'] = [
        ('django.template.loaders.cached.Loader', (
            'django.template.loaders.filesystem.Loader',
            'django.template.loaders.app_directories.Loader',
        ))]

WSGI_APPLICATION = 'pyless.wsgi.application'

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Caching is a no-op (DummyCache).
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
    }
}

# NOTE(review): no password validators configured.
AUTH_PASSWORD_VALIDATORS = []

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'Europe/Vilnius'

USE_I18N = True

USE_L10N = True

USE_TZ = True

STATIC_URL = '/static/'

LOGIN_URL = '/auth/login/'

# SMTP settings; credentials are intentionally blank here.
EMAIL_HOST = 'smtp-mail.outlook.com'
EMAIL_PORT = '587'
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = True

# Non-django settings values:
REGISTER_ENABLED = True
"[email protected]"
] | |
5023d19ebac999654492258beacf94e2dad6ccac | 53ae08f886b3b803471b8a8d27148b542844134a | /indicator_heuristic_strategy/indicator_analysis/indicatorManager.py | be9f155b72d3eb6f17b0a2bfd7c9db492c518aa2 | [] | no_license | suku-h/quant | ead6fc819baee69c6d6828332c4c0bf1df314742 | 8e478c34a22edaa60776cf7d35e5d38d7cfd1600 | refs/heads/master | 2021-01-20T09:49:23.442304 | 2017-11-17T01:23:26 | 2017-11-17T01:23:26 | 90,290,004 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,871 | py | import pickle
import talib
import pandas as pd
import numpy as np
import csv
# Check the answer https://stackoverflow.com/a/20627316/5512020
# to prevent the warning: A value is trying to be set on a copy of a slice from a DataFrame.
pd.options.mode.chained_assignment = None # default='warn'
total_res = np.zeros(13)
table_name = 'stock_data'
with open('../../sp500tickers.pickle', 'rb') as f:
tickers = pickle.load(f)
with open('../data.pickle', 'rb') as f:
df_total = pickle.load(f)
def analysePrevDays(prevCloses):
    """Count how many day-over-day moves in prevCloses were up-moves."""
    return sum(1 for prev, cur in zip(prevCloses, prevCloses[1:]) if cur > prev)
def analyze_indicator(indicator, max_buy_val, min_sell_val, period, needs_HL):
    """Backtest a talib indicator strategy over all S&P 500 tickers.

    Buy when the indicator drops below max_buy_val on elevated volume
    (>1.25x the 100-day average) and the recent closes show momentum;
    sell when the indicator rises above min_sell_val.  Per-trade gains,
    holding periods and stop-loss statistics are accumulated into the
    module-level total_res array and appended to analysis.csv.

    needs_HL selects the talib call signature (Close-only vs High/Low/Close).
    NOTE(review): `sl` is first a file handle, then reused as a boolean
    stop-loss flag inside the loop -- the handle is leaked and the name is
    misleading; confirm and untangle.
    """
    # without count the ticker name is incorrect
    for count, ticker in enumerate(tickers):
        # if ticker == 'AAP':
        df = df_total[df_total['Ticker'] == ticker]
        df.reset_index(inplace=True)
        df.set_index(['Ticker', 'Date'], inplace=True)
        df.drop(['index'], axis=1, inplace=True)
        if not needs_HL:
            df[indicator] = getattr(talib, indicator)(df['Close'].values, timeperiod=period)
        else:
            df[indicator] = getattr(talib, indicator)(df['High'].values, df['Low'].values, df['Close'].values,
                                                      timeperiod=period)
        # Res: 1 = buy signal, -1 = sell signal, 0 = hold.
        df['Res'] = np.where(df[indicator] < max_buy_val, 1, np.where(df[indicator] > min_sell_val, -1, 0))
        df['maV'] = df['Volume'].rolling(100).mean()
        df['anaV'] = np.where(df['Volume'] > 1.25 * df['maV'], 1, 0)
        op = df['Res'].values
        vals = df['Close'].values
        amt = np.zeros(len(df['Res']))
        gain = np.zeros(len(df['Res']))
        day_diff = np.zeros(len(df['Res'])).astype(int)
        sell_days = np.zeros(len(df['Res'])).astype(int)
        anaV = df['anaV'].values
        buy_signal = 0
        sell_signal = 0
        closePosition = -1
        sl = open('sl.csv', 'a', newline='')
        writer = csv.writer(sl)
        for k in range(len(op)):
            # Only open a trade on a buy signal with volume confirmation,
            # and not while a position from an earlier signal is open.
            if op[k] > buy_signal and anaV[k] == 1 and k > closePosition:
                for i in range(k + 3, k + 23):
                    # Momentum check over a 3-5 day window of closes.
                    if i - k == 3 or i - k == 4:
                        plusCount = analysePrevDays(vals[k:i])
                    else:
                        plusCount = analysePrevDays(vals[i-5:i])
                    if plusCount >= 3:
                        sl = False
                        priceDiffRow = []
                        for j in range(i + 1, len(op)):
                            change = (vals[j] - vals[i]) * 100 / vals[i]
                            priceDiffRow.append(change)
                            # Mark the trade if it ever drew down >10%.
                            if change < - 10 and not sl:
                                sl = True
                            if op[j] < sell_signal:
                                amt[i] = vals[j] - vals[i]
                                gain[i] = (vals[j] - vals[i]) * 100 / vals[i]
                                day_diff[i] = j - i
                                sell_days[j] += 1
                                closePosition = j
                                if sl:
                                    if gain[i] > 0:
                                        total_res[11] += 1
                                    else:
                                        total_res[12] += 1
                                    writer.writerow(priceDiffRow)
                                break
                    if closePosition > i:
                        break
        df['Amt'] = amt[:]
        df['Gain'] = gain[:]
        df['Day_Diff'] = day_diff[:]
        df['Sell_Days'] = sell_days[:]
        # Accumulate per-ticker stats into the module-level totals.
        total_res[0] += len(df['Gain'])
        total_res[1] += len(df['Gain'][df['Gain'] > 0])
        total_res[2] += len(df['Gain'][df['Gain'] < 0])
        total_res[3] += df['Gain'][(df['Gain'] > 0) | (df['Gain'] < 0)].sum()
        total_res[4] += df['Gain'][df['Gain'] > 0].sum()
        total_res[5] += df['Gain'][df['Gain'] < 0].sum()
        total_res[6] += df['Day_Diff'][(df['Gain'] > 0) | (df['Gain'] < 0)].sum()
        total_res[7] += df['Day_Diff'][df['Gain'] > 0].sum()
        total_res[8] += df['Day_Diff'][df['Gain'] < 0].sum()
        total_res[9] += len(df['Gain'][(df['Gain'] > 0) | (df['Gain'] < 0)])
        total_res[10] += len(df['Sell_Days'][df['Sell_Days'] > 0])
        if ticker == 'AAP':
            # Spot-check printout for one ticker.
            print(df.tail(80))
            print('Total days', len(df['Gain']))
            print('+', len(df['Gain'][df['Gain'] > 0]))
            print('-', len(df['Gain'][df['Gain'] < 0]))
            print('avg gain', df['Gain'][(df['Gain'] > 0) | (df['Gain'] < 0)].mean())
            print('avg + gain', df['Gain'][df['Gain'] > 0].mean())
            print('avg - gain', df['Gain'][df['Gain'] < 0].mean())
            print('avg day diff', df['Day_Diff'][(df['Gain'] > 0) | (df['Gain'] < 0)].mean())
            print('avg + day diff', df['Day_Diff'][df['Gain'] > 0].mean())
            print('avg - day diff', df['Day_Diff'][df['Gain'] < 0].mean())
            print('Total buys', len(df['Gain'][df['Gain'] != 0]))
            print('Total sells', len(df['Sell_Days'][df['Sell_Days'] > 0]))
    # Turn the summed gains/day-diffs into averages.
    total_res[3] = total_res[3] / total_res[9]
    total_res[4] = total_res[4] / total_res[1]
    total_res[5] = total_res[5] / total_res[2]
    total_res[6] = total_res[6] / total_res[9]
    total_res[7] = total_res[7] / total_res[1]
    total_res[8] = total_res[8] / total_res[2]
    print('Total days', total_res[0])
    print('Total buys', total_res[9])
    print('Total sell days', total_res[10])
    print('+', total_res[1])
    print('-', total_res[2])
    print('avg gain', total_res[3])
    print('avg + gain', total_res[4])
    print('avg - gain', total_res[5])
    print('avg day diff', total_res[6])
    print('avg + day diff', total_res[7])
    print('avg - day diff', total_res[8])
    print('stop loss positive count', total_res[11])
    print('stop loss negative count', total_res[12])
    row = [indicator, max_buy_val, min_sell_val, period]
    # check for diff append and extend https://stackoverflow.com/a/252711/5512020
    row.extend([element for element in total_res])
    csvRow = ','.join(map(str, row))
    # NOTE(review): csv.writer.writerow expects an iterable of fields;
    # passing the joined string writes each character as its own column.
    with open('analysis.csv', 'a', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(csvRow)
        f.close()
# Example run: buy when 14-day RSI < 25 on elevated volume, sell when RSI > 65.
analyze_indicator(
    indicator='RSI',
    max_buy_val=25,
    min_sell_val=65,
    period=14,
    needs_HL=False
)
| [
"[email protected]"
] | |
5c6620edec199d9f5d5a8418a074133844d17c7c | e34d69f33d9bf3d9de99343ba24ad78bc5197a93 | /scripts/cmp_lj_sync | f25d123129b22ec54a527691be2a502d7f0f1e29 | [] | no_license | cms-ttH/ttH-TauRoast | 8e8728a49d02d9e8d7dc119376a4aefb6e8fd77d | 3fe6529d7270dc091db00f95997ca6add8b95ac9 | refs/heads/master | 2021-01-24T06:13:06.485445 | 2017-10-11T14:04:05 | 2017-10-11T14:04:05 | 10,819,593 | 2 | 5 | null | 2016-09-15T07:19:20 | 2013-06-20T12:46:59 | Python | UTF-8 | Python | false | false | 656 | #!/usr/bin/env python
import sys
def read(fn):
    """Map (run, lumi, event) -> trailing payload for every data line.

    Only lines beginning with '1' are treated as data; everything after
    the third comma is kept verbatim (including the newline).
    """
    with open(fn) as handle:
        rows = (ln.split(',', 3) for ln in handle if ln.startswith('1'))
        return {(run, lumi, event): stub for run, lumi, event, stub in rows}
# Compare the two event dumps given on the command line and print the
# (run, lumi, event) keys unique to each side.  (Python 2 print syntax.)
me = read(sys.argv[1])
kit = read(sys.argv[2])
mkeys = set(me.keys())
kkeys = set(kit.keys())
for k in mkeys - kkeys:
    print "me", ",".join(list(k) + [me[k]]).strip()
for k in kkeys - mkeys:
    print "kit", ",".join(list(k) + [kit[k]]).strip()
print len(mkeys - kkeys), "events unique in first file"
print len(kkeys - mkeys), "events unique in second file"
| [
"[email protected]"
] | ||
3118897c4d67a9ea8aec95354c48a5a19f5ba665 | 08244f684b9be8c8d0f05f5100f273c78467b748 | /currency.py | b487ed3d525b986b1057c55a5fdd73c3d1df9c4b | [] | no_license | Adroso/Python-Utility-App | 54b3d04dda09f3d5f1a0e038f91a0fb2ed5a619b | 0e8ded5d87c10c8a16a52d9411ba8988469ab958 | refs/heads/master | 2021-03-30T17:03:27.018427 | 2015-11-06T01:18:42 | 2015-11-06T01:18:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,791 | py | __author__ = 'Adroso'
import web_utility
def convert(amount, home_currency_code, location_currency_code):
    """Convert `amount` between two ISO currency codes.

    Scrapes Google's currency-converter page via web_utility.load_page
    and returns the converted value as a float, or -1 when the page
    cannot be parsed (e.g. an unknown currency code).
    NOTE(review): depends on the exact HTML of the legacy Google Finance
    converter page -- verify the endpoint still responds.
    """
    amount_as_string = str(amount) # Convert to string here because you can not join str and int implicitly
    try:
        input_url = "https://www.google.com/finance/converter?a=" + amount_as_string + "&from=" + home_currency_code + \
                    "&to=" + location_currency_code # building the search URL
        output_code = web_utility.load_page(input_url)
        span_start = output_code.find('class=bld>') # The value is in the only span class
        span_end = output_code.find('</span>')
        span_container = output_code[span_start:span_end] # gives the span container as a string
        currency_with_code = span_container.split('class=bld>') # Takes the leading span tag out
        separated_code_currency = currency_with_code[1] # separates value and country code
        converted_currency = separated_code_currency.split(' ') # isolates value
        return float(converted_currency[0])
    except IndexError: # Raised when a sequence subscript is out of range.
        return -1
def get_details(country_name):
    """Look up one country's row in currency_details.txt.

    Returns the comma-split line whose first field equals country_name
    exactly, or an empty tuple when the country is not listed.
    """
    # 'with' guarantees the handle is closed; the original leaked it on
    # every early return.
    with open("currency_details.txt", 'r', encoding='utf-8') as currency_details:
        for line in currency_details:
            current_line = line.split(",")  # Splits data by commas to be exact matched below
            # Exact match on the first column only.  The original's plain
            # substring pre-check returned () as soon as country_name
            # appeared inside a longer country's line (e.g. "Ireland" vs
            # "Northern Ireland"), hiding later exact matches.
            if current_line[0] == country_name:
                return current_line
    return ()
def get_all_details():
"""This function returns a dictionary of currency details"""
all_country_details = {}
details_file = open("currency_details.txt", 'r', encoding='utf-8')
for line in details_file:
split_details = line.split(",")
all_country_details[split_details[0]] = (split_details[0], split_details[1], split_details[2])
return all_country_details
""" Module Testing"""
# if __name__ == "__main__":
# print('TESTING')
# print('')
# print('Test of convert()')
# print("invalid conversion expect: -1", "1", "AUD", "->", "AUD", "=", convert(1, "AUD", "AUD"))
# print("invalid conversion expect: -1", "1", "JPY", "->", "ABC", "=", convert(1, "JPY", "ABC"))
# print("invalid conversion expect: -1", "1", "ABC", "->", "USD", "=", convert(1, "ABC", "USD"))
# print("valid conversion", "10.95", "AUD", "->", "JPY", "=", convert(10.95, "AUD", "JPY"))
# print("valid conversion reverse", "965.71", "JPY", "->", "AUD", "=", convert(965.71, "JPY", "AUD"))
# print("valid conversion", "10.95", "AUD", "->", "BGN", "=", convert(10.95, "AUD", "BGN"))
# print("valid conversion reverse", "13.82", "BGN", "->", "AUD", "=", convert(13.82, "BGN", "AUD"))
# print("valid conversion", "200.15", "BGN", "->", "JPY", "=", convert(200.15, "BGN", "JPY"))
# print("valid conversion reverse", "13390.51", "JPY", "->", "BGN", "=", convert(13390.51, "JPY", "BGN"))
# print("valid conversion", "100", "JPY", "->", "USD", "=", convert(100, "JPY", "USD"))
# print("valid conversion reverse", "0.83", "USD", "->", "JPY", "=", convert(0.83, "USD", "JPY"))
# print("valid conversion", "19.99", "USD", "->", "BGN", "=", convert(19.99, "USD", "BGN"))
# print("valid conversion reverse", "34.39", "BGN", "->", "USD", "=", convert(34.39, "BGN", "USD"))
# print("valid conversion", "19.99", "USD", "->", "AUD", "=", convert(19.99, "USD", "AUD"))
# print("valid conversion reverse", "27.26", "AUD", "->", "USD", "=", convert(27.26, "AUD", "USD"))
# print('')
# print('Testing get_details()')
#
# print("invalid details expect ():", get_details("Unknown"))
# print("invalid details expect ():", get_details("Japanese"))
# print("invalid details expect ():", get_details(""))
# print("valid details expect details of AUS:", get_details("Australia"))
# print("valid details expect details of JPN:", get_details("Japan"))
# print("valid details expect details of HK:", get_details("Hong Kong"))
#
# # New get_all_details testing:
# print(get_all_details())
# testing = get_all_details()
# print("keys", testing.keys())
# print(testing["Australia"])
# Manual Testing
# converted_save = 0
#
# loop_check = True
# while loop_check:
#
# amount = input('Amount to change:') # Assume value will be a number
# home = str(input('Home Currency Code:'))
# away = str(input('Away Currency Code:'))
#
# converted = convert(amount, home, away)
#
# if converted == -1:
# check = 'invalid conversion'
# elif converted_save == amount:
# check = 'valid conversion reverse'
# else:
# check = 'valid conversion'
#
# converted_save = converted
#
# print(check, ' ', amount, ' ', home, '->', away, ' ', converted)
#
# loop_check = input('check again or move on to next check? Y or N').upper()
# if loop_check == 'Y':
# loop_check = True
# else:
# loop_check = False
#
# loop_check = True
# while loop_check:
# country_name = input("Input Test Country:").title()
# details = get_details(country_name)
#
# if not details:
# check = 'invalid details'
# else:
# check = 'valid details'
#
# print(check, ' ', country_name, ' ', details)
#
# loop_check = input('check again or finnish? Y or N').upper()
# if loop_check == 'Y':
# loop_check = True
# else:
# loop_check = False
# print('Thank You :)')
| [
"[email protected]"
] | |
59b265a6b37a1d3ca27525a1826488ed52ce1794 | 2915e5a34da17dab218ad615dc30cebaf80ccaf5 | /model/CNN_BN.py | d31d8d1e660e059525d4f75800b6de3525133aee | [] | no_license | NanoporeAna/EEGBaseDL | cef074a410bf679c681547141a012b578daa2602 | c663e70cbefe1f82695d3978cb5213ba95b6b07f | refs/heads/master | 2023-02-09T01:46:56.665085 | 2019-07-10T02:33:43 | 2019-07-10T02:33:43 | 195,966,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,020 | py | import os
import tensorflow as tf
import numpy as np
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.training import moving_averages
from data.Tailor_Test_Data import tailor_test_batch
from data.Tailor_Train_Data import tailor_train_batch
from data.test_data import test_batch
from data.train_data import train_batch
from data.load_single_data import load_object_batch
from datasets.DataSet import DataSet
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
def one_hot(labels, n_class=7):
""" One-hot encoding """
expansion = np.eye(n_class)
y = expansion[:, labels - 1].T
assert y.shape[1] == n_class, "Wrong number of labels!"
return y
def standardize(train):
""" Standardize data """
# Standardize data
X_train = (train - np.mean(train, axis=0)[None, :, :]) / np.std(train, axis=0)[None, :, :]
return X_train
def batch_norm(x, train, scope='bn'):
with tf.variable_scope(scope):
n_out = x.get_shape()[-1].value
beta = tf.Variable(tf.constant(0.0, shape=[n_out]), name='beta', trainable=True)
gamma = tf.Variable(tf.constant(1.0, shape=[n_out]), name='gamma', trainable=True)
tf.add_to_collection('biases', beta)
tf.add_to_collection('weights', gamma)
batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
ema = tf.train.ExponentialMovingAverage(decay=0.99)
def mean_var_with_update():
ema_apply_op = ema.apply([batch_mean, batch_var])
with tf.control_dependencies([ema_apply_op]):
return tf.identity(batch_mean), tf.identity(batch_var)
# mean, var = control_flow_ops.cond(phase_train,
# mean, var = control_flow_ops.cond(phase_train,
# mean_var_with_update,
# lambda: (ema.average(batch_mean), ema.average(batch_var)))
mean, var = mean_var_with_update()
normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)
return normed
def batch_norm_template(inputs, is_training, scope, moments_dims, bn_decay):
""" Batch normalization on convolutional maps and beyond...
Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow
Args:
inputs: Tensor, k-D input ... x C could be BC or BHWC or BDHWC
is_training: boolean tf.Varialbe, true indicates training phase
scope: string, variable scope
moments_dims: a list of ints, indicating dimensions for moments calculation
bn_decay: float or float tensor variable, controling moving average weight
Return:
normed: batch-normalized maps
"""
with tf.variable_scope(scope) as sc:
num_channels = inputs.get_shape()[-1].value
beta = tf.Variable(tf.constant(0.0, shape=[num_channels]),
name='beta', trainable=True)
gamma = tf.Variable(tf.constant(1.0, shape=[num_channels]),
name='gamma', trainable=True)
batch_mean, batch_var = tf.nn.moments(inputs, moments_dims, name='moments')
decay = bn_decay if bn_decay is not None else 0.9
ema = tf.train.ExponentialMovingAverage(decay=decay)
# Operator that maintains moving averages of variables.
# Update moving average and return current batch's avg and var.
def mean_var_with_update():
ema_apply_op = tf.cond(is_training,
lambda: ema.apply([batch_mean, batch_var]),
lambda: tf.no_op())
with tf.control_dependencies([ema_apply_op]):
return tf.identity(batch_mean), tf.identity(batch_var)
# ema.average returns the Variable holding the average of var.
mean, var = tf.cond(is_training,
mean_var_with_update,
lambda: (ema.average(batch_mean), ema.average(batch_var)))
normed = tf.nn.batch_normalization(inputs, mean, var, beta, gamma, 1e-3)
return normed
def batch_norm_for_fc(inputs, is_training, bn_decay, scope):
""" Batch normalization on FC data.
Args:
inputs: Tensor, 2D BxC input
is_training: boolean tf.Varialbe, true indicates training phase
bn_decay: float or float tensor variable, controling moving average weight
scope: string, variable scope
Return:
normed: batch-normalized maps
"""
return batch_norm_template(inputs, is_training, scope, [0, ], bn_decay)
def batch_norm_for_conv2d(inputs, is_training, bn_decay, scope):
""" Batch normalization on 2D convolutional maps.
Args:
inputs: Tensor, 4D BHWC input maps
is_training: boolean tf.Varialbe, true indicates training phase
bn_decay: float or float tensor variable, controling moving average weight
scope: string, variable scope
Return:
normed: batch-normalized maps
"""
return batch_norm_template(inputs, is_training, scope, [0, 1, 2], bn_decay)
def BaseCNN(input_tensor, train, regularizer):
# 卷积网第一层架构 输入为500*9*16 的矩阵
with tf.variable_scope('layer1-conv'):
conv1_weights = tf.get_variable("weight", [3, 3, 1, 16],
initializer=tf.truncated_normal_initializer(stddev=0.1))
conv1_biases = tf.get_variable('biases', [16], initializer=tf.constant_initializer(0.0))
conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME', name='conv1')
res1 = tf.nn.bias_add(conv1, conv1_biases)
bn1 = batch_norm_for_conv2d(res1, train, bn_decay, scope='BN')
elu1 = tf.nn.elu(bn1)
with tf.variable_scope('layer1-pool'):
pool1 = tf.nn.max_pool(elu1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')
# 卷积网第二层架构输入为250*5*32
with tf.variable_scope('layer2-conv'):
conv2_weights = tf.get_variable("weight", [3, 3, 16, 32],
initializer=tf.truncated_normal_initializer(stddev=0.1))
conv2_biases = tf.get_variable('biases', [32], initializer=tf.constant_initializer(0.0))
conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME', name='conv2')
res2 = tf.nn.bias_add(conv2, conv2_biases)
bn2 = batch_norm_for_conv2d(res2, train, bn_decay, scope='BN')
elu2 = tf.nn.elu(bn2)
with tf.variable_scope('layer2-pool'):
pool2 = tf.nn.max_pool(elu2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2')
# 卷积网第二层架构输入为125*3*64
with tf.variable_scope('layer3-conv'):
conv3_weights = tf.get_variable("weight", [3, 3, 32, 64],
initializer=tf.truncated_normal_initializer(stddev=0.1))
conv3_biases = tf.get_variable('biases', [64], initializer=tf.constant_initializer(0.0))
conv3 = tf.nn.conv2d(pool2, conv3_weights, strides=[1, 1, 1, 1], padding='SAME', name='conv3')
res3 = tf.nn.bias_add(conv3, conv3_biases)
bn3 = batch_norm_for_conv2d(res3, train, bn_decay, scope='BN')
elu3 = tf.nn.elu(bn3)
with tf.variable_scope('layer3-pool'):
pool3 = tf.nn.max_pool(elu3, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool3')
# 卷积网第二层架构输入为63*2*128
with tf.variable_scope('layer4-conv'):
conv4_weights = tf.get_variable("weight", [3, 3, 64, 128],
initializer=tf.truncated_normal_initializer(stddev=0.1))
conv4_biases = tf.get_variable('biases', [128], initializer=tf.constant_initializer(0.0))
conv4 = tf.nn.conv2d(pool3, conv4_weights, strides=[1, 1, 1, 1], padding='SAME', name='conv4')
res4 = tf.nn.bias_add(conv4, conv4_biases)
bn4 = batch_norm_for_conv2d(res4, train, bn_decay, scope='BN')
elu4 = tf.nn.elu(bn4)
with tf.variable_scope('layer4-pool'):
pool4 = tf.nn.max_pool(elu4, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool4')
# 将第四层池化层的输出转化为全连接层的输入格式,注意,因为每一层神经网络的输入输出都为一个batch的矩阵,
# 所以这里得到的维度也包含一个batch的数据的个数
pool_shape = pool4.get_shape().as_list()
# 计算将矩阵拉直成向量之后的长度,这个长度就是矩阵长宽及深度的乘积,注意这里pool_shape[0]为一个batch中数据的个数
nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
# 将第四层的输出转变为一个batch的向量
reshaped = tf.reshape(pool4, [pool_shape[0], nodes])
with tf.variable_scope('layer5-fc1'):
fc1_weights = tf.get_variable('weight', [nodes, 512], initializer=tf.truncated_normal_initializer(stddev=0.1))
# 只有全连接层的权重需要加入正则化
if regularizer != None:
tf.add_to_collection('losses', regularizer(fc1_weights))
fc1_biases = tf.get_variable("bias", [512], initializer=tf.constant_initializer(0.1))
fbn1 = tf.matmul(reshaped, fc1_weights) + fc1_biases
fc_BN = batch_norm_for_fc(fbn1, train, bn_decay, scope='fc_BN')
fc1 = tf.nn.elu(fc_BN, name='fc1')
if train is not None: fc1 = tf.nn.dropout(fc1, 0.5)
with tf.variable_scope('layer6-fc2'):
fc2_weights = tf.get_variable('weight', [512, 7], initializer=tf.truncated_normal_initializer(0.1))
if regularizer != None:
tf.add_to_collection('losses', regularizer(fc2_weights))
fc2_biases = tf.get_variable('biase', [7], initializer=tf.constant_initializer(0.1))
logit = tf.matmul(fc1, fc2_weights) + fc2_biases
out = {
'conv1': conv1,
'pool1': pool1,
'conv2': conv2,
'pool2': pool2,
'conv3': conv3,
'pool3': pool3,
'conv4': conv4,
'pool4': pool4,
'fc1': fc1,
'logit': logit
}
return out
bat = [5424, 10376, 7310, 5228, 4284, 2870, 6015, 4192, 8457, 8026, 7781, 10608, 916, 4338, 2089]
# 配置神经网络参数
Batch_Size = 128
Learning_Rate_Base = 0.0005
Learning_Rate_Decay = 0.99
Regularazition_Rate = 0.0005
Training_Steps = 967 # 裁剪后的数据量为123807/128 = 967
bn_decay = 0.9
Moving_Average_Decay = 0.99
Model_Save_Path = "CNN_BN_v"
Model_Name = "model.ckpt"
def train(train, label, num):
with tf.name_scope("input"):
input_x = tf.placeholder(tf.float32, [128, 500, 9, 1], name='EEG-input') # 数据的输入,第一维表示一个batch中样例的个数,100要加上,
# failed to convert object of type <class 'list'> to Tensor. Contents: [None, 4096]. Consider casting elements to a supported type.
input_y = tf.placeholder(tf.float32, [None, 7], name='EEG-lable') # 一个batch里的lable
# reshaped_x = np.reshape(input_x,(100,500,9,1))#类似于将输入的训练数据格式调整为一个四维矩阵,并将这个调整的数据传入sess.run过程
regularlizer = tf.contrib.layers.l2_regularizer(Regularazition_Rate)
is_training = tf.cast(True, tf.bool)
out = BaseCNN(input_x, is_training, regularlizer) # 将数据放进去训练,得到最后全连接层输出结果
# pre_y = BaseCNN(input_x,False,None)
y = out['logit']
with tf.name_scope("loss_function"):
# 损失代价
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(input_y, 1))
cross_mean = tf.reduce_mean(cross_entropy)
# 加上L2正则化来计算损失函数
loss = cross_mean + tf.add_n(tf.get_collection('losses'))
tf.summary.scalar('loss', loss)
# loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=y, labels=tf.argmax(input_y, 1)), name='cost')
# 给定滑动平均衰减率和训练轮数的变量
with tf.name_scope("moving_average"):
global_step = tf.Variable(0, trainable=False)
variable_averages = tf.train.ExponentialMovingAverage(Moving_Average_Decay, global_step)
variable_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.name_scope("train_step"):
# 学习率的更新:滑动平均模型
learning_rate = tf.train.exponential_decay(
Learning_Rate_Base,
global_step,
Training_Steps,
Learning_Rate_Decay
)
# 优化损失函数
train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step=global_step)
# 在训练神经网络模型是,每过一遍数据既要反向传播更新参数,又要更新每一个参数滑动平均值。
with tf.control_dependencies([train_step, variable_averages_op]):
train_op = tf.no_op(name='train')
# optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
with tf.name_scope("train_acc"):
# 得到的准确度
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(input_y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')
tf.summary.scalar('train_acc', accuracy)
# 开始训练网络
merged = tf.summary.merge_all()
saver = tf.train.Saver()
with tf.Session() as sess:
writer = tf.summary.FileWriter("CNN_BN_v" + str(num) + "_logs/", sess.graph)
tf.global_variables_initializer().run()
ds = DataSet(train, label)
epochs = 100
for e in range(epochs):
for i in range(Training_Steps):
x, y = ds.next_batch(128)
# x = train[i*100:(i+1)*100]
# y = label[i*100:(i+1)*100]
# xs = standardize(xs)
xs = np.reshape(x, (128, 500, 9, 1))
ys = one_hot(y)
# Feed dictionary
feed = {input_x: xs, input_y: ys}
# train_s,lo,acc,step = sess.run([train_op,loss,accuracy,global_step],feed_dict=
summary, trainloss, trainacc, _ = sess.run([merged, loss, accuracy, train_op], feed_dict=feed)
writer.add_summary(summary, i)
# writer.add_summary(lo, i)
# writer.add_summary(acc, i)
if i % 80 == 0:
print("after %g epoch: train loss: %g ,Train acc: %g" % (e,trainloss, trainacc))
saver.save(sess, os.path.join(Model_Save_Path + str(num), Model_Name), global_step=global_step)
# test_acc = sess.run(accuracy,feed_dict={input_x:test_x,input_y:test_y})
# print("测试精度为:%g" % test_acc)
writer.close()
def evaluate(train, label, batnum, num):
with tf.name_scope("input"):
input_x = tf.placeholder(tf.float32, [batnum, 500, 9, 1], name='EEG-input') # 数据的输入,第一维表示一个batch中样例的个数
input_y = tf.placeholder(tf.float32, [None, 7], name='EEG-lable') # 一个batch里的lable
# regularlizer = tf.contrib.layers.l2_regularizer(Regularazition_Rate) # 本来测试的时候不用加这个
no_training = tf.cast(False, tf.bool)
out = BaseCNN(input_x, no_training, None)
y = out['logit']
with tf.name_scope("test_acc"):
correct_predection = tf.equal(tf.argmax(y, 1), tf.argmax(input_y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_predection, tf.float32))
tf.summary.scalar('test_acc', accuracy)
variable_averages = tf.train.ExponentialMovingAverage(Moving_Average_Decay)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
acc = []
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(Model_Save_Path + str(num))
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
# x, y = test_batch()
# x = x[100: 200]
# y = y[100: 200]
# xs = standardize(x)
for i in range(4):
x_test,y_label = train[i*batnum:(i+1)*batnum],label[i*batnum:(i+1)*batnum]
reshape_xs = np.reshape(x_test, (-1, 500, 9, 1))
ys = one_hot(y_label)
acc_score = sess.run(accuracy, feed_dict={input_x: reshape_xs, input_y: ys})
print("Afer %s training step, test accuracy = %g" % (global_step, acc_score))
acc.append(acc_score)
else:
print("No checkpoint file found")
return acc
def main(argv=None):
"""`
x,y = test_batch()
# 十倍交叉验证法,数组无法劈分开too many indices for array
for i in range(10):
x_test,y_test = x[i*13400:(i+1)*13400],y[i*13400:(i+1)*13400]
x_train1,y_train1 =x[0:i*13400],y[0:i*13400]
x_train = np.concatenate((x_train1, x[(i+1)*13400:134000]), axis=0)
y_train = np.concatenate((y_train1, y[(i + 1) * 13400:134000]), axis=0)
train(x_train,y_train,i)
mean = evaluate(x_test,y_test,13400,i)
"""
# x_train, y_train = train_batch()
x_train, y_train = tailor_train_batch()
train(x_train, y_train, 21)
# x_test, y_test = test_batch() #7500
# x_test, y_test = tailor_test_batch() # 18524
# mean = evaluate(x_test, y_test, 1875, 1) #batnum = len(test)/4 4631 1875
# print(np.mean(mean))
if __name__ == '__main__':
tf.app.run()
# direct to the local dir and run this in terminal:
# $ tensorboard --logdir logs
| [
"[email protected]"
] | |
9f9b0420444650d8f9ce67002d4ec0cb16000ab6 | c1ccf743b07e21ef86a1d7d2f68e63cdd2af0dce | /uboone/UBFlashFinder/mac/unit/check_ophits_to_flashes.py | 9102576cb84a11e31bcb5f29e1ed74b0cd65fb8f | [] | no_license | MicroBooNE/uboonecode | 383425806d3ae0a89f3930e20b87016deda1d478 | b575e518da1098a274cb1f4cef57e9f76c11c279 | refs/heads/master | 2021-01-12T03:19:44.325785 | 2017-01-30T19:25:50 | 2017-01-30T19:25:50 | 78,196,104 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,946 | py | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.colors as mpc
import matplotlib.dates as dts
import matplotlib.patches as patches
import numpy as np
import ROOT as rt
from larlite import larlite as fmwk
event_ophit = rt.larlite.event_ophit()
channel = int(np.floor(32*np.random.random()))
readout_window = 23.4
t_starts = []
ophits = []
#add some random flashes...
for i in xrange(0,10): #10 flashes
t_peak = 23.4*np.random.random_sample()
width = 10.0
area = 100*np.random.random_sample()
peakheight = np.random.uniform(1,15)
pe = peakheight/2.0
fastototal = 0.0;
hit = rt.larlite.ophit(channel,
t_peak,
0,
0,
width,
area,
peakheight,
pe,
fastototal)
ophits.append(hit)
#add a train of pulses
for i in xrange(0,20):
t_peak = 10 + i*np.random.uniform(0,0.1)
width = 3
area = 100;
peakheight = 30 - float(i)/2.0
pe = 30 - float(i)/2.0
fastototal = 0.0;
hit = rt.larlite.ophit(channel,
t_peak,
0,
0,
width,
area,
peakheight,
pe,
fastototal)
ophits.append(hit)
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[52]:
mgr = fmwk.storage_manager()
mgr.set_io_mode(fmwk.storage_manager.kWRITE)
mgr.set_out_filename("temp.root")
mgr.open()
mgr.set_id(1,0,1)
mgr.event_id()
event_ophit = mgr.get_data(fmwk.data.kOpHit,'OpHitFinder')
for h in ophits:
event_ophit.push_back(h)
mgr.next_event()
mgr.close()
# In[53]:
my_proc = fmwk.ana_processor()
my_proc.add_input_file ("temp.root")
my_proc.set_output_file("temp2.root")
my_proc.set_io_mode(fmwk.storage_manager.kBOTH)
my_module = fmwk.FlashFinder()
my_module.Configure("../flashfindermodule.fcl")
my_proc.add_process(my_module)
my_proc.run()
mgr = fmwk.storage_manager()
mgr.set_io_mode(mgr.kREAD)
mgr.add_in_filename("temp2.root")
mgr.open()
mgr.next_event()
event_opflash = mgr.get_data(fmwk.data.kOpFlash,'FlashFinder')
plt.rcParams.update({'font.size': 16})
fig,ax = plt.subplots(figsize=(15,6))
for i in np.linspace(0.1,23.4,234):
ax.vlines(i,0,100,lw=1,color='red',linestyles='dashed',alpha=0.7)
for h in ophits:
ax.vlines(h.PeakTime(),0,h.PE(),lw=4,color='black',alpha=1)
for i in xrange(event_opflash.size()):
flash = event_opflash[i]
ax.vlines(flash.Time(),0,flash.TotalPE(),lw=2,color='orange',alpha=0.8)
plt.xlim(0,24)
plt.xlabel('Time [us]')
plt.ylabel('PE')
plt.show()
import os
os.system('rm -r temp.root')
os.system('rm -r temp2.root')
| [
"[email protected]"
] | |
d2c9feb3215b5259ef876c741186d832dd828a33 | b4da11e5acb90a3d698f05870643c37674a32eaf | /Alibaba/Alibaba/urls.py | 1bd21809679a1792707591d7f0a15e546c66b471 | [] | no_license | joae/fiuba-robots | d4446e157f61621565d7c3c2227c3e7e0b830378 | 6b4bf93dc505bf5d3bd54cdd3e6ef093a3d5ea94 | refs/heads/master | 2020-05-18T11:23:24.453136 | 2015-07-05T13:35:47 | 2015-07-05T13:35:47 | 35,906,107 | 0 | 0 | null | 2015-07-05T13:35:48 | 2015-05-19T20:15:36 | Python | UTF-8 | Python | false | false | 757 | py | """Alibaba URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
]
| [
"[email protected]"
] | |
0ee4d64d9b1e4ffc0fcbe7cb06b71dad025f7e6b | 95f0d18cfabeb3806d6a54f80a47aa3b7f996dfb | /forum/migrations/0075_comraderybetainvitation.py | f60750797ee261e012e6d049a12d98c66f488daf | [
"MIT"
] | permissive | thedeadwoods/Comradery-API | 14eade52bc41b4b5e2fdbf5746bff468c62c75d0 | 0d30df376f5542d01a04b96c5fdf9363d3103679 | refs/heads/master | 2023-03-23T07:52:27.476182 | 2021-03-06T20:01:36 | 2021-03-06T20:01:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | # Generated by Django 2.2.7 on 2020-03-29 07:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('forum', '0074_auto_20200329_0435'),
]
operations = [
migrations.CreateModel(
name='ComraderyBetaInvitation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('registration_code', models.CharField(max_length=40)),
],
),
]
| [
"[email protected]"
] | |
14ad83e0917beb59f8fe567d25182b21bb54ff30 | f4adff7fb331154be8b897c6f011008f7ac1de99 | /submit/urls.py | 58384fcaf687a90a4ad46602c35f95c788d511c3 | [] | no_license | Benjji/liahen | 39f218627269d94670c43593ad8d3f8610faf0c6 | 4e93f35c8acad027ae7ceb736c11233776083da4 | refs/heads/master | 2020-05-29T11:52:49.129440 | 2015-09-26T14:57:43 | 2015-09-26T14:57:43 | 43,208,761 | 0 | 0 | null | 2015-09-26T14:39:31 | 2015-09-26T14:39:29 | null | UTF-8 | Python | false | false | 1,247 | py | from django.conf.urls import patterns, url
from submit import views
urlpatterns = patterns ('',
#submits/protocol/2/ (detaily submitu)
url(r'^protocol/(?P<pk>\d+)/$', views.protocol_view, name = 'protocol'),
#submits/update
url(r'^update/$', views.update_submit),
#submits/ (vsetky moje)
url(r'^$', views.judge_view, {'type':'me'}, name = 'index'),
#submits/now (teraz na testovaci)
url(r'^now/$', views.judge_view, {'type':'now'}, name = 'now'),
#submits/task/popolvar (vsetky uspesne v ulohe)
url(r'^task/(?P<task>\w+)/$', views.judge_view, {'type':'task'}, name='task'),
#admin vidi aj:
#submits/user/fero (vsetky user-ove)
url(r'^user/(?P<user>\w+)/$', views.judge_view, {'type':'user'}, name='user'),
#submits/user/fero/task/popolvar (vsetky user-ove v ulohe)
url(r'^user/(?P<user>\w+)/task/(?P<task>\w+)/$', views.judge_view, {'type':'user_task'}, name='user_task'),
)
'''
#V buducnosti: statistiky
#submits/users
url(r'^users/$', views.users_view, name = 'users'),
#submits/tasks
url(r'^tasks/$', views.index_view, name = 'tasks'),
#submits/schools
url(r'^schools/$', views.index_view, name = 'schools'),
)
'''
| [
"[email protected]"
] | |
6d4fde5671e5072285e7d7257570788ad7fdcaa1 | 724e1cb403ff723130ba8746df657e21da05349c | /scripts/f_logger.py | 751d79cf22337fe121b7c529e62f904e1817e82d | [
"MIT"
] | permissive | bereketkibru/Pharmaceutical-Sales-Prediction | 3f670634647bedcb89beac3224a762bd7ccdf8e1 | 056a832a46cc6a74bd9314d69f25f071a54e9cd5 | refs/heads/main | 2023-07-13T13:28:28.334620 | 2021-08-25T17:48:44 | 2021-08-25T17:48:44 | 389,676,518 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | import logging
class F_Logger:
def __init__(self, file_name: str, basic_level=logging.INFO):
# Gets or creates a logger
logger = logging.getLogger(__name__)
# set log level
logger.setLevel(basic_level)
# define file handler and set formatter
file_handler = logging.FileHandler(file_name)
formatter = logging.Formatter('%(asctime)s : %(levelname)s : %(name)s : %(message)s')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
self.logger = logger
def get_f_logger(self) -> logging.Logger:
return self.logger | [
"[email protected]"
] | |
7b09b1728c37bc3fb8d427b17b52b9f0d610bfaa | cff5b894daf7595a23f05df2984ce01e8b0fbfbc | /challenge2and3/get_instance_metadata.py | 2b3ac642618ac03662a026453ba51993a1ee3abd | [] | no_license | basant1988/tech-challenge | 0da063cdb3434656bb2534ebd341643a38636cfc | 6589cf1c8b926254a1a9b44b05103c5a6abbab14 | refs/heads/master | 2023-06-10T16:38:28.681754 | 2021-07-01T08:20:01 | 2021-07-01T08:20:01 | 381,766,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,480 | py | #!/usr/bin/env python
import requests
import json
import sys
def get_instance_metadata(key):
baseurl = 'http://169.254.169.254/latest/meta-data/'
metadata = {}
traverse_data(baseurl, metadata, key)
return metadata
def traverse_data(baseurl, metadata, key):
req = requests.get(baseurl)
if req.status_code == 404:
return
for line in req.text.split('\n'):
if not line:
continue
updated_url = '{0}{1}'.format(baseurl, line)
if line.endswith('/'):
newsection = line.split('/')[-2]
if key is None:
metadata[newsection] = {}
traverse_data(updated_url, metadata[newsection], key)
else:
traverse_data(updated_url, metadata, key)
else:
req = requests.get(updated_url)
if req.status_code != 404:
if key is not None:
if key==line:
metadata[line] = req.text
break
else:
try:
metadata[line] = json.loads(req.text)
except ValueError:
metadata[line] = req.text
else:
metadata[line] = None
if __name__ == "__main__":
arguments = sys.argv
arglength = len(arguments)
key = None
if arglength > 1:
key = arguments[1]
print(json.dumps(get_instance_metadata(key))) | [
"[email protected]"
] | |
02b6251c2bd8c8129cb29b2bdc91c06ec9f699d9 | 7d586f404967e98a498f6cb8654fb00abcab12a0 | /blog/views.py | f046b44b45c8ee4f0137d46ae3f27ca4b76c5e49 | [] | no_license | shoaibshrafi/Python-Django-Blog-App | ba6ef57f473b0b0cb28b7cce0f2d11bbee7a3f6a | 54fb2b10abd952b581bb52f12f240bbe13cb1352 | refs/heads/master | 2020-03-28T17:32:16.397668 | 2018-09-14T14:28:12 | 2018-09-14T14:28:12 | 148,799,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | from django.shortcuts import render
def home(request):
posts = [
{
'author' : 'Shoaib',
'date_posted' : 'September 14, 2018',
'title' : 'Blog Title 1',
'content' : 'First blog post'
},
{
'author' : 'Najib',
'date_posted' : 'September 13, 2018',
'title' : 'Blog Title 2',
'content' : 'Second blog post'
},
{
'author' : 'Raheema',
'date_posted' : 'September 13, 2018',
'title' : 'Blog Title 3',
'content' : 'Third blog post'
}
]
return render(request, 'blog/home.html', {'posts' : posts})
def about(request):
return render(request, 'blog/about.html')
| [
"[email protected]"
] | |
60cb82c1e51bb3a3fdb94a776df5958da5755132 | fe7153e1738c2c09f76aa270ae6417f432ab70c5 | /taxes/models/tax.py | 4dcc3e7527b7164c4ead46e90cf287e8bd3ee794 | [] | no_license | mdzakir/mysuru | 9e83e12d02e13bb0e9abaf859c9b25bf420eb795 | d6121a79f42d5fd57172f4b39f482927f5fedd62 | refs/heads/master | 2021-01-13T16:37:41.396037 | 2017-07-18T15:59:58 | 2017-07-18T15:59:58 | 79,259,805 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,132 | py | import datetime
from collections import defaultdict
from collections import namedtuple
import json
from django.conf import settings
from mongoengine import *
class RatePlanTaxDetails(EmbeddedDocument):
tax_type = FloatField()
tax_value = FloatField()
rate_id = StringField()
def get_tax_type(self):
return self.tax_type
def get_tax_value(self):
return self.tax_value
def get_rate_id(self):
return self.rate_id
class DateRangeTaxDetails(EmbeddedDocument):
tax_type = FloatField()
tax_value = FloatField()
rate_id = StringField()
start = DateTimeField()
end = DateTimeField()
def get_tax_type(self):
return self.tax_type
def get_tax_value(self):
return self.tax_value
def get_rate_id(self):
return self.rate_id
def get_start(self):
return self.start
def get_end(self):
return self.end
class TaxEntity(Document):
product_id = StringField()
rate_tax = ListField(EmbeddedDocumentField(RatePlanTaxDetails))
date_tax = ListField(EmbeddedDocumentField(DateRangeTaxDetails))
| [
"[email protected]"
] | |
6ff2b89d3fcdd2f0c997fad4706a19b7a18525b0 | 3068d24ee80e0f5719306476f7495c8fede02592 | /google_test/bunny_escape/bunnyEscape.py | 2fb40cfe7a818ebb945810ad3de9c1b68da2d07c | [
"Artistic-2.0"
] | permissive | perlygatekeeper/glowing-robot | 1926a8143168fee58e83ae78c72674a7bd1c10ed | 1c15dbc4f4f7a160c226734f5b258b8e17acfc7f | refs/heads/master | 2023-08-21T19:57:41.861644 | 2023-08-12T01:21:13 | 2023-08-12T01:21:13 | 27,408,610 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,686 | py | def pathFinder(x, y, map, steps, lastX, lastY, wall):
# count possible moves
options = []
if x-1 >= 0: # East
options.append([-1, 0])
if x+1 <= lastX: # West
options.append([ 1, 0])
if y-1 >= 0: # North
options.append([ 0,-1])
if y+1 <= lastY: # South
options.append([ 0, 1])
# increment step
steps += 1
for option in options:
# new x and y
newX = x + option[0]
newY = y + option[1]
# if statements
if map[newY][newX] == 0:
map[newY][newX] = steps
if newX != 0 or newY != 0:
pathFinder(newX, newY, map, steps, lastX, lastY, wall)
elif ( map[newY][newX] == 1 or map[newY][newX] < 0 ) and not wall:
wall = True
map[newY][newX] = steps * -1
pathFinder(newX, newY, map, steps, lastX, lastY, wall)
wall = False
elif map[newY][newX] > 1 and steps < abs(map[newY][newX]):
if(map[newY][newX] < 0):
map[newY][newX] = steps * -1
if(map[newY][newX] > 0):
map[newY][newX] = steps
if newX != 0 or newY != 0:
pathFinder(newX, newY, map, steps, lastX, lastY, wall)
def solution(map):
    """Return the step count written into cell (0, 0) after flood-filling
    from the bottom-right corner of the board with pathFinder().

    :param map: rectangular grid of 0 (free) / 1 (wall) lists.

    The original code copied the board with ``map[:]`` — a *shallow* copy
    whose row lists are shared with the caller, so the solver scribbled its
    step counts straight into the caller's data. Copying each row fixes that
    while leaving the returned value unchanged.
    """
    steps = 1
    lastX = len(map[0]) - 1
    lastY = len(map) - 1
    x = lastX
    y = lastY
    # New outer list AND new row lists, so the caller's grid is untouched.
    testMap = [row[:] for row in map]
    # Mark the start cell as visited before recursing.
    testMap[y][x] = 1
    pathFinder(x, y, testMap, steps, lastX, lastY, False)
    return testMap[0][0]
# print(solution([[0, 1], [0, 0]]))
# print(solution([[0, 1, 1, 0], [0, 0, 0, 1], [1, 1, 0, 0], [1, 1, 1, 0]]))
# print(solution([[0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0]]))
# print(solution([[0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0]]))
'''
print(solution([
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1],
[0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0],
[0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]))
'''
print(solution([
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]))
| [
"[email protected]"
] | |
22d8d701e93efaddc5bfc903051441ed4376ae45 | 719b657774faec0d8e353d49bc44e6f4cd7ba5c2 | /Treap(Random Binary Search Tree) implementation.py | b0427349da5e700b2148a09081b1bda7d851a517 | [] | no_license | ayesha607/Treap-Random-Binary-Search-Tree- | 378c322d8fddce0aa09341be574de5be1d750b6c | 6c23c3b47e559ab77f6ed2c73038f09909b0a30f | refs/heads/main | 2023-02-28T03:48:06.678257 | 2021-02-05T20:18:35 | 2021-02-05T20:18:35 | 336,376,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,263 | py | #!/usr/bin/env python
# coding: utf-8
# In[36]:
import random
class Node:
    """Single treap node: BST-ordered by ``key``, heap-ordered by the random
    priority ``p`` drawn at construction time."""
    def __init__(self, key, value):
        self.key = key
        self.p = random.random()  # heap priority
        self.value = value
        self.left = None
        self.right = None
    def leftrotate(self):
        """Rotate this node down to the left; return the new subtree root
        (the former right child)."""
        pivot = self.right
        self.right = pivot.left
        pivot.left = self
        return pivot
    def rightrotate(self):
        """Rotate this node down to the right; return the new subtree root
        (the former left child)."""
        pivot = self.left
        self.left = pivot.right
        pivot.right = self
        return pivot
    def __repr__(self):
        return "(node key:%s )" % (str(self.key))
class Treap:
    """Randomized binary search tree (treap).

    BST order on keys, min-heap order on each node's random priority ``p``;
    rotations on the way back up from insert/remove keep both invariants.
    Duplicate keys are sent to the right subtree.

    Only change from the original besides documentation: ``== None``
    comparisons replaced with ``is None`` (PEP 8 singleton comparison).
    """
    def __init__(self):
        self.root = None
    #Func to add node
    def __add(self, node, key, value):
        """Insert (key, value) in the subtree rooted at `node`; return the
        (possibly rotated) new subtree root."""
        if node is None:
            node = Node(key, value)
            return node
        #if key less than node key add in left subtree, else in right subtree
        if key < node.key:
            node.left = self.__add(node.left, key, value)
            if node.left.p < node.p:
                node = node.rightrotate()
        elif key >= node.key:
            node.right = self.__add(node.right, key, value)
            if node.right.p < node.p:
                node = node.leftrotate()
        return node
    def add(self, key, value):
        """Insert a key/value pair into the treap."""
        self.root = self.__add(self.root, key, value)
    #finds node
    def __find(self, node, key):
        """Standard BST search; returns the matching Node or None."""
        if node is None:
            return None
        if node.key == key:
            return node
        if key < node.key:
            return self.__find(node.left, key)
        else:
            return self.__find(node.right, key)
    def find(self, key):
        """Return the Node holding `key`, or None when absent."""
        return self.__find(self.root, key)
    #func to remove node
    def __remove(self, node, key):
        """Remove `key` from the subtree at `node`; return the new root.

        A node with two children is rotated down toward the child with the
        smaller priority until it becomes removable.
        """
        #if tree is empty
        # NOTE(review): returning False here (rather than None) looks like a
        # sentinel the public remove() never triggers, since it checks
        # find() first — confirm before relying on it.
        if node is None:
            return False
        if node.key == key:
            #case 1: leaf node
            if node.left is None and node.right is None:
                return None
            #case 2: has right child
            elif node.left is None:
                return node.right
            #case 3: has left child
            elif node.right is None:
                return node.left
            else:
                # Two children: rotate the smaller-priority child up, then
                # keep removing in the subtree the target moved into.
                if node.left.p < node.right.p:
                    node = node.rightrotate()
                    node.right = self.__remove(node.right, key)
                else:
                    node = node.leftrotate()
                    node.left = self.__remove(node.left, key)
        elif key < node.key:
            node.left = self.__remove(node.left, key)
        else:
            node.right = self.__remove(node.right, key)
        return node
    def remove(self, key):
        """Remove `key`; return True if it was present, else False."""
        if self.find(key) is None:
            return False
        self.root = self.__remove(self.root, key)
        return True
    #traverse the tree
    def __traverse(self, node):
        """In-order traversal printing each key (ascending order)."""
        if node is None:
            return
        self.__traverse(node.left)
        print(node.key)
        self.__traverse(node.right)
    def traverse(self):
        """Print all keys in ascending order."""
        self.__traverse(self.root)
    def __repr__(self):
        return str(self.root)
| [
"[email protected]"
] | |
0ab2eb81e04a15ed1db8d179bf11f3e13a7dd7d0 | defc76148d2fb22162e61ab34657f169c52be666 | /journalism_workflow/v1/adjust_photos.py | 82b4cb3af3e25cd00f3216b5df0cd8b9d95c58df | [
"Apache-2.0",
"CC-BY-3.0"
] | permissive | skamille/orchestra | e29b4b2e6319476c145f5d305c347cd62ced0a29 | c9a58bf76c4521b41ce38b23db2ba93180ee9000 | refs/heads/master | 2021-01-16T21:12:57.205991 | 2016-02-01T19:08:24 | 2016-02-01T19:08:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,652 | py | import os
import tempfile
from django.conf import settings
from PIL import Image
from orchestra.google_apps.convenience import create_folder_with_permissions
from orchestra.google_apps.convenience import download_file
from orchestra.google_apps.convenience import upload_file
from orchestra.google_apps.permissions import write_with_link_permission
from orchestra.google_apps.service import Service
import logging
logger = logging.getLogger(__name__)
def autoadjust_photos(project_data, prerequisites):
    """Resize all images in a google drive directory."""
    # Returns a task_data dict with:
    #   'processed_photo_folder': Drive folder id for the output images
    #   'photos_for_caption': embeddable links to each processed photo
    task_data = {}
    parent_folder_id = project_data['project_folder_id']
    # Create a directory to output the photos
    output_folder = create_folder_with_permissions(
        parent_folder_id,
        'Processed Photos',
        permissions=[write_with_link_permission],
    )
    task_data['processed_photo_folder'] = output_folder['id']
    # List the existing photos
    # NOTE(review): this .get() chain raises AttributeError (not KeyError) as
    # soon as any level is missing, since .get() returns None — confirm the
    # workflow guarantees every level exists.
    raw_photo_folder_id = (prerequisites
                           .get('photography')
                           .get('prerequisites')
                           .get('article_planning')
                           .get('prerequisites')
                           .get('document_creation')
                           .get('task')
                           .get('data')
                           .get('raw_photo_folder'))
    service = Service(settings.GOOGLE_P12_PATH,
                      settings.GOOGLE_SERVICE_EMAIL)
    photos_metadata = service.list_folder(raw_photo_folder_id)
    # Iterate over the input photos and process them.
    task_data['photos_for_caption'] = []
    for photo_metadata in photos_metadata:
        photo, title, mimetype = download_file(photo_metadata)
        adjusted_photo_tmpfile = adjust_photo(photo)
        upload = upload_file(
            task_data['processed_photo_folder'],
            adjusted_photo_tmpfile.name,
            title,
            'image',
            mimetype
        )
        # The temp file is only needed until the upload completes.
        os.unlink(adjusted_photo_tmpfile.name)
        embed_link = upload['webContentLink'].replace('&export=download', '')
        task_data['photos_for_caption'].append(embed_link)
    return task_data
def adjust_photo(photo):
    """Convert raw image bytes to greyscale JPEG on disk.

    :param photo: raw image file contents (bytes).
    :returns: a closed NamedTemporaryFile whose ``.name`` points at the
        greyscale JPEG; the caller is responsible for unlinking it.

    Fixes over the original: both temp files are now closed — the input file
    so PIL reads fully-flushed data (and Windows can reopen the path at all),
    and the output file so the JPEG is flushed to disk before callers read
    ``temp2.name`` (previously they could upload a truncated file).
    """
    # Write the photo to a temporary file
    temp = tempfile.NamedTemporaryFile(mode='wb', delete=False)
    try:
        temp.write(photo)
    finally:
        temp.close()
    # Open it up and play with it in PIL
    im = Image.open(temp.name)
    im = im.convert('L')  # convert to greyscale (also forces the lazy load)
    # Save it back out into a new temporary file
    os.unlink(temp.name)
    temp2 = tempfile.NamedTemporaryFile(mode='wb', delete=False)
    im.save(temp2, format='jpeg')
    temp2.close()  # flush; callers only use temp2.name
    return temp2
| [
"[email protected]"
] | |
37104a18852afc5a21815369e7dd0b0f5adc9217 | 1f5659544eebcf5ed67c8b2a52d2803cb01588f2 | /rule_induction_classifier/rule_induction_classifier/__init__.py | 224d4dcb8000b3cdd4345e9d3df4250bc604dffc | [] | no_license | Apokrbk/rule_classifier | 3e8077dff643512ba1d6eb3e4ae4577d80aae5ed | 13db09418abb8875390a1f8e9d31274539e9eda1 | refs/heads/master | 2020-03-20T22:24:36.883422 | 2019-01-26T18:38:32 | 2019-01-26T18:38:32 | 137,796,976 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32 | py | name="rule_induction_classifier" | [
"[email protected]"
] | |
d7e3d08b747976c322d9047ebd236257f13e0442 | e0370bd9d08a5307678f08110b0919e4f9c60750 | /dao/user_mapper.py | e95d82d6070a60354b3d7066cb85f3e97630e149 | [] | no_license | wangyingsm/flasksql | cadc1f3d014f2218c04990bc55db2991919cf665 | 914bd50720adbb7b439c6cf0d44e5b3040713ad2 | refs/heads/master | 2021-04-15T13:07:57.652078 | 2018-03-24T20:46:28 | 2018-03-24T20:46:28 | 126,487,784 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,216 | py | # -*- coding: utf8 -*-
from app import db
from model.User import User
from sqlalchemy.exc import DatabaseError
from sqlalchemy import text
from traceback import print_exc
def findAllUsers():
    # Return every row of t_user as a list of User instances.
    return db.session.query(User).all()
def findUserById(id):
    # Return the User with the given primary key, or None when absent.
    # (`id` shadows the builtin; kept for caller compatibility.)
    return db.session.query(User).filter(User.id == id).one_or_none()
def insertUser(user):
    """Insert a new User row; on failure roll back and re-raise as
    DatabaseError.

    Fix: ``e.message`` does not exist on Python 3 (it raised AttributeError
    inside the handler, masking the real error) — use ``str(e)`` instead.
    """
    try:
        db.session.add(user)
        db.session.commit()
    except Exception as e:
        db.session.rollback()
        print_exc()
        raise DatabaseError('插入用户数据时发生错误', str(e), e)
def updateUser(params):
    """Update name/fullname/password of the user identified by params['id'].

    :param params: mapping with keys ``id``, ``name``, ``fullname``,
        ``password`` bound into the parameterized UPDATE statement.
    :raises DatabaseError: after rolling back, wrapping the original error.

    Fixes: the error message was copy-pasted from insertUser ("插入" =
    insert) and now says update ("更新"); ``e.message`` (removed in
    Python 3) replaced with ``str(e)``; leftover ``print(sql)`` debug
    output removed.
    """
    try:
        sql = text('update t_user set name=:name, fullname=:fullname, password=:password '
                   'where id=:id')
        db.session.execute(sql, params)
        db.session.commit()
    except Exception as e:
        db.session.rollback()
        print_exc()
        raise DatabaseError('更新用户数据时发生错误', str(e), e)
def deleteUser(user):
    """Delete the given User row; on failure roll back and re-raise as
    DatabaseError.

    Fix: ``e.message`` does not exist on Python 3 — use ``str(e)``.
    """
    try:
        db.session.delete(user)
        db.session.commit()
    except Exception as e:
        db.session.rollback()
        print_exc()
        raise DatabaseError('删除用户数据时发生错误', str(e), e)
"wangying"
] | wangying |
57483cde5bfd742605f867681de8e02461cca452 | 38098dfc4da17b8c65575cfec033ad3ecc9ebb9b | /Intermediate Python /exceptions/try_with_finally.py | 92e03b0fbef01c09e14e8aacb778d00905d93d19 | [] | no_license | milena-marcinik/review_SDA | 3db1a348e8d606586ac63a5d233d611fa0bf1b02 | d5017fd694ec7f25861bdeb89d57c93680c8d748 | refs/heads/main | 2023-01-28T05:03:30.444484 | 2020-12-03T21:42:28 | 2020-12-03T21:42:28 | 312,909,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | # The try block will raise an error when trying to write to a read-only file
# Demo of try/except/finally around file I/O. Two fixes over the original:
#  * if open() itself failed, `file` was unbound and the finally clause
#    raised NameError — guard with a None sentinel;
#  * writing to a file opened read-only raises io.UnsupportedOperation (an
#    OSError), which the old `except FileNotFoundError` did not catch, so
#    the script crashed despite the closing comment's claim. Catching
#    OSError covers both failure modes.
file = None
try:
    file = open("temp.txt")
    file.write("I saw a lion")
except OSError:
    print("An error occurred while processing the file!")
finally:
    # Close only if open() actually succeeded.
    if file is not None:
        file.close()
# The program can continue, without leaving the file object open
| [
"[email protected]"
] | |
6cc1398e6926a640d43d204cf2e3cac3e631e85f | b5bbf6f6ea96fc3de9230d90f02e23c5da873c22 | /tango_with_django_project/urls.py | 2bf93c81cc5a80c33e43c7a2ff4b2b2f75d9faf5 | [] | no_license | harshgupta16/Tango-with-Django | b6e2bcf01febd05285076bee6b94fea979f98dae | cbd592f8a238d02021eccfede33837587a397437 | refs/heads/master | 2020-07-07T10:22:47.993977 | 2015-02-14T14:21:31 | 2015-02-14T14:21:31 | 30,798,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | from django.conf.urls import patterns, include, url
from django.contrib import admin
# NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8 and
# removed in 1.10 — this URLconf therefore targets an older Django release.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'tango_with_django_project.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    # Delegate /rango/ URLs to the rango app's own URLconf.
    url(r'^rango/', include('rango.urls')),
    url(r'^admin/', include(admin.site.urls)),
    # ADD THIS NEW TUPLE!
)
| [
"[email protected]"
] | |
2b0dff7ddd5721512ac228512d41790c5aedf75a | be0ec4f1621aca800d1cf90deccf262f55251911 | /GUI_PKSS_v3/GUI_PKSS/Python_projekty/entry_box.py | d9d49860c87dc9b9b537b9fbf2cf20eca1cd0c7b | [] | no_license | dorotawejdman/Heating-System--model-GUI- | 9e12076fa17834915343df389199de6b6fe11b6e | e69bfad4fbfdc49494783a5438a09f316bb0afc5 | refs/heads/master | 2022-11-15T04:52:25.016976 | 2020-07-13T19:41:08 | 2020-07-13T19:41:08 | 279,396,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,168 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 7 16:18:21 2019
@author: dorot
"""
from tkinter import *
import sys, os, csv, time
#Um
#P_Um=0.1
#I_Um=0.005
#D_Um=0.05
#
##Ub1
#P_Ub1=0.1
#I_Ub1=0.005
#D_Ub1=0.05
#
##Ub2
#P_Ub2=0.1
#I_Ub2=0.005
#D_Ub2=0.05
nastawy=[[0.1,0.005,0.05], [0.1,0.005,0.05], [0.1,0.005,0.05]]
def writePID(P,I,D,d):
    # Persist the PID gains for controller index `d` (0=Um, 1=Ub1, 2=Ub2)
    # into the shared `nastawy` table and append the whole table to PID.txt.
    # NOTE(review): the relative "PID.txt" below is opened just to truncate
    # it ("w" already truncates), while the data is appended ('a') to the
    # absolute path — confirm both are meant to be the same file.
    # NOTE(review): the absolute paths contain unescaped backslashes; they
    # happen to survive as written but are fragile — raw strings advised.
    f = open("PID.txt", "w")
    f.truncate()
    nastawy[d]=[P,I,D]
    # Side effect: changes the process-wide working directory.
    os.chdir('E:\Air\PKSS\GUI_PKSS\Python_projekty')
    with open("E:\Air\PKSS\GUI_PKSS\Python_projekty\PID.txt", 'a', encoding='utf-8', newline='') as csvfile:
        csvwriter=csv.writer(csvfile,delimiter=";")
        csvwriter.writerow([str(nastawy)])
    # f.write(P_Um+"\n"+I_Um+"\n"+D_Um+"\n")
    # f.write(P_Ub1+"\n"+I_Ub1+"\n"+D_Ub1+"\n")
    # f.write(P_Ub2+"\n"+I_Ub2+"\n"+D_Ub2+"\n")
    f.close()
def pidum_ok():
    # Button callback: read the Um PID gains from the first entry row,
    # display them, and persist them via writePID (controller index 0).
    P_Um = Pent1.get()
    I_Um = Ient1.get()
    D_Um = Dent1.get()
    P1label = Label(window, text=P_Um,font="none 12")
    P1label.place( x =1344, y = 125)
    P2label = Label(window, text=I_Um,font="none 12")
    P2label.place( x =1344, y = 135)
    P3label = Label(window, text=D_Um,font="none 12")
    P3label.place( x =1344, y = 145)
    writePID(P_Um,I_Um,D_Um,0)
def pidub1_ok():
    # Button callback: read the Ub1 PID gains from the second entry row,
    # display them, and persist them via writePID (controller index 1).
    # NOTE(review): these labels use the same x/y coordinates as pidum_ok's,
    # so the displays overwrite each other — confirm intended placement.
    P_Ub1 = Pent2.get()
    I_Ub1 = Ient2.get()
    D_Ub1 = Dent2.get()
    P1label = Label(window, text=P_Ub1,font="none 12")
    P1label.place( x =1344, y = 125)
    P2label = Label(window, text=I_Ub1,font="none 12")
    P2label.place( x =1344, y = 135)
    P3label = Label(window, text=D_Ub1,font="none 12")
    P3label.place( x =1344, y = 145)
    writePID(P_Ub1,I_Ub1,D_Ub1,1)
def pidub2_ok():
    # Button callback: read the Ub2 PID gains from the third entry row,
    # display them, and persist them via writePID (controller index 2).
    # NOTE(review): same label coordinates as the other callbacks — the
    # displays overwrite each other; confirm intended placement.
    P_Ub2 = Pent3.get()
    I_Ub2 = Ient3.get()
    D_Ub2 = Dent3.get()
    P1label = Label(window, text=P_Ub2,font="none 12")
    P1label.place( x =1344, y = 125)
    P2label = Label(window, text=I_Ub2,font="none 12")
    P2label.place( x =1344, y = 135)
    P3label = Label(window, text=D_Ub2,font="none 12")
    P3label.place( x =1344, y = 145)
    writePID(P_Ub2,I_Ub2,D_Ub2,2)
window = Tk()
window.title("PKSS")

# One StringVar per PID gain entry: (P, I, D) for Um, Ub1 and Ub2.
Pent1 = StringVar()
Ient1 = StringVar()
Dent1 = StringVar()
Pent2 = StringVar()
Ient2 = StringVar()
Dent2 = StringVar()
Pent3 = StringVar()
Ient3 = StringVar()
Dent3 = StringVar()

# Fix: `.pack()` returns None, so the original `x = Widget(...).pack()`
# bound None to every widget name. Create the widget first, then pack it,
# so the module-level references actually hold the widgets.
button1 = Button(window, text='OK', command=pidum_ok)
button1.pack()
button2 = Button(window, text='OK', command=pidub1_ok)
button2.pack()
button3 = Button(window, text='OK', command=pidub2_ok)
button3.pack()
P1 = Entry(window, textvariable=Pent1)
P1.pack()
I1 = Entry(window, textvariable=Ient1)
I1.pack()
D1 = Entry(window, textvariable=Dent1)
D1.pack()
P2 = Entry(window, textvariable=Pent2)
P2.pack()
I2 = Entry(window, textvariable=Ient2)
I2.pack()
D2 = Entry(window, textvariable=Dent2)
D2.pack()
P3 = Entry(window, textvariable=Pent3)
P3.pack()
I3 = Entry(window, textvariable=Ient3)
I3.pack()
D3 = Entry(window, textvariable=Dent3)
D3.pack()

# Blocks until the window is closed.
window.mainloop()
#entry = Label(window, text=txt, fg="black",bg="orange", font="none 12")
#entry.place(x=10,y=10)
# with open('test.csv', 'a', encoding='utf-8', newline='') as csvfile:
# csvwriter=csv.writer(csvfile,delimiter=";")
# csvwriter.writerow([txt])
#txtLabel=Label(window,text=txt).pack()
| [
"[email protected]"
] | |
80287b28c93d6db5eda425b2514c0bcdac242368 | 7224f601a074d62d1ca27bc3dcee454163532202 | /miscellaneous/Raspberry-Pi/Cloud server/mqttt/mqttt client.py | b1d589bb433a997a5b4493b5c0d17e08b5e67a89 | [] | no_license | abuzneid/IoT-Lab---Spring-2019 | ac37b264e10969b8ba98c6184d417174834de5b4 | aef0fc8097e4650a36a1fda9d59cb3cd5cb387b1 | refs/heads/master | 2020-04-23T18:20:19.321428 | 2019-07-01T20:45:44 | 2019-07-01T20:45:44 | 171,363,593 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,272 | py | import paho.mqtt.client as mqtt
import encodings
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
    """CONNACK callback: report the broker's result code and (re)subscribe.

    Subscribing here (rather than once at startup) means the subscriptions
    are renewed automatically after every reconnect.
    """
    print("Connected with result code "+str(rc))
    for topic in ("ABC/test", "ABC/topic"):
        client.subscribe(topic)
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
    """PUBLISH callback: echo the message, then branch on its decoded payload.

    Assumes the payload is ASCII-decodable — non-ASCII bytes would raise
    here (same as the original).
    """
    print(msg.topic+" "+str(msg.payload))
    payload = msg.payload.decode('ascii')
    if payload == "Hello":
        print("Received message #1, do something")
        # Do something
    elif payload == "World!":
        print("Received message #2, do something else")
        # Do something else
# Create an MQTT client and attach our routines to it.
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
# Blocking connect to the public Mosquitto test broker
# (port 1883, 60 s keepalive).
client.connect("test.mosquitto.org", 1883, 60)
# Process network traffic and dispatch callbacks. This will also handle
# reconnecting. Check the documentation at
# https://github.com/eclipse/paho.mqtt.python
# for information on how to use other loop*() functions
client.loop_forever()
while 1:
client.on_message | [
"[email protected]"
] | |
d0be707b6b95674e7a55339a7774568045b2a525 | 6a7058009587e78b5c758ff783410325ad7c2a4b | /educative/slidingWindow/non_repeat_substring.py | 2883b8e112151f20e144a97d63367dc9680d312d | [
"Apache-2.0"
] | permissive | stacykutyepov/python-cp-cheatsheet | 8b96b76403c501f5579befd07b3c4a4c69fe914e | a00a57e1b36433648d1cace331e15ff276cef189 | refs/heads/master | 2023-07-16T13:26:35.130763 | 2021-08-30T11:23:39 | 2021-08-30T11:23:39 | 401,442,535 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | """
time: 13 min
errors: none!
"""
def non_repeat_substring(str):
    """Return the length of the longest substring of *str* containing no
    repeated characters.

    Classic sliding window: ``window_start`` is the left edge of the current
    duplicate-free window and ``last_seen`` maps each character to the index
    of its most recent occurrence. O(n) time, O(min(n, alphabet)) space.

    The original implementation cleared its entire hash table whenever it
    met a repeated character, discarding the still-valid suffix of the
    window — e.g. it returned 3 instead of 4 for "cabcd".

    (The parameter keeps its original name ``str`` — shadowing the builtin —
    for caller compatibility.)
    """
    window_start = 0
    max_len = 0
    last_seen = {}
    for window_end, ch in enumerate(str):
        if ch in last_seen and last_seen[ch] >= window_start:
            # Repeat inside the window: slide the left edge just past the
            # previous occurrence instead of resetting everything.
            window_start = last_seen[ch] + 1
        last_seen[ch] = window_end
        max_len = max(max_len, window_end - window_start + 1)
    return max_len
def main():
    """Print the longest distinct-substring length for a few sample strings."""
    for sample in ("aabccbb", "abbbb", "abccde"):
        print("Length of the longest substring: " + str(non_repeat_substring(sample)))
main() | [
"[email protected]"
] | |
94b4c2e35a301f411b1555575ce77c4ce920d0e0 | 1a48b9b03463eb1b34628e1cc76239dba875e32f | /Parallel computing/使用ProcessPoolExecutor并发执行任务.py | dab13e723dc33e2b4ed8d8280d7ed3407b254187 | [] | no_license | songyachun/Python-Workbook | e25dd021dd669e234356c859aa0f416b259e2ae6 | ad13c35030fc0754bbe01557bc86eb29c83d0179 | refs/heads/master | 2021-05-20T02:00:05.654599 | 2020-06-16T12:32:33 | 2020-06-16T12:32:33 | 252,138,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,077 | py | """
# @Author : SYACHUN
# @Date : 2020-06-16 19:45:15
# @LastEditTime: 2020-06-16 19:53:51
# @LastEditors: SYACHUN
# @Description : 求解最大公约数
# @FilePath : \weikeban\使用ProcessPoolExecutor并发执行任务.py
"""
import time
import concurrent.futures as cf
def gcd(pair):
    """Return the greatest common divisor of the two non-negative ints in
    *pair*.

    Uses Euclid's algorithm — O(log min(a, b)) instead of the original
    countdown from min(a, b), which was O(min(a, b)) and returned None when
    either operand was 0 (Euclid correctly returns the other operand).
    """
    a, b = pair
    while b:
        a, b = b, a % b
    return a
if __name__ == "__main__":
    # Benchmark: serial map() vs ProcessPoolExecutor over the same gcd work.
    TEST_DATA = [
        (11880774, 83664910),
        (13961044, 17644234),
        (10112000, 13380625)
    ]
    # Serial baseline: run gcd over all pairs in this process.
    start_time = time.time()
    res1 = list(map(gcd, TEST_DATA))
    end_time = time.time()
    print("串行处理结果:{},消耗时间:{}".format(res1, end_time-start_time))
    # Parallel version. The `with` block shuts the pool down on exit so the
    # worker processes are not leaked (the original never called shutdown()).
    start_time = time.time()
    with cf.ProcessPoolExecutor(max_workers=4) as pool:
        res2 = list(pool.map(gcd, TEST_DATA))
        end_time = time.time()
    print("并行处理结果:{},消耗时间:{}".format(res2, end_time-start_time))
| [
"[email protected]"
] | |
bbbf2ff35e64f1f9cf83d0315339612b100e6464 | ae43b412092c4a417bd46573942ff261d00ecb26 | /ML/appleorange.py | 560d8e9ab5aab733566b92562844b4466edcef3c | [] | no_license | hydrophyl/Python-1.0 | 1bb191456b4a53b8c420593392236a262cc969aa | b1f3ff5a5e06ed5d589fc12b29f3a1672b257749 | refs/heads/master | 2020-05-02T10:10:21.394958 | 2019-09-24T20:25:52 | 2019-09-24T20:25:52 | 177,890,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | #Scikit-learn
from sklearn import tree

# Training data: [weight_in_grams, texture] where texture 1 = 'smooth'
# and 0 = 'bumpy'.
features = [[140, 1], [130, 1], [150, 0], [170, 0]]
# Class labels for the rows above: 0 = apple, 1 = orange.
labels = [0, 0, 1, 1]
# Fit a decision-tree classifier on the labelled fruit examples
# (DecisionTreeClassifier.fit returns the classifier itself).
clf = tree.DecisionTreeClassifier().fit(features, labels)
# Classify an unseen fruit: 155 g with a bumpy skin.
print(clf.predict([[155, 0]]))
| [
"[email protected]"
] | |
bcc7c1b0c6a0c863489a0d79114e5dac8fc93df1 | 4ca5211d80d5c4c8c068b3384307b809388b30c1 | /pegSolitaireUtils.py | 37bf544cddce5d6d69ac671f6f7889999d33e0d9 | [] | no_license | atanu1991/Peg-Solitaire | 6f82183ac12c74083cb89dac69213f6d18101085 | 64ee81642f0e0c75d3f1b71aef414657b1a8465b | refs/heads/master | 2016-08-12T03:23:23.664380 | 2016-01-24T03:05:04 | 2016-01-24T03:05:04 | 50,269,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,842 | py | import readGame
class game:
    """Peg-solitaire board wrapper (Python 2 module — note the print
    statement in getNextState).

    Holds the 7x7 board read from a file, counts expanded nodes, and
    provides move validation/execution plus two search heuristics.
    Cell values: 1 = peg, 0 = empty hole; the four 2x2 corner areas are
    outside the cross-shaped board. Positions are (row, column) tuples;
    a move jumps a peg two cells, removing the peg jumped over.
    """
    def __init__(self, filePath):
        # Board state is loaded by the sibling readGame module.
        self.gameState = readGame.readGameState(filePath)
        self.nodesExpanded = 0
        self.trace = []
    def get_gameState(self):
        # Plain accessor for the current 7x7 board.
        return self.gameState
    def manhattan_distance_heuristic(self):
        """This heuristic calculates the sum of manhattan distances
        of all the pegs from the centre peg (3,3)
        """
        value = 0
        for i in range(0,7):
            for j in range(0,7):
                if self.gameState[i][j] == 1:
                    value += abs(i - 3) + abs(j - 3)
        return value
    def possible_moves_heuristic(self):
        """This heuristic increments the cost by 1 for every move which
        is possible from that given state.
        """
        value = 0
        for i in range(0,7):
            for j in range(0,7):
                if self.gameState[i][j] == 1:
                    if self.is_validMove((i,j),'S'):
                        value = value + 1
                    if self.is_validMove((i,j),'N'):
                        value = value + 1
                    if self.is_validMove((i,j),'E'):
                        value = value + 1
                    if self.is_validMove((i,j),'W'):
                        value = value + 1
        return value
    def is_corner(self, pos):
        """Return True when `pos` is off the board or inside one of the
        four 2x2 corner squares that are not part of the cross."""
        # Checking if the given position is out of the board.
        if pos[0] < 0 or pos[0] > 6 or pos[1] < 0 or pos[1] > 6:
            return True
        # Checking if the given position is in top left 2 x 2 square.
        for i in range(0,2):
            for j in range(0,2):
                if pos == (i,j):
                    return True
        # Checking if the given position is in bottom left 2 x 2 square.
        for i in range(5,7):
            for j in range(0,2):
                if pos == (i,j):
                    return True
        # Checking if the given position is in top right 2 x 2 square.
        for i in range(0,2):
            for j in range(5,7):
                if pos == (i,j):
                    return True
        # Checking if the given position is in bottom right 2 x 2 square.
        for i in range(5,7):
            for j in range(5,7):
                if pos == (i,j):
                    return True
        return False
    def getNextPosition(self, oldPos, direction):
        # This function just returns the next position from the current
        # position in the given direction.
        # It does not check the validity of the position.
        # N/S move +-2 along the row index, E/W along the column index.
        newPos = list(oldPos)
        if(direction == 'N'):
            newPos[0] = oldPos[0] - 2
            newPos[1] = oldPos[1]
        if(direction == 'S'):
            newPos[0] = oldPos[0] + 2
            newPos[1] = oldPos[1]
        if(direction == 'E'):
            newPos[0] = oldPos[0]
            newPos[1] = oldPos[1] + 2
        if(direction == 'W'):
            newPos[0] = oldPos[0]
            newPos[1] = oldPos[1] - 2
        return tuple(newPos)
    def is_validMove(self, oldPos, direction):
        """Return True when jumping from oldPos in `direction` is legal:
        the landing cell is on the board and empty, oldPos holds a peg,
        and the cell jumped over holds a peg."""
        #########################################
        # In this we have got the next peg position and
        # below lines check for if the new move is a corner
        newPos = self.getNextPosition(oldPos, direction)
        if self.is_corner(newPos):
            return False
        #########################################
        #If the new position is out of the board return False
        # (NOTE: is_corner above already covers this; kept as written.)
        if newPos[0] < 0 or newPos[0] > 6 or newPos[1] < 0 or newPos[1] > 6:
            return False
        #If there is already a marble in new position or the old position does not have a marble, return false
        if self.gameState[newPos[0]][newPos[1]] == 1 or self.gameState[oldPos[0]][oldPos[1]] == 0:
            return False
        midPos = list(oldPos)
        #If there is no marble in the intermediate position while
        # reaching new position from old position, return False
        if(direction == 'N'):
            midPos[0] = midPos[0] - 1
            if self.gameState[midPos[0]][midPos[1]] == 0:
                return False
        elif(direction == 'S'):
            midPos[0] = midPos[0] + 1
            if self.gameState[midPos[0]][midPos[1]] == 0:
                return False
        elif(direction == 'E'):
            midPos[1] = midPos[1] + 1
            if self.gameState[midPos[0]][midPos[1]] == 0:
                return False
        elif(direction == 'W'):
            midPos[1] = midPos[1] - 1
            if self.gameState[midPos[0]][midPos[1]] == 0:
                return False
        #If all checks are passed, then it is a valid move
        return True
    def getNextState(self, oldPos, direction):
        # This function actually modifies the game state given the current position
        # and the direction to move.
        # This function cross verifies if it is valid to move in the direction mention.
        # NOTE(review): an invalid move terminates the whole process via
        # exit(0) — harsh for library code; callers must pre-validate.
        ###############################################
        self.nodesExpanded += 1
        if not self.is_validMove(oldPos, direction):
            print "Error, You are not checking for valid move"
            exit(0)
        ###############################################
        # If it is valid to move from current position in the given direction,
        # update the game state by actually moving the marbles.
        newPos = self.getNextPosition(oldPos, direction)
        self.gameState[oldPos[0]][oldPos[1]] = 0 #Remove marble from current position
        self.gameState[newPos[0]][newPos[1]] = 1 #Put the removed marble to the position
        # Also remove the marble which is in between current position and new position.
        midPos = list(oldPos)
        if(direction == 'N'):
            midPos[0] = midPos[0] - 1
            self.gameState[midPos[0]][midPos[1]] = 0
        elif(direction == 'S'):
            midPos[0] = midPos[0] + 1
            self.gameState[midPos[0]][midPos[1]] = 0
        elif(direction == 'E'):
            midPos[1] = midPos[1] + 1
            self.gameState[midPos[0]][midPos[1]] = 0
        elif(direction == 'W'):
            midPos[1] = midPos[1] - 1
            self.gameState[midPos[0]][midPos[1]] = 0
        return self.gameState
| [
"[email protected]"
] | |
6f2fb33ac581df43102825b6f3735508999d4a00 | 144f2c0f977d5c1537bfaca8c6f108af783dc06e | /tests/test_qtile_tools.py | 16e2a40c936c8804d4080338c7deb79b854db5ab | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | frostidaho/python-qtile-tools | b3abf70de692e59f57a9fc6ca0347ad68e4b48c7 | d36e8f4dd2afbb52f5d719bfbac8040979806df7 | refs/heads/master | 2021-01-19T01:15:34.399209 | 2017-04-05T01:07:04 | 2017-04-05T01:07:04 | 87,235,732 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66 | py |
from qtile_tools.cli import main
def test_main():
    # Smoke test: the CLI entry point must accept an empty argv without raising.
    main([])
| [
"[email protected]"
] | |
474a96f640ec999c7442bc9e0186af716ac3bf80 | b6ae8ec90432020e0f1fb249a2ba69dd4bbcfe99 | /Day2/list/tuple.py | 89fc32c507b5f1c3e125906ac040e508f3dd8445 | [] | no_license | hf-hf/Python3-Study | 6d634e9ef04c2c4654b292e3e263337b779e4d40 | 9c3d8b843bb4356bd4e5ef4d7ef59c61f8f37889 | refs/heads/master | 2022-12-16T14:57:33.067373 | 2020-09-29T08:17:54 | 2020-09-29T08:17:54 | 291,682,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,906 | py | # 不可变有序列表 - 元组 tuple ( )#
if __name__ == '__main__':
# tuple 和 list 非常类似,但是 tuple 一旦初始化就不能修改,比如同样是列出同学的名字:
classmates = ('Michael', 'Bob', 'Tracy')
# 现在,classmates 这个 tuple 不能变了,它也没有 append(),insert() 这样的方法。
# 其他获取元素的方法和list是一样的,你可以正常地使用classmates[0],classmates[-1],
# 但不能赋值成另外的元素。
# 不可变的 tuple 有什么意义?因为 tuple 不可变,所以代码更安全。
# 如果可能,能用 tuple 代替 list 就尽量用 tuple。
# empty tuple
t = ()
print(t) # ()
# 定义的不是 tuple,是1这个数!这是因为括号()既可以表示 tuple,
# 又可以表示数学公式中的小括号,这就产生了歧义,
# 因此,Python规定,这种情况下,按小括号进行计算,计算结果自然是1。
t = (1)
print(t) # 1
# 所以,只有 1 个元素的 tuple 定义时必须加一个逗号,,来消除歧义:
t = (1,)
print(t) # (1,)
# “可变的”tuple :
t = ('a', 'b', ['A', 'B'])
t[2][0] = 'X'
t[2][1] = 'Y'
print(t) # ('a', 'b', ['X', 'Y'])
# 当我们把list的元素'A'和'B'修改为'X'和'Y'后
# 表面上看,tuple的元素确实变了,但其实变的不是tuple的元素,
# 而是list的元素。tuple一开始指向的list并没有改成别的list,
# 所以,⭐ tuple所谓的“不变”是说,tuple的每个元素,指向永远不变。即指向'a',
# 就不能改成指向'b',指向一个list,就不能改成指向其他对象,但指向的这个list本身是可变的!
# 理解了“指向不变”后,要创建一个内容也不变的tuple怎么做?那就必须保证tuple的每一个元素本身也不能变。 | [
"[email protected]"
] | |
da06c4db439c1b85c8eec435a58a63d8d792b9b4 | b2b55f2aea101c050852de97e06b230e1f918014 | /number40.py | 011ffcd027840e8fc929133548a8c1a7916823b2 | [] | no_license | hansol4412/algorithm | d505e202969417fdb0da10421989ef84aa937dbe | a6194b55b1a8f2cf21c91b1f02d92abb3bd10cd2 | refs/heads/master | 2023-03-22T11:45:01.257350 | 2021-03-21T11:19:42 | 2021-03-21T11:19:42 | 268,396,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 788 | py | # 40. 교집합 (투포인트 알고리즘)
# 두 집합 A, B가 주어지면 두 집합의 교집합을 출력하는 프로그램을 작성하세요.
# 교집합은 오름차순으로 정렬하시오.
# 교집합 구한 후 정렬하시오.
# Read both arrays from stdin (the input() prompt strings are preserved
# byte-for-byte).
a=[]
b=[]
c=[]
n=int(input("첫번째 배열의 크기를 입력하세요.:"))
for i in range(0,n):
    a.append(int(input()))
m=int(input("두번째 배열의 크기를 입력하세요.:"))
for i in range(0,m):
    b.append(int(input()))
# Intersection via a set: O(n+m) instead of the original O(n*m) double
# scan; sorted() replaces the hand-rolled bubble sort. For genuine sets
# (no duplicate elements, as the problem statement assumes) the printed
# result is identical to the original's.
members_of_b = set(b)
c = sorted(x for x in a if x in members_of_b)
print(c)
| [
"[email protected]"
] | |
a5ca186385d589d484d9ce43724d8471804013cc | 949999ba1f6afb82f652b2287c8de3ddd37f79aa | /templates/ikev1_pcapReader.py | e7ae26be9986d0d0a3c98d0222a5dd7dba99fe1c | [] | no_license | OnurOzcan35/ikev1-psk-aggressive-mode-dict-attack | 5eba3b6c025cfa42fbb5e72da78e0022e666c8b6 | b593300e05d7f3c7bb064ce5dfdd9def16c23fca | refs/heads/main | 2023-02-27T01:23:46.451956 | 2021-02-04T12:54:04 | 2021-02-04T12:54:04 | 335,952,525 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | from scapy.all import *
def openPCAPFile(path: str) -> scapy.plist.PacketList:
    """Read a pcap/pcapng file with scapy's rdpcap and return its packets.

    Fixes: the bare ``except:`` (which also swallowed SystemExit and
    KeyboardInterrupt) is narrowed to ``Exception`` and the original error
    is chained so the real cause stays visible; the unreachable trailing
    ``raise NotImplementedError`` after the try/except was removed.
    The raised type and message are kept for callers matching on them.
    """
    try:
        return rdpcap(path)
    except Exception as err:
        raise Exception("File is not found") from err
def getISAKMPPackets(packets: scapy.plist.PacketList) -> list:
    """Return the ISAKMP layer of every packet in *packets* that has one.

    Packets without an ISAKMP layer are skipped. The bare ``except:`` is
    narrowed to IndexError — the exception scapy's layer lookup raises when
    the requested layer is absent — so unrelated errors are no longer
    silently swallowed. (The ``-> []`` list-literal annotation was also
    fixed to ``-> list``, and the unreachable trailing raise removed.)
    """
    isakmp_layers = []
    for packet in packets:
        try:
            isakmp_layers.append(packet["ISAKMP"])
        except IndexError:
            continue
    return isakmp_layers
| [
"[email protected]"
] | |
0431fea0e0dcf9db6c90a0cf03b79cb49f3e17e8 | b1cb67067656271be22c293f51c1373293a28795 | /tests/test_planner.py | e7f5ab4629d2368ea3ac3811344b1185f16c697d | [
"BSD-2-Clause"
] | permissive | NPRA/EmissionCalculatorLib | b4b5445984ead114236715c3f8ed895c360a2bc5 | 750e7137c8115d26b2eec354ab3f5a65f76a8e21 | refs/heads/master | 2022-07-11T06:01:47.857745 | 2021-02-09T10:43:41 | 2021-02-09T10:43:41 | 99,795,865 | 10 | 5 | BSD-2-Clause | 2022-06-21T21:11:31 | 2017-08-09T10:24:36 | Python | UTF-8 | Python | false | false | 480 | py | from emission import Planner
from emission import PollutantTypes
from emission import vehicles
class TestPlanner:
    # Exercises the emission Planner construction path end-to-end.
    def test_construct(self):
        # Route endpoints — presumably projected (UTM-style) coordinates
        # given their magnitudes; TODO confirm the expected CRS.
        start = [271809.847394, 7039133.17755]
        stop = [265385.432115, 7031118.13344]
        vehicle = vehicles.Car(vehicles.FuelTypes.PETROL)
        planner = Planner(start, stop, vehicle)
        # A fresh planner must start with no pollutants registered.
        assert planner.pollutants == {}
        planner.add_pollutant(PollutantTypes.NOx)
        assert len(planner.pollutants) > 0
| [
"[email protected]"
] | |
9610d71e683b7cf6ba117adf541c9de69f52aee6 | 7b5828edda7751700ca7002b40a214e39e5f48a8 | /EA/core/sims4/gsi/command_buffer.py | e1301ea4581704ced936304f7533eab0b6fbd36f | [] | no_license | daniela-venuta/Sims-4-Python-Script-Workspace | 54c33dac02f84daed66f46b7307f222fede0fa62 | f408b28fb34626b2e3b2953152343d591a328d66 | refs/heads/main | 2023-03-29T18:08:39.202803 | 2021-03-30T19:00:42 | 2021-03-30T19:00:42 | 353,111,243 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,504 | py | import collections
try:
import threading
_threading_enabled = True
except ImportError:
import dummy_threading as threading
_threading_enabled = False
import sims4.commands
import sims4.log
import sims4.service_manager
logger = sims4.log.Logger('GSI')
_Command = collections.namedtuple('_Command', ('command_string', 'callback', 'output_override', 'zone_id', 'connection_id'))
def _execute_command(command):
real_output = sims4.commands.output
sims4.commands.output = command.output_override
result = False
try:
if command.zone_id is not None:
sims4.commands.execute(command.command_string, command.connection_id)
else:
sims4.commands.execute(command.command_string, None)
result = True
except Exception:
result = False
logger.exception('Error while executing game command for')
finally:
sims4.commands.output = real_output
command.callback(result)
if _threading_enabled:
class CommandBufferService(sims4.service_manager.Service):
def __init__(self):
self.pending_commands = None
self._lock = threading.Lock()
def start(self):
with self._lock:
self.pending_commands = []
def stop(self):
with self._lock:
self.pending_commands = None
def add_command(self, command_string, callback=None, output_override=None, zone_id=None, connection_id=None):
with self._lock:
if self.pending_commands is not None:
command = _Command(command_string, callback, output_override, zone_id, connection_id)
self.pending_commands.append(command)
def on_tick(self):
with self._lock:
if not self.pending_commands:
return
local_pending_commands = list(self.pending_commands)
del self.pending_commands[:]
for command in local_pending_commands:
_execute_command(command)
else:
class CommandBufferService(sims4.service_manager.Service):
def add_command(self, command_string, callback=None, output_override=None, zone_id=None, connection_id=None):
command = _Command(command_string, callback, output_override, zone_id, connection_id)
_execute_command(command)
def on_tick(self):
pass
| [
"[email protected]"
] | |
9652dc3c1561ddace6104626a615a020f3d8d906 | 21b5ab9a8e7c1534c4a92cb97a30b0e30a424777 | /djblogapp/articles/admin.py | 09fd4f4ddbd87b05dc271bc1644dd675addd6c97 | [] | no_license | Gazalkhan/BloggingApp | afa7190abf75b3eb854becebf3a43feda71a9dc0 | c9d6e813c3c0f39d455568ebc20a2c6e4b534d0e | refs/heads/master | 2020-03-19T09:50:09.901586 | 2018-06-06T11:26:47 | 2018-06-06T11:26:47 | 136,320,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import Article
# Register your models here.
admin.site.register(Article)
| [
"[email protected]"
] | |
a7a8a4fc6a4d8860a7cbcf0990e903217b21bb30 | fd40d6375ddae5c8613004a411341f0c984e80d5 | /src/visions/core/implementations/types/visions_datetime.py | f8a11f0c6b5bf06f1ed01080801bdba0c704c5d0 | [
"LicenseRef-scancode-public-domain",
"MIT"
] | permissive | ieaves/tenzing | 93c3353e62621c90adefc5a174a2dcde9aacbc46 | 92d39c1c3a5633d8074e0ffe8c2687c465aebbc8 | refs/heads/master | 2020-04-25T07:14:31.388737 | 2020-01-07T02:51:13 | 2020-01-07T02:51:13 | 172,608,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,360 | py | import pandas.api.types as pdt
import pandas as pd
from typing import Sequence
from visions.core.model.relations import (
IdentityRelation,
InferenceRelation,
TypeRelation,
)
from visions.core.model.type import VisionsBaseType
from visions.core.implementations.types import visions_string
from visions.utils.coercion import test_utils
def to_datetime(series: pd.Series) -> pd.Series:
return pd.to_datetime(series)
def _get_relations() -> Sequence[TypeRelation]:
from visions.core.implementations.types import visions_generic
relations = [
IdentityRelation(visions_datetime, visions_generic),
InferenceRelation(
visions_datetime,
visions_string,
relationship=test_utils.coercion_test(to_datetime),
transformer=to_datetime,
),
]
return relations
class visions_datetime(VisionsBaseType):
"""**Datetime** implementation of :class:`visions.core.model.type.VisionsBaseType`.
Examples:
>>> x = pd.Series([pd.datetime(2017, 3, 5), pd.datetime(2019, 12, 4)])
>>> x in visions_datetime
True
"""
@classmethod
def get_relations(cls) -> Sequence[TypeRelation]:
return _get_relations()
@classmethod
def contains_op(cls, series: pd.Series) -> bool:
return pdt.is_datetime64_any_dtype(series)
| [
"[email protected]"
] | |
9eb21a225d99d72d993744068321b270fe85c8e0 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/mysql_forwarding.py | 1bd1a82ee4f076d2f13ebb0d6b9e7b5b2c2a94ed | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 8,910 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class MysqlForwarding:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
sensitive_list.append('password')
openapi_types = {
'address': 'NetAddress',
'db_name': 'str',
'username': 'str',
'password': 'str',
'enable_ssl': 'bool',
'table_name': 'str',
'column_mappings': 'list[ColumnMapping]'
}
attribute_map = {
'address': 'address',
'db_name': 'db_name',
'username': 'username',
'password': 'password',
'enable_ssl': 'enable_ssl',
'table_name': 'table_name',
'column_mappings': 'column_mappings'
}
def __init__(self, address=None, db_name=None, username=None, password=None, enable_ssl=None, table_name=None, column_mappings=None):
"""MysqlForwarding
The model defined in huaweicloud sdk
:param address:
:type address: :class:`huaweicloudsdkiotda.v5.NetAddress`
:param db_name: **参数说明**:连接MYSQL数据库的库名。 **取值范围**:长度不超过64,只允许字母、数字、下划线(_)、连接符(-)的组合。
:type db_name: str
:param username: **参数说明**:连接MYSQL数据库的用户名
:type username: str
:param password: **参数说明**:连接MYSQL数据库的密码
:type password: str
:param enable_ssl: **参数说明**:客户端是否使用SSL连接服务端,默认为true
:type enable_ssl: bool
:param table_name: **参数说明**:MYSQL数据库的表名
:type table_name: str
:param column_mappings: **参数说明**:MYSQL数据库的列和流转数据的对应关系列表。
:type column_mappings: list[:class:`huaweicloudsdkiotda.v5.ColumnMapping`]
"""
self._address = None
self._db_name = None
self._username = None
self._password = None
self._enable_ssl = None
self._table_name = None
self._column_mappings = None
self.discriminator = None
self.address = address
self.db_name = db_name
self.username = username
self.password = password
if enable_ssl is not None:
self.enable_ssl = enable_ssl
self.table_name = table_name
self.column_mappings = column_mappings
@property
def address(self):
"""Gets the address of this MysqlForwarding.
:return: The address of this MysqlForwarding.
:rtype: :class:`huaweicloudsdkiotda.v5.NetAddress`
"""
return self._address
@address.setter
def address(self, address):
"""Sets the address of this MysqlForwarding.
:param address: The address of this MysqlForwarding.
:type address: :class:`huaweicloudsdkiotda.v5.NetAddress`
"""
self._address = address
@property
def db_name(self):
"""Gets the db_name of this MysqlForwarding.
**参数说明**:连接MYSQL数据库的库名。 **取值范围**:长度不超过64,只允许字母、数字、下划线(_)、连接符(-)的组合。
:return: The db_name of this MysqlForwarding.
:rtype: str
"""
return self._db_name
@db_name.setter
def db_name(self, db_name):
"""Sets the db_name of this MysqlForwarding.
**参数说明**:连接MYSQL数据库的库名。 **取值范围**:长度不超过64,只允许字母、数字、下划线(_)、连接符(-)的组合。
:param db_name: The db_name of this MysqlForwarding.
:type db_name: str
"""
self._db_name = db_name
@property
def username(self):
"""Gets the username of this MysqlForwarding.
**参数说明**:连接MYSQL数据库的用户名
:return: The username of this MysqlForwarding.
:rtype: str
"""
return self._username
@username.setter
def username(self, username):
"""Sets the username of this MysqlForwarding.
**参数说明**:连接MYSQL数据库的用户名
:param username: The username of this MysqlForwarding.
:type username: str
"""
self._username = username
@property
def password(self):
"""Gets the password of this MysqlForwarding.
**参数说明**:连接MYSQL数据库的密码
:return: The password of this MysqlForwarding.
:rtype: str
"""
return self._password
@password.setter
def password(self, password):
"""Sets the password of this MysqlForwarding.
**参数说明**:连接MYSQL数据库的密码
:param password: The password of this MysqlForwarding.
:type password: str
"""
self._password = password
@property
def enable_ssl(self):
"""Gets the enable_ssl of this MysqlForwarding.
**参数说明**:客户端是否使用SSL连接服务端,默认为true
:return: The enable_ssl of this MysqlForwarding.
:rtype: bool
"""
return self._enable_ssl
@enable_ssl.setter
def enable_ssl(self, enable_ssl):
"""Sets the enable_ssl of this MysqlForwarding.
**参数说明**:客户端是否使用SSL连接服务端,默认为true
:param enable_ssl: The enable_ssl of this MysqlForwarding.
:type enable_ssl: bool
"""
self._enable_ssl = enable_ssl
@property
def table_name(self):
"""Gets the table_name of this MysqlForwarding.
**参数说明**:MYSQL数据库的表名
:return: The table_name of this MysqlForwarding.
:rtype: str
"""
return self._table_name
@table_name.setter
def table_name(self, table_name):
"""Sets the table_name of this MysqlForwarding.
**参数说明**:MYSQL数据库的表名
:param table_name: The table_name of this MysqlForwarding.
:type table_name: str
"""
self._table_name = table_name
@property
def column_mappings(self):
"""Gets the column_mappings of this MysqlForwarding.
**参数说明**:MYSQL数据库的列和流转数据的对应关系列表。
:return: The column_mappings of this MysqlForwarding.
:rtype: list[:class:`huaweicloudsdkiotda.v5.ColumnMapping`]
"""
return self._column_mappings
@column_mappings.setter
def column_mappings(self, column_mappings):
"""Sets the column_mappings of this MysqlForwarding.
**参数说明**:MYSQL数据库的列和流转数据的对应关系列表。
:param column_mappings: The column_mappings of this MysqlForwarding.
:type column_mappings: list[:class:`huaweicloudsdkiotda.v5.ColumnMapping`]
"""
self._column_mappings = column_mappings
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MysqlForwarding):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
6fb3a746e9ba0290370ef755c66cb55f09785135 | 7cda87756f0b0621e2aae1a5bd627b8ec386dff6 | /flaskLogin/app.py | 79038bbf9c55ee97127ad9e9a5c4ee069a973733 | [] | no_license | angshukdutta92/python-projects | 9bf9d191a5a85efd18f2293fa41ed08ff79ec130 | 4eb108d3d66f5106fc64e45ca190b41cb0742ff7 | refs/heads/master | 2021-05-20T00:21:39.702393 | 2016-05-03T16:12:24 | 2016-05-03T16:12:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,728 | py | # import the Flask class from the flask module
from flask import Flask, render_template, redirect, url_for, request, session, flash
from functools import wraps
# create the application object
app = Flask(__name__)
# config
app.secret_key = 'my precious'
# login required decorator
def login_required(f):
@wraps(f)
def wrap(*args, **kwargs):
if 'logged_in' in session:
return f(*args, **kwargs)
else:
flash('You need to login first.')
return redirect(url_for('login'))
return wrap
# use decorators to link the function to a url
@app.route('/')
@login_required
def home():
return render_template('index.html') # render a template
# return "Hello, World!" # return a string
@app.route('/welcome')
def welcome():
return render_template('welcome.html') # render a template
# route for handling the login page logic
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] != 'admin' or request.form['password'] != 'admin':
error = 'Invalid Credentials. Please try again.'
else:
session['logged_in'] = True
flash('You were logged in.')
return redirect(url_for('home'))
return render_template('login.html', error=error)
@app.route('/logout')
@login_required
def logout():
session.pop('logged_in', None)
flash('You were logged out.')
return redirect(url_for('welcome'))
# start the server with the 'run()' method
#start the server with the run command
if __name__=='__main__':
app.debug=True
app.run(host='0.0.0.0',port=5003)
| [
"[email protected]"
] | |
118d8bcfddca20813b19f8d6d4567735c4ac5899 | 10c1760dd158ce9b9450b45c8d79335596d68035 | /web1/migrations/0011_customer_profile_picture.py | 55efd49ff82e9b3b2fab58e3cc1d6894cb3f1701 | [] | no_license | dalemesser/Mycode | 239508ce3feac579d65537480bd3f5d5e810f190 | d83d10278d02f50eb2fcc970079c9e710fca6e14 | refs/heads/main | 2023-03-01T10:16:14.099413 | 2021-02-09T10:44:36 | 2021-02-09T10:44:36 | 330,345,745 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | # Generated by Django 3.1.4 on 2021-01-20 15:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('web1', '0010_customer_user'),
]
operations = [
migrations.AddField(
model_name='customer',
name='profile_picture',
field=models.ImageField(blank=True, null=True, upload_to=''),
),
]
| [
"[email protected]"
] | |
309cd04173b4d096eb7b590ed67fc399ef2c0877 | cf668ede675f5b5a49912e8ca2170b5d5dba85c3 | /FullDesign/LsRand_OnlyTau_4.py | 3d9e3b41a25a22b071485ba0d01de3c81c709d52 | [] | no_license | amemil/MasterThesisRaw | b6c97a671e740871be541539384192684f5f1966 | bb357481cc47ef3a2b241f4b1df85fd0a4ff1de0 | refs/heads/main | 2023-06-09T22:49:06.082380 | 2021-06-25T09:38:20 | 2021-06-25T09:38:20 | 327,104,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,631 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 12 19:29:28 2021
@author: emilam
"""
import sys, os
import numpy as np
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
import UtilitiesMaster as ut
s1init1 = np.load('s1init_16.npy')
s2init1 = np.load('s2init_16.npy')
Winit1 = np.load('Winit_16.npy')
s1init2 = np.load('s1init_17.npy')
s2init2 = np.load('s2init_17.npy')
Winit2 = np.load('Winit_17.npy')
s1init3 = np.load('s1init_18.npy')
s2init3 = np.load('s2init_18.npy')
Winit3 = np.load('Winit_18.npy')
s1init4 = np.load('s1init_19.npy')
s2init4 = np.load('s2init_19.npy')
Winit4= np.load('Winit_19.npy')
s1init5 = np.load('s1init_20.npy')
s2init5 = np.load('s2init_20.npy')
Winit5 = np.load('Winit_20.npy')
indx = [16,17,18,19,20]
s1s = [s1init1,s1init2,s1init3,s1init4,s1init5]
s2s = [s2init1,s2init2,s2init3,s2init4,s2init5]
ws = [Winit1,Winit2,Winit3,Winit4,Winit5]
for i in range(5):
design = ut.ExperimentDesign(freqs_init=np.array([20,50,100,200]),maxtime=60,trialsize=5\
,Ap=0.005, tau=0.02, genstd=0.0001,b1=-3.1, b2=-3.1, w0=1.0,binsize = 1/500.0,reals = 20,longinit = 60\
,s1init = s1s[i],s2init = s2s[i],Winit = ws[i])
means,entrs,optms,W,posts = design.onlineDesign_wh_tau(nofreq =False,constant = False, random = True, optimised = False)
np.save('RandEstimatesTau_'+str(indx[i]),means)
np.save('RandEntropiesTau_'+str(indx[i]),entrs)
np.save('RandWTau_'+str(indx[i]),W)
np.save('RandPostsTau_'+str(indx[i]),posts)
np.save('RandFreqsTau_'+str(indx[i]),optms) | [
"[email protected]"
] | |
9cfda2d3b33fe057ffac602c8a45eb41a9ec05e5 | adea9fc9697f5201f4cb215571025b0493e96b25 | /napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/segment_routing/prefix_sids/prefix_sid/config/__init__.py | 2c88a90da723c84456e7fd1bc37de0086e2d37c7 | [
"Apache-2.0"
] | permissive | andyjsharp/napalm-yang | d8a8b51896ef7c6490f011fe265db46f63f54248 | ef80ebbfb50e188f09486380c88b058db673c896 | refs/heads/develop | 2021-09-09T02:09:36.151629 | 2018-03-08T22:44:04 | 2018-03-08T22:44:04 | 114,273,455 | 0 | 0 | null | 2018-03-08T22:44:05 | 2017-12-14T16:33:35 | Python | UTF-8 | Python | false | false | 34,558 | py |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
unicode = str
elif six.PY2:
import __builtin__
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/interfaces/interface/levels/level/afi-safi/af/segment-routing/prefix-sids/prefix-sid/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration parameters for the IGP Prefix SID.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__prefix','__sid_id','__label_options',)
_yang_name = 'config'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__label_options = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'EXPLICIT_NULL': {}, u'NO_PHP': {}},), is_leaf=True, yang_name="label-options", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='enumeration', is_config=True)
self.__prefix = YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),], is_leaf=True, yang_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ip-prefix', is_config=True)
self.__sid_id = YANGDynClass(base=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'16..1048575']}),RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'NO_LABEL': {}, u'IPV6_EXPLICIT_NULL': {'value': 2}, u'ENTROPY_LABEL_INDICATOR': {'value': 7}, u'IPV4_EXPLICIT_NULL': {'value': 0}, u'ROUTER_ALERT': {'value': 1}, u'IMPLICIT_NULL': {'value': 3}},),RestrictedClassType(base_type=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': u'[0-9a-fA-F:\\.]*'}),], is_leaf=True, yang_name="sid-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='sr-sid-type', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'isis', u'interfaces', u'interface', u'levels', u'level', u'afi-safi', u'af', u'segment-routing', u'prefix-sids', u'prefix-sid', u'config']
def _get_prefix(self):
"""
Getter method for prefix, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/segment_routing/prefix_sids/prefix_sid/config/prefix (inet:ip-prefix)
YANG Description: The IP prefix for which the IGP prefix SID should be advertised. The
value specified is a local prefix on the interface which is advertised
into the IGP.
"""
return self.__prefix
def _set_prefix(self, v, load=False):
"""
Setter method for prefix, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/segment_routing/prefix_sids/prefix_sid/config/prefix (inet:ip-prefix)
If this variable is read-only (config: false) in the
source YANG file, then _set_prefix is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_prefix() directly.
YANG Description: The IP prefix for which the IGP prefix SID should be advertised. The
value specified is a local prefix on the interface which is advertised
into the IGP.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),], is_leaf=True, yang_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ip-prefix', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """prefix must be of a type compatible with inet:ip-prefix""",
'defined-type': "inet:ip-prefix",
'generated-type': """YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),], is_leaf=True, yang_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ip-prefix', is_config=True)""",
})
self.__prefix = t
if hasattr(self, '_set'):
self._set()
def _unset_prefix(self):
self.__prefix = YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),], is_leaf=True, yang_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ip-prefix', is_config=True)
def _get_sid_id(self):
"""
Getter method for sid_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/segment_routing/prefix_sids/prefix_sid/config/sid_id (sr-sid-type)
YANG Description: The Segment Identifier to be used when advertising the IGP Prefix SID.
"""
return self.__sid_id
def _set_sid_id(self, v, load=False):
"""
Setter method for sid_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/segment_routing/prefix_sids/prefix_sid/config/sid_id (sr-sid-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_sid_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sid_id() directly.
YANG Description: The Segment Identifier to be used when advertising the IGP Prefix SID.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'16..1048575']}),RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'NO_LABEL': {}, u'IPV6_EXPLICIT_NULL': {'value': 2}, u'ENTROPY_LABEL_INDICATOR': {'value': 7}, u'IPV4_EXPLICIT_NULL': {'value': 0}, u'ROUTER_ALERT': {'value': 1}, u'IMPLICIT_NULL': {'value': 3}},),RestrictedClassType(base_type=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': u'[0-9a-fA-F:\\.]*'}),], is_leaf=True, yang_name="sid-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='sr-sid-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """sid_id must be of a type compatible with sr-sid-type""",
'defined-type': "openconfig-network-instance:sr-sid-type",
'generated-type': """YANGDynClass(base=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'16..1048575']}),RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'NO_LABEL': {}, u'IPV6_EXPLICIT_NULL': {'value': 2}, u'ENTROPY_LABEL_INDICATOR': {'value': 7}, u'IPV4_EXPLICIT_NULL': {'value': 0}, u'ROUTER_ALERT': {'value': 1}, u'IMPLICIT_NULL': {'value': 3}},),RestrictedClassType(base_type=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': u'[0-9a-fA-F:\\.]*'}),], is_leaf=True, yang_name="sid-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='sr-sid-type', is_config=True)""",
})
self.__sid_id = t
if hasattr(self, '_set'):
self._set()
def _unset_sid_id(self):
self.__sid_id = YANGDynClass(base=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'16..1048575']}),RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'NO_LABEL': {}, u'IPV6_EXPLICIT_NULL': {'value': 2}, u'ENTROPY_LABEL_INDICATOR': {'value': 7}, u'IPV4_EXPLICIT_NULL': {'value': 0}, u'ROUTER_ALERT': {'value': 1}, u'IMPLICIT_NULL': {'value': 3}},),RestrictedClassType(base_type=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': u'[0-9a-fA-F:\\.]*'}),], is_leaf=True, yang_name="sid-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='sr-sid-type', is_config=True)
def _get_label_options(self):
"""
Getter method for label_options, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/segment_routing/prefix_sids/prefix_sid/config/label_options (enumeration)
YANG Description: The options associated with the IGP prefix SID for MPLS. The value
of this leaf specifies the option that the SID should be advertised
into the IGP with.
"""
return self.__label_options
def _set_label_options(self, v, load=False):
"""
Setter method for label_options, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/segment_routing/prefix_sids/prefix_sid/config/label_options (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_label_options is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_label_options() directly.
YANG Description: The options associated with the IGP prefix SID for MPLS. The value
of this leaf specifies the option that the SID should be advertised
into the IGP with.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'EXPLICIT_NULL': {}, u'NO_PHP': {}},), is_leaf=True, yang_name="label-options", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='enumeration', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """label_options must be of a type compatible with enumeration""",
'defined-type': "openconfig-network-instance:enumeration",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'EXPLICIT_NULL': {}, u'NO_PHP': {}},), is_leaf=True, yang_name="label-options", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='enumeration', is_config=True)""",
})
self.__label_options = t
if hasattr(self, '_set'):
self._set()
def _unset_label_options(self):
self.__label_options = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'EXPLICIT_NULL': {}, u'NO_PHP': {}},), is_leaf=True, yang_name="label-options", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='enumeration', is_config=True)
prefix = __builtin__.property(_get_prefix, _set_prefix)
sid_id = __builtin__.property(_get_sid_id, _set_sid_id)
label_options = __builtin__.property(_get_label_options, _set_label_options)
_pyangbind_elements = {'prefix': prefix, 'sid_id': sid_id, 'label_options': label_options, }
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/interfaces/interface/levels/level/afi-safi/af/segment-routing/prefix-sids/prefix-sid/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration parameters for the IGP Prefix SID.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__prefix','__sid_id','__label_options',)
_yang_name = 'config'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__label_options = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'EXPLICIT_NULL': {}, u'NO_PHP': {}},), is_leaf=True, yang_name="label-options", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='enumeration', is_config=True)
self.__prefix = YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),], is_leaf=True, yang_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ip-prefix', is_config=True)
self.__sid_id = YANGDynClass(base=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'16..1048575']}),RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'NO_LABEL': {}, u'IPV6_EXPLICIT_NULL': {'value': 2}, u'ENTROPY_LABEL_INDICATOR': {'value': 7}, u'IPV4_EXPLICIT_NULL': {'value': 0}, u'ROUTER_ALERT': {'value': 1}, u'IMPLICIT_NULL': {'value': 3}},),RestrictedClassType(base_type=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': u'[0-9a-fA-F:\\.]*'}),], is_leaf=True, yang_name="sid-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='sr-sid-type', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'isis', u'interfaces', u'interface', u'levels', u'level', u'afi-safi', u'af', u'segment-routing', u'prefix-sids', u'prefix-sid', u'config']
def _get_prefix(self):
"""
Getter method for prefix, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/segment_routing/prefix_sids/prefix_sid/config/prefix (inet:ip-prefix)
YANG Description: The IP prefix for which the IGP prefix SID should be advertised. The
value specified is a local prefix on the interface which is advertised
into the IGP.
"""
return self.__prefix
def _set_prefix(self, v, load=False):
"""
Setter method for prefix, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/segment_routing/prefix_sids/prefix_sid/config/prefix (inet:ip-prefix)
If this variable is read-only (config: false) in the
source YANG file, then _set_prefix is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_prefix() directly.
YANG Description: The IP prefix for which the IGP prefix SID should be advertised. The
value specified is a local prefix on the interface which is advertised
into the IGP.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),], is_leaf=True, yang_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ip-prefix', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """prefix must be of a type compatible with inet:ip-prefix""",
'defined-type': "inet:ip-prefix",
'generated-type': """YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),], is_leaf=True, yang_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ip-prefix', is_config=True)""",
})
self.__prefix = t
if hasattr(self, '_set'):
self._set()
def _unset_prefix(self):
self.__prefix = YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),], is_leaf=True, yang_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ip-prefix', is_config=True)
def _get_sid_id(self):
"""
Getter method for sid_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/segment_routing/prefix_sids/prefix_sid/config/sid_id (sr-sid-type)
YANG Description: The Segment Identifier to be used when advertising the IGP Prefix SID.
"""
return self.__sid_id
def _set_sid_id(self, v, load=False):
"""
Setter method for sid_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/segment_routing/prefix_sids/prefix_sid/config/sid_id (sr-sid-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_sid_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sid_id() directly.
YANG Description: The Segment Identifier to be used when advertising the IGP Prefix SID.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'16..1048575']}),RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'NO_LABEL': {}, u'IPV6_EXPLICIT_NULL': {'value': 2}, u'ENTROPY_LABEL_INDICATOR': {'value': 7}, u'IPV4_EXPLICIT_NULL': {'value': 0}, u'ROUTER_ALERT': {'value': 1}, u'IMPLICIT_NULL': {'value': 3}},),RestrictedClassType(base_type=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': u'[0-9a-fA-F:\\.]*'}),], is_leaf=True, yang_name="sid-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='sr-sid-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """sid_id must be of a type compatible with sr-sid-type""",
'defined-type': "openconfig-network-instance:sr-sid-type",
'generated-type': """YANGDynClass(base=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'16..1048575']}),RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'NO_LABEL': {}, u'IPV6_EXPLICIT_NULL': {'value': 2}, u'ENTROPY_LABEL_INDICATOR': {'value': 7}, u'IPV4_EXPLICIT_NULL': {'value': 0}, u'ROUTER_ALERT': {'value': 1}, u'IMPLICIT_NULL': {'value': 3}},),RestrictedClassType(base_type=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': u'[0-9a-fA-F:\\.]*'}),], is_leaf=True, yang_name="sid-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='sr-sid-type', is_config=True)""",
})
self.__sid_id = t
if hasattr(self, '_set'):
self._set()
def _unset_sid_id(self):
self.__sid_id = YANGDynClass(base=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'16..1048575']}),RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'NO_LABEL': {}, u'IPV6_EXPLICIT_NULL': {'value': 2}, u'ENTROPY_LABEL_INDICATOR': {'value': 7}, u'IPV4_EXPLICIT_NULL': {'value': 0}, u'ROUTER_ALERT': {'value': 1}, u'IMPLICIT_NULL': {'value': 3}},),RestrictedClassType(base_type=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': u'[0-9a-fA-F:\\.]*'}),], is_leaf=True, yang_name="sid-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='sr-sid-type', is_config=True)
def _get_label_options(self):
"""
Getter method for label_options, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/segment_routing/prefix_sids/prefix_sid/config/label_options (enumeration)
YANG Description: The options associated with the IGP prefix SID for MPLS. The value
of this leaf specifies the option that the SID should be advertised
into the IGP with.
"""
return self.__label_options
def _set_label_options(self, v, load=False):
"""
Setter method for label_options, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/segment_routing/prefix_sids/prefix_sid/config/label_options (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_label_options is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_label_options() directly.
YANG Description: The options associated with the IGP prefix SID for MPLS. The value
of this leaf specifies the option that the SID should be advertised
into the IGP with.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'EXPLICIT_NULL': {}, u'NO_PHP': {}},), is_leaf=True, yang_name="label-options", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='enumeration', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """label_options must be of a type compatible with enumeration""",
'defined-type': "openconfig-network-instance:enumeration",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'EXPLICIT_NULL': {}, u'NO_PHP': {}},), is_leaf=True, yang_name="label-options", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='enumeration', is_config=True)""",
})
self.__label_options = t
if hasattr(self, '_set'):
self._set()
def _unset_label_options(self):
self.__label_options = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'EXPLICIT_NULL': {}, u'NO_PHP': {}},), is_leaf=True, yang_name="label-options", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='enumeration', is_config=True)
prefix = __builtin__.property(_get_prefix, _set_prefix)
sid_id = __builtin__.property(_get_sid_id, _set_sid_id)
label_options = __builtin__.property(_get_label_options, _set_label_options)
_pyangbind_elements = {'prefix': prefix, 'sid_id': sid_id, 'label_options': label_options, }
| [
"[email protected]"
] | |
463f2f0409cff14924c1315fe6bb57f0ec627eab | e8c256cc56b72a35eae6676fa12fd1e89ecd9eb4 | /main.py | ce2c5cc175db1a25d309d96fa373f9e8336504c8 | [] | no_license | abdulfataiaka/spider-task-python | 6b26e404c0fa5ca173bd9a8d2f677b5537630b58 | 52ddc3f4e4fa67b12b908df20529b3774e5c61c9 | refs/heads/main | 2022-12-22T13:42:39.363274 | 2020-09-23T21:33:31 | 2020-09-23T21:35:18 | 298,099,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | # Show examples of how you would use ALL your implementations here
| [
"[email protected]"
] | |
654f45649174dbb73f30e8600f1607e86a067184 | 160b41b070344658c1f8e1cca3ec2eb8fc0ad253 | /commander.py | b48d0e4890be20701d2d9b92c8ed0bb25038e1f6 | [] | no_license | lemin2601/easyTools | c1e6fec40703d1e016659ece64746d7a451a172d | c16e16300dcb0cb939376bef899ccf4390186e0b | refs/heads/master | 2020-04-09T02:15:11.132072 | 2018-12-17T15:41:03 | 2018-12-17T15:41:03 | 159,934,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,440 | py | from logger import L
from parse import Parser
from utils import parse_args
import config
import json
import sys
import gen
import os
import gv
import utils
import update_manifest
import info
import http_server
import network
import android_build
def execute_func(key, args):
key = key.strip().lower()
if key in config.key:
if key == 'info':
info.main(args)
if key == 'gen':
gen.main(args)
if key == 'cdn':
update_manifest.main(args)
if key == 'update-dev':
gv.cdn_set_package_url(network.update_host(gv.cdn_package_url()))
gv.save()
if key == "update-cdn-url":
gv.cdn_set_package_url(network.update_host(gv.cdn_package_url()))
gv.save()
if key == "android-gen":
android_build.main(args)
if key == 'cdn-run':
print(gv.ROOT_DIR)
path = os.path.join(gv.ROOT_DIR, "http_server.py")
path = os.path.abspath(path)
# cmd = "python {0} -port {1} -path {2}".format(path, gv.cdn_port(), utils.abs_path(gv.cdn_path()))
print(gv.cdn_package_url())
http_server.run(gv.cdn_port(), "", utils.abs_path(gv.cdn_path()))
# os.system(cmd)
if key == 'jslist':
cmd = 'jslist -f "{0}"'.format(utils.abs_path(utils.join_path(gv.client_path(), "./project.json")))
print(cmd)
os.system(cmd)
if key == 'run':
cmd = 'cocos run --proj-dir="{0}" -p web -m debug'.format(utils.abs_path(gv.client_path()))
print(cmd)
os.system(cmd)
if key == 'quit' or key == 'exit' or key == 'q':
L.info(">>Quit!")
sys.exit(0)
pass
def main(args):
L.debug("main commander")
parser = Parser(prog='commander.py')
parser.add_argument("key", default=None, help=get_help())
if len(args) > 0:
arguments = [args.pop(0)]
arguments = parser.parse_args(arguments)
if arguments is None:
pass
else:
key_found = arguments.key
if key_found is not None:
L.debug("find key:" + key_found)
execute_func(key_found, args)
else:
parser.print_help()
def parse(str_line):
arguments = parse_args(str_line)
main(arguments)
def get_help():
return json.dumps(config.key, indent=4, sort_keys=True)
| [
"[email protected]"
] | |
633b89e36668336997acfca2e4957d233f54f0c5 | fb0b9a1137b5aaaff9842e9aeeb1dee98a8dafa6 | /step-8/app.py | 1d414ceff6610126066b82efe8f2226fe1451466 | [] | no_license | davinirjr/treinamento-python | 86f90b10352a602be95f3bb09417dcb0224b0c1f | 9176452726d801e56ace8783a83baf1647913467 | refs/heads/master | 2021-01-18T19:09:40.824159 | 2013-10-09T20:55:18 | 2013-10-09T20:55:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 601 | py | #!/usr/bin/env python
# coding: utf-8
from time import strftime
from bottle import route, run
def fat(number):
if number in [0, 1]:
return 1
return number * fat(number - 1)
@route('/')
@route('/<name>')
def index(name='Esquecidinho'):
return 'Hello world, %s. São %s.' % (name, strftime('%H:%M:%S'))
@route('/fatorial/<number>')
def fatorial(number):
return 'Fatorial de %s é %s' % (number, str(fat(int(number))))
@route('/lista/<number>')
def lista(number):
return ', '.join([str(i) for i in range(int(number))])
run(host='localhost', port=8080, debug=True)
| [
"[email protected]"
] | |
055cbaf2ea541c498797309600cdf56c93d6dad9 | d81d06298965eaf773d16b31fd3ee63bf726a82b | /nexus_test/utils/other.py | 1c2d896e6b581e7263efc2b6c2f444271011a1ec | [] | no_license | MakhmoodSodikov/nexus-test | 081ddcd7181beef2be2dc6ffff977b3e44b3df27 | c15530269ef237fb6fb3144a97dea1ea4cad7609 | refs/heads/master | 2023-06-18T00:09:27.806493 | 2021-07-04T15:49:35 | 2021-07-04T15:49:35 | 382,351,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | import os
from logging import getLogger
logger = getLogger('main logger')
def write_lines_to_file(text: list, filepath: str) -> int:
"""
:param text: Text to be written to the file
:param filepath: Path to the file
:return: 1 if Done correctly, -1 otherwise
"""
try:
with open(filepath, 'w') as f:
f.writelines(text)
return 1
except FileExistsError as e:
logger.error("File doesn't exist!")
return -1
except EOFError as e:
logger.error("File EOF error!")
return -1
except Exception as e:
logger.error(e)
return -1
def get_extension(file_path: str) -> str:
"""
:param file_path: Path to the file
:return: Extension of input file
"""
return os.path.splitext(file_path)[1]
| [
"[email protected]"
] | |
29ae359c329e7ea32fa6df4622e13b1757dc1c55 | caa13ef90cff357108cb55286faf8e7f5b69e13d | /11-ejercicios/ejercicio4.py | c785dee035add7d32003a48feaf01df16709dacc | [] | no_license | Terminal-Redux/master-python | f075bc11408e5d8234763182244e2bc92bb4fc6c | bd4dee2b3d6e2a3e5669f0f412fb12812228cd74 | refs/heads/master | 2022-11-20T01:39:08.722004 | 2020-07-21T00:57:25 | 2020-07-21T00:57:25 | 281,259,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 984 | py | """
Ejercicio 4.
Crear un script que tenga 4 variables, una lista, un string,
un etnero y un booleano y que imprima un mensaje
segun el tipo de dato de cada variable
"""
def traducirTipo(tipo):
result = ""
if tipo == list:
result = "LISTA"
elif tipo == str:
result = "CADENA DE TEXTO"
elif tipo == int:
result = "NUMERO ENTERO"
elif tipo == bool:
result = "BOOLEANO"
return result
def revisarTipoVariable(data, tipo):
test = isinstance(data, tipo)
resultado = ""
if test:
resultado = f"Tipo de variable de {data}: {traducirTipo(tipo)}"
else:
result = "El tipo de dato no es correcto"
return resultado
lista = ["hola", 13, 94, "adios"]
cadena = "Hola mundo"
entero = 45
booleano = True
print(revisarTipoVariable(lista, type(lista)))
print(revisarTipoVariable(cadena, type(cadena)))
print(revisarTipoVariable(entero, type(entero)))
print(revisarTipoVariable(booleano, type(booleano))) | [
"[email protected]"
] |
Subsets and Splits