| Column | Type | Lengths / values / range |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 3–616 |
| content_id | string | lengths 40–40 |
| detected_licenses | list | lengths 0–112 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5–115 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64, nullable (⌀) | 4.92k – 681M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us], nullable (⌀) | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us], nullable (⌀) | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | lengths 3 – 10.2M |
| authors | list | lengths 1–1 |
| author_id | string | lengths 1–132 |
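The table above gives the record schema: one record per source file, with repository metadata stored alongside the file text in `content`. As a rough illustration of how records with this schema could be read, here is a minimal sketch assuming the data is published as a Hugging Face `datasets` dataset — the dataset identifier, the `train` split name, and the streaming flag are assumptions made for illustration, not details taken from this page.

```python
# Minimal sketch: stream records with the schema above and inspect a few fields.
# NOTE: "org/dataset-name" is a hypothetical placeholder; substitute the actual
# dataset identifier or a local path. The column names come from the schema table.
from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train", streaming=True)

for i, row in enumerate(ds):
    # Each record describes one source file plus its repository metadata.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    print(row["content"][:200])  # first 200 characters of the stored source file
    if i == 2:
        break
```

The sample records that follow keep their original export layout: metadata fields separated by `|`, with the full source file inline in the `content` column.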
b5fdf682f928aef41c6625b6e5d1e70bb65baa49 | cfc49e6e65ed37ddf297fc7dffacee8f905d6aa0 | /exercicios_seccao4/35.py | f774259ca92b71fb8f2bb8f0eeece2cbe180ede4 | [] | no_license | IfDougelseSa/cursoPython | c94cc1215643f272f935d5766e7a2b36025ddbe2 | 3f9ceb9701a514106d49b2144b7f2845416ed8ec | refs/heads/main | 2023-06-12T16:51:29.413031 | 2021-07-07T00:20:53 | 2021-07-07T00:20:53 | 369,268,883 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | # Hipotenusa
import math
a = int(input('Digite o cateto a: '))
b = int(input('Digite o cateto b: '))
hipotenusa = math.sqrt(a ** 2 + b ** 2)
print(f'O valor da hipotenusa é {hipotenusa}.')
| [
"[email protected]"
] | |
4c699101fa8582289ec996b5664bd8ab5b3ec4f5 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03032/s297706816.py | d7371f5e563b20937599d014765a4d6f1b0ebd4c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 743 | py | n,k=map(int,input().split())
v=list(map(int, input().split()))
if k<n*2:
ans=0
for i in range(k+1):
for j in range(k-i+1):
v_r=v[:i]
v_l=v[(n-j):]
sute_cnt=k-(i+j)
v_new=v_r+v_l
v_new.sort()
# print(i, j, v_r, v_l, sute_cnt, v_new)
s=sum(v_new)
if not v_new:
continue
for indx in range(len(v_new)):
if v_new[indx]<0 and sute_cnt>0:
s-=v_new[indx]
sute_cnt-=1
else:
break
ans=max(ans,s)
print(ans)
else:
ans=0
for i in range(n):
if v[i]>=0:
ans+=v[i]
print(ans)
| [
"[email protected]"
] | |
991dca36ac04de7fc66617c9dc6b5b69955f62de | 175522feb262e7311fde714de45006609f7e5a07 | /code/OCE/oce_ba_toy.py | b0d5e7d5d33e9d60e03b134751d925d68012207c | [] | no_license | m-hahn/predictive-rate-distortion | a048927dbc692000211df09da09ad1ed702525df | 1ff573500a2313e0a79d68399cbd83970bf05e4d | refs/heads/master | 2020-04-17T13:49:36.961798 | 2019-06-20T12:37:28 | 2019-06-20T12:37:28 | 166,631,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,806 | py | # Computes estimates also from held-out data.
# Was called zNgramIB_5.py.
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--language", type=str, dest="language", default="RIP")
parser.add_argument("--horizon", type=int, dest="horizon", default=1)
parser.add_argument("--code_number", type=int, dest="code_number", default=100)
parser.add_argument("--beta", type=float, dest="beta", default=0.1)
parser.add_argument("--dirichlet", type=float, dest="dirichlet", default=0.00001)
args_names = ["language", "horizon", "code_number", "beta", "dirichlet"]
args = parser.parse_args()
args.beta = 1/args.beta
assert args.beta <= 1.0
import random
import sys
header = ["index", "word", "lemma", "posUni", "posFine", "morph", "head", "dep", "_", "_"]
from corpusIteratorToy import CorpusIteratorToy
ngrams = {}
lastPosUni = ("EOS",)*(2*args.horizon-1)
for sentence in CorpusIteratorToy(args.language,"train", storeMorph=True).iterator():
for line in sentence:
nextPosUni = line["posUni"]
ngram = lastPosUni+(nextPosUni,)
ngrams[ngram] = ngrams.get(ngram, 0) + 1
lastPosUni = lastPosUni[1:]+(nextPosUni,)
nextPosUni = "EOS"
ngram = lastPosUni+(nextPosUni,)
ngrams[ngram] = ngrams.get(ngram, 0) + 1
lastPosUni = lastPosUni[1:]+(nextPosUni,)
#import torch.distributions
import torch.nn as nn
import torch
from torch.autograd import Variable
ngrams = list(ngrams.iteritems())
#ngrams = [x for x in ngrams if x[1] > 100]
#print(ngrams)
print(["Number of ngrams", len(ngrams)])
keys = [x[0] for x in ngrams]
total = sum([x[1] for x in ngrams])
frequencies = [x[1] for x in ngrams]
pasts = [x[:args.horizon] for x in keys] #range(horizon:range(horizon, 2*horizon)]
futures = [x[args.horizon:] for x in keys]
itos_pasts = list(set(pasts)) + ["_OOV_"]
itos_futures = list(set(futures)) + ["_OOV_"]
stoi_pasts = dict(zip(itos_pasts, range(len(itos_pasts))))
stoi_futures = dict(zip(itos_futures, range(len(itos_futures))))
import torch
pasts_int = torch.LongTensor([stoi_pasts[x] for x in pasts])
futures_int = torch.LongTensor([stoi_futures[x] for x in futures])
marginal_past = torch.zeros(len(itos_pasts))
for i in range(len(pasts)):
marginal_past[pasts_int[i]] += frequencies[i]
marginal_past[-1] = args.dirichlet * len(itos_futures)
marginal_past = marginal_past.div(marginal_past.sum())
print(marginal_past)
print(len(marginal_past))
future_given_past = torch.zeros(len(itos_pasts), len(itos_futures))
for i in range(len(pasts)):
future_given_past[pasts_int[i]][futures_int[i]] = frequencies[i]
future_given_past[-1].fill_(args.dirichlet)
future_given_past[:,-1].fill_(args.dirichlet)
future_given_past += 0.00001
print(future_given_past.sum(1))
#quit()
future_given_past = future_given_past.div(future_given_past.sum(1).unsqueeze(1))
print(future_given_past[0].sum())
def logWithoutNA(x):
y = torch.log(x)
y[x == 0] = 0
return y
marginal_future = torch.zeros(len(itos_futures))
for i in range(len(futures)):
marginal_future[futures_int[i]] += frequencies[i]
marginal_future[-1] = args.dirichlet * len(itos_pasts)
marginal_future = marginal_future.div(marginal_future.sum())
print(marginal_future)
print(len(marginal_future))
encoding = torch.empty(len(itos_pasts), args.code_number).uniform_(0.000001, 1)
encoding = encoding.div(encoding.sum(1).unsqueeze(1))
decoding = torch.empty(args.code_number, len(itos_futures)).uniform_(0.000001, 1)
decoding = decoding.div(decoding.sum(1).unsqueeze(1))
print(decoding[0].sum())
#quit()
marginal_hidden = torch.matmul(marginal_past.unsqueeze(0), encoding).squeeze(0)
import torch.nn.functional
def runOCE():
global decoding
global encoding
global marginal_hidden
objective = 10000000
for t in range(500):
print("Iteration", t)
divergence_by_past = (future_given_past * logWithoutNA(future_given_past))
divergence_by_past = divergence_by_past.sum(1)
log_future_given_past = logWithoutNA(future_given_past)
log_decoding = logWithoutNA(decoding)
ratios = log_future_given_past.unsqueeze(1) - log_decoding.unsqueeze(0)
divergence2 = (future_given_past.unsqueeze(1) * ratios).sum(2)
total_distortion = torch.matmul(marginal_past.unsqueeze(0), divergence2 * encoding).sum()
assert total_distortion >= 0, total_distortion
logNewEncoding = logWithoutNA(marginal_hidden.unsqueeze(0)) + (-args.beta * divergence2)
logNewEncoding = torch.nn.functional.log_softmax( logNewEncoding, dim=1) # logNewEncoding - logNorm
newEncoding = torch.exp(logNewEncoding)
new_marginal_hidden = torch.matmul(marginal_past.unsqueeze(0), newEncoding).squeeze(0)
newEncodingInverted = (newEncoding * marginal_past.unsqueeze(1)).div(new_marginal_hidden.unsqueeze(0))
newEncodingInverted[new_marginal_hidden.unsqueeze(0).expand(len(itos_pasts), -1) == 0] = 0
newDecoding = torch.matmul(future_given_past.t(), newEncodingInverted).t()
assert abs(newDecoding[0].sum()) < 0.01 or abs(newDecoding[0].sum() - 1.0) < 0.01 , newDecoding[0].sum()
entropy = new_marginal_hidden * logWithoutNA(new_marginal_hidden)
entropy = -torch.sum(entropy)
print("Entropy", entropy)
encoding = newEncoding
decoding = newDecoding
marginal_hidden = new_marginal_hidden
logDecoding = logWithoutNA(decoding)
logFutureMarginal = logWithoutNA(marginal_future)
miWithFuture = torch.sum((decoding * (logDecoding - logFutureMarginal.unsqueeze(0))).sum(1) * marginal_hidden)
logEncoding = logWithoutNA(encoding)
log_marginal_hidden = logWithoutNA(marginal_hidden)
miWithPast = torch.sum((encoding * (logEncoding - log_marginal_hidden.unsqueeze(0))).sum(1) * marginal_past)
assert miWithFuture <= miWithPast+1e-5, (miWithFuture , miWithPast)
newObjective = 1/args.beta * miWithPast - miWithFuture
print(["Mi with future", miWithFuture, "Mi with past", miWithPast])
print(["objectives","last",objective, "new", newObjective])
if not (newObjective - 0.1 <= objective):
print ("WARNING: Objective not improving. ", newObjective, objective)
if newObjective == objective:
print("Ending")
break
objective = newObjective
return encoding, decoding, logDecoding, miWithPast, log_marginal_hidden
encoding, decoding, logDecoding, miWithPast_train, log_marginal_hidden = runOCE()
futureSurprisal_train = -((future_given_past * marginal_past.unsqueeze(1)).unsqueeze(1) * encoding.unsqueeze(2) * logDecoding.unsqueeze(0)).sum()
#assert False, "how is the vocabulary for held-out data generated????"
# try on held-out data
ngrams = {}
lastPosUni = ("EOS",)*(2*args.horizon-1)
for sentence in CorpusIteratorToy(args.language,"dev", storeMorph=True).iterator():
for line in sentence:
nextPosUni = line["posUni"]
ngram = lastPosUni+(nextPosUni,)
ngrams[ngram] = ngrams.get(ngram, 0) + 1
lastPosUni = lastPosUni[1:]+(nextPosUni,)
nextPosUni = "EOS"
ngram = lastPosUni+(nextPosUni,)
ngrams[ngram] = ngrams.get(ngram, 0) + 1
lastPosUni = lastPosUni[1:]+(nextPosUni,)
#import torch.distributions
import torch.nn as nn
import torch
from torch.autograd import Variable
ngrams = list(ngrams.iteritems())
#ngrams = [x for x in ngrams if x[1] > 100]
#print(ngrams)
#print(["Number of ngrams", len(ngrams)])
keys = [x[0] for x in ngrams]
total = sum([x[1] for x in ngrams])
frequencies = [x[1] for x in ngrams]
pasts = [x[:args.horizon] for x in keys] #range(horizon:range(horizon, 2*horizon)]
futures = [x[args.horizon:] for x in keys]
import torch
pasts_int = torch.LongTensor([stoi_pasts[x] if x in stoi_pasts else stoi_pasts["_OOV_"] for x in pasts])
futures_int = torch.LongTensor([stoi_futures[x] if x in stoi_futures else stoi_futures["_OOV_"] for x in futures])
marginal_past = torch.zeros(len(itos_pasts))
for i in range(len(pasts)):
marginal_past[pasts_int[i]] += frequencies[i]
#marginal_past[-1] = len(itos_futures)
marginal_past = marginal_past.div(marginal_past.sum())
future_given_past = torch.zeros(len(itos_pasts), len(itos_futures))
for i in range(len(pasts)):
future_given_past[pasts_int[i]][futures_int[i]] = frequencies[i]
#future_given_past[-1].fill_(1)
#future_given_past[:,-1].fill_(1)
future_given_past += 0.00001
future_given_past = future_given_past.div(future_given_past.sum(1).unsqueeze(1))
#marginal_future = torch.zeros(len(itos_futures))
#for i in range(len(futures)):
# marginal_future[futures_int[i]] += frequencies[i]
#marginal_future = marginal_future.div(marginal_future.sum())
marginal_hidden = torch.matmul(marginal_past.unsqueeze(0), encoding).squeeze(0)
logDecoding = logWithoutNA(decoding)
#logFutureMarginal = logWithoutNA(marginal_future)
futureSurprisal = -((future_given_past * marginal_past.unsqueeze(1)).unsqueeze(1) * encoding.unsqueeze(2) * logDecoding.unsqueeze(0)).sum()
logEncoding = logWithoutNA(encoding)
miWithPast = torch.sum((encoding * (logEncoding - log_marginal_hidden.unsqueeze(0))).sum(1) * marginal_past)
print(["Mi with past", miWithPast, "Future Surprisal", futureSurprisal/args.horizon, "Horizon", args.horizon]) # "Mi with future", miWithFuture
myID = random.randint(0,10000000)
outpath = "../../results/outputs-oce/estimates-"+args.language+"_"+__file__+"_model_"+str(myID)+".txt"
with open(outpath, "w") as outFile:
print >> outFile, "\t".join(x+" "+str(getattr(args,x)) for x in args_names)
print >> outFile, float(miWithPast)
print >> outFile, float(futureSurprisal/args.horizon)
print >> outFile, float(miWithPast_train)
print >> outFile, float(futureSurprisal_train/args.horizon)
print(outpath)
| [
"[email protected]"
] | |
16241caf95d6f2f6a2c327e2309ad58990c11cd5 | be549921446835ba6dff0cadaa0c7b83570ebc3e | /run_eval_sutter.py | a0ba2df9ac3c6f63655586a070cc69f7762854c8 | [] | no_license | uctoronto/AutoPrescribe | 895ee4375625408c663cee22610bb5425d7efc7f | a6188e9189df727320448a368f6e70036472ede4 | refs/heads/master | 2020-03-27T05:47:47.500486 | 2017-05-31T18:49:33 | 2017-05-31T18:49:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,227 | py | from models.processor import Processor
from models.leap import LEAPModel
from exp.coverage import config_sutter as config
from utils.data import dump
config = config.get_config()
dir = 'build/'
config.saved_model_file = dir + 'sutter_%s_%s_seq2seq.model' % (config.level, config.order)
print(config.saved_model_file.split('/')[-1])
p = Processor(config)
model = LEAPModel(p, config)
# model.do_train()
model.load_params(config.saved_model_file)
# model.do_reinforce(scorer)
model.do_eval(training = False, filename = 'sutter_%s_%s_seq2seq.txt' % (config.level, config.order), max_batch = 5000000)
# model.load_params('../models/resume_seed13_100d_lr0.001_h256.model')
# ret = model.do_generate(data)
#
# from utils.eval import Evaluator
# eva = Evaluator()
# cnt = 0
# truth = []
# sum_jaccard = 0
# for line in open("seq2seq.h256.txt"):
# if cnt % 3 == 1:
# truth = set(line.strip().split("T: ")[1].split(" "))
# if cnt % 3 == 2:
# result = set(line.strip().split("Gen: ")[1].replace("END", "").strip().split(" "))
# jaccard = eva.get_jaccard_k(truth, result)
# sum_jaccard += jaccard
# cnt += 1
#
# print(sum_jaccard * 3 / cnt)
#
# cnt = 0
# truth_list = []
# prediction_list = []
# for line in open("seq2seq.h256.txt"):
# if cnt % 3 == 1:
# truth = set(line.strip().split("T: ")[1].split(" "))
# truth_list.append(truth)
# if cnt % 3 == 2:
# result = set(line.strip().split("Gen: ")[1].replace("END", "").strip().split(" "))
# prediction_list.append(result)
# cnt += 1
#
cnt = 0
results = []
input = []
truth = []
for line in open('sutter_%s_%s_seq2seq.txt' % (config.level, config.order)):
if cnt % 3 == 0:
input = set(line.strip().split("S: ")[1].split(" "))
if cnt % 3 == 1:
if len(line.strip().split("T: ")) <= 1:
truth = []
continue
truth = set(line.strip().split("T: ")[1].split(" "))
if cnt % 3 == 2:
result = set(line.strip().split("Gen: ")[1].replace("END", "").strip().split(" "))
if len(truth) > 0:
results.append((input, truth, result))
cnt += 1
dump(results, "sutter_%s_%s_result_seq2seq.pkl" % (config.level, config.order)) | [
"[email protected]"
] | |
ab4a4ec80a1bfd3b4a215a39861be605bc408651 | 9766c2e479e99cca5bf7cc834c949fc4d5286275 | /SRC/engine/IO/outputdestination.py | c01e8e8163a5f71c0b0c1fb0662dbb4b7e54d8e7 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | UstbCmsPjy/OOF2 | 4c141e8da3c7e3c5bc9129c2cb27ed301455a155 | f8539080529d257a02b8f5cc44040637387ed9a1 | refs/heads/master | 2023-05-05T09:58:22.597997 | 2020-05-28T23:05:30 | 2020-05-28T23:05:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,483 | py | # -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# [email protected].
from ooflib.SWIG.common import lock
from ooflib.SWIG.common import switchboard
from ooflib.common import debug
from ooflib.common import enum
from ooflib.common import registeredclass
from ooflib.common import utils
from ooflib.common.IO import datafile
from ooflib.common.IO import formatchars
from ooflib.common.IO import filenameparam
from ooflib.common.IO import parameter
from ooflib.common.IO import reporter
from ooflib.common.IO import xmlmenudump
import os
import weakref
class OutputDestination(registeredclass.RegisteredClass):
registry = []
tip="What to do with Scheduled Output data."
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/outputdest.xml')
def open(self):
pass
# def open_append(self):
# pass
def flush(self):
pass
def rewind(self):
pass
def close(self):
pass
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
class GfxWindowDestination(OutputDestination):
def shortrepr(self):
return "<Graphics Window>"
registeredclass.Registration(
'Graphics Window',
OutputDestination,
GfxWindowDestination,
rewindable=False,
ordering=0,
tip="Send graphics window updates to the graphics window.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/gfxoutputdest.xml')
)
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
# BaseOutputStream does the work for OutputStream, which writes Output
# data to a file. More than one OutputStream can write to the same
# file, which they do by sharing a single BaseOutputStream.
_allStreams = utils.OrderedDict() # All BaseOutputStreams, keyed by filename
_streamsLock = lock.SLock() # Controls access to _allStreams
class BaseOutputStream(object):
def __init__(self, filename, mode, openfile):
self.filename = filename
self.file = None
self.openfile = openfile # fn that actually opens the file
# _streamsLock has always been aquired before __init__ is called.
_allStreams[filename] = self
self.referents = [] # all OutputStreams using this BaseOutputStream
# lastOutput and lastargs are used to decide whether or not to
# write the header info in the data file. It's not written if
# the output and its args are the same as they were for the
# previous write.
self.lastOutput = None
self.lastargs = None
self.seplast = True # was the last thing written a separator?
self.nOpen = 0 # net number of times this has been opened
self.everOpened = False # has this been opened in this oof2 session?
self.lock = lock.SLock() # controls access by OutputStreams
self.mode = mode
self.rewound = False
self.appending = False
def addStream(self, stream):
self.lock.acquire()
self.referents.append(weakref.ref(stream, self._removeStream))
self.lock.release()
def _removeStream(self, wref):
self.referents.remove(wref)
self.lock.acquire()
try:
if len(self.referents) == 0 and self.file is not None:
self.file.close()
self.file = None
del _allStreams[self.filename]
finally:
self.lock.release()
switchboard.notify("output destinations changed")
def open(self):
self.lock.acquire()
try:
if self.file is None:
# The file should be opened with mode "w" if either of
# these conditions holds:
# * It hasn't been opened before during this
# session, and self.mode="w"
# * It's been rewound since the last time it was
# opened.
# In all other cases, it should be opened with mode "a".
if (not self.everOpened and self.mode == "w") or self.rewound:
mowed = "w"
self.rewound = False
self.lastOutput = None
self.seplast = True
self.appending = False
else:
mowed = "a"
self.appending = os.path.exists(self.filename)
self.file = self.openfile(self.filename, mowed)
self.everOpened = True
self.nOpen += 1
finally:
self.lock.release()
def rewind(self):
self.lock.acquire()
self.seplast = True
self.rewound = True
try:
if self.file is not None:
self.file.close()
self.file = None
self.mode = filenameparam.WriteMode("w")
self.everOpened = False
self.lastOutput = None
finally:
self.lock.release()
def close(self):
self.lock.acquire()
try:
self.nOpen -= 1
assert self.nOpen >= 0
if self.nOpen == 0:
self.file.close()
self.file = None
self.seplast = True
finally:
self.lock.release()
def flush(self):
self.lock.acquire()
try:
if self.file is not None:
self.file.flush()
finally:
self.lock.release()
def printHeadersIfNeeded(self, output, *args, **kwargs):
if self.lastOutput != output or self.lastargs != (args, kwargs):
if self.appending or self.lastOutput is not None:
self.file.write("\n") # insert extra blank line before header
output.printHeaders(self, *args, **kwargs)
self.lastOutput = output
self.lastargs = (args, kwargs)
def write(self, text):
# When an object with a "write" method is used as the argument
# of "print >>", write is called once for each printed string,
# once for each space between printed strings, and once for
# the newline at the end.
if text == " " and not self.seplast:
self.file.write(formatchars.getSeparator())
self.seplast = True
elif text == "\n":
self.file.write(text)
self.seplast = True
else:
self.file.write(text)
self.seplast = False
def comment(self, *args):
self.file.write(" ".join([formatchars.getCommentChar()] +
[x for x in args] ))
self.file.write("\n")
self.seplast = False
def rewindStream(filename):
stream = _allStreams[filename]
stream.rewind()
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
# TextOutputDestination is an intermediate baseclass for
# OutputDestinations that produce some sort of human readable text
# (ie, not OOF2 data files which might be ascii). This includes the
# results of Analysis operations. There are two subclasses:
# OutputStream, which writes to a file, and MessageWindowStream, which
# writes to the OOF2 Message Window.
class TextOutputDestination(OutputDestination):
def __init__(self, basestream):
# Multiple TextOutputDestinations can share the same
# basestream, which is a BaseOutputStream.
self.basestream = basestream
basestream.addStream(self)
def open(self):
self.basestream.open()
def rewind(self):
self.basestream.rewind()
def printHeadersIfNeeded(self, output, *args, **kwargs):
self.basestream.printHeadersIfNeeded(output, *args, **kwargs)
def write(self, text):
self.basestream.write(text)
def comment(self, *args):
self.basestream.comment(*args)
def close(self):
self.basestream.close()
# OutputStream directs output to a file, specified by a file name and
# mode. If two OutputStreams have the same filename but different
# modes, the *last* mode specified is used. TODO: Make sure that the
# documentation is correct about that. It used to be different.
class OutputStream(TextOutputDestination):
def __init__(self, filename, mode):
self.filename = filename
self.mode = mode
try:
_streamsLock.acquire()
try:
basestream = _allStreams[filename]
except KeyError:
basestream = BaseOutputStream(filename, mode, file)
else:
basestream.mode = mode
finally:
_streamsLock.release()
TextOutputDestination.__init__(self, basestream)
switchboard.notify("output destinations changed")
def shortrepr(self):
return self.filename
# newreg is referred to in outputdestinationwidget.py.
newreg = registeredclass.Registration(
'Output Stream',
OutputDestination,
OutputStream,
ordering=1,
rewindable=True,
params=[
filenameparam.WriteFileNameParameter(
'filename', tip=parameter.emptyTipString),
filenameparam.WriteModeParameter(
'mode', tip="Whether to write or append to the file.")
],
tip="Send output to a file.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/outputstream.xml')
)
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
msgWindowName = "<Message Window>"
class MessageWindowStream(TextOutputDestination):
def __init__(self):
TextOutputDestination.__init__(
self,
BaseOutputStream(msgWindowName, filenameparam.WriteMode("w"),
lambda f,m: reporter.fileobj))
def shortrepr(self):
return "<Message Window>"
registeredclass.Registration(
'Message Window',
OutputDestination,
MessageWindowStream,
ordering=0,
rewindable=False,
tip="Send output to the Message Window.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/messagewindow.xml')
)
msgWindowOutputDestination = MessageWindowStream()
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
def allTextOutputStreams():
_streamsLock.acquire()
try:
return [n for n in _allStreams.keys() if n != msgWindowName]
finally:
_streamsLock.release()
def forgetTextOutputStreams():
_streamsLock.acquire()
try:
_allStreams.clear()
finally:
_streamsLock.release()
switchboard.notify("output destinations changed")
def getLatestMode(filename, default):
try:
return _allStreams[filename].mode
except KeyError:
return default
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
class DataFileOutput(OutputDestination):
def __init__(self, filename, mode, format):
self.filename = filename
self.mode = mode
self.format = format
self._dfile = None
self.everOpened = False
self.rewound = False
def open(self):
## See BaseOutputStream.open()
assert self._dfile is None
if (not self.everOpened and self.mode == "w") or self.rewound:
mowed = "w"
self.rewound = False
else:
mowed = "a"
self._dfile = datafile.writeDataFile(
self.filename, mowed, self.format)
self.everOpened= True
def dfile(self):
assert self._dfile is not None
return self._dfile
def flush(self):
if self.isOpen():
self._dfile.flush()
def close(self):
if self.isOpen():
self._dfile.close()
self._dfile = None
def isOpen(self):
return self._dfile is not None
def rewind(self):
# TODO?: what if mode == 'append'? The next call to
# dfile() will reopen at the end of the file. Should it be
# illegal to rewind a file opened for appending?
self.close()
self._dfile = None
self.rewound = True
self.everOpened = False
def shortrepr(self):
return "%s (%s)" % (self.filename, self.format.string())
registeredclass.Registration(
'Data File',
OutputDestination,
DataFileOutput,
rewindable=True,
params=[
filenameparam.WriteFileNameParameter(
"filename", tip=parameter.emptyTipString),
filenameparam.WriteModeParameter(
"mode", tip="Whether to write or append to the file."),
enum.EnumParameter('format', datafile.DataFileFormat, datafile.ASCII,
tip="Format of the file.")
],
ordering=3,
tip="Send Mesh data to a file.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/reg/datafiledest.xml'))
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
class OutputDestinationParameter(parameter.RegisteredParameter):
def __init__(self, name, value=None, default=None, tip=None):
parameter.RegisteredParameter.__init__(
self, name=name, reg=OutputDestination,
value=value, default=default, tip=tip)
def clone(self):
return self.__class__(self.name, self.value, self.default, self.tip)
| [
"[email protected]"
] | |
7ef5899fc65729bb3d4169066bc9065937633f77 | 8565e4d24b537d1fb0f71fef6215d193ceaed6cc | /tests/test_check_circular.py | 4a91863962a4377cf6bad0ba6466463a0579f885 | [
"MIT"
] | permissive | soasme/dogeon | 5f55c84a6f93aaa7757372664dd60ed90cf200e8 | 496b9a5b099946d14434ed0cd7a94a270f607207 | refs/heads/master | 2020-05-17T19:01:42.780694 | 2018-11-04T05:01:23 | 2018-11-04T05:01:23 | 20,592,607 | 3 | 0 | null | 2014-06-28T01:34:35 | 2014-06-07T12:28:07 | Python | UTF-8 | Python | false | false | 736 | py | import dson
import pytest
def default_iterable(obj):
return list(obj)
def test_circular_dict():
dct = {}
dct['a'] = dct
pytest.raises(ValueError, dson.dumps, dct)
def test_circular_list():
lst = []
lst.append(lst)
pytest.raises(ValueError, dson.dumps, lst)
def test_circular_composite():
dct2 = {}
dct2['a'] = []
dct2['a'].append(dct2)
pytest.raises(ValueError, dson.dumps, dct2)
def test_circular_default():
dson.dumps([set()], default=default_iterable)
pytest.raises(TypeError, dson.dumps, [set()])
def test_circular_off_default():
dson.dumps([set()], default=default_iterable, check_circular=False)
pytest.raises(TypeError, dson.dumps, [set()], check_circular=False)
| [
"[email protected]"
] | |
eee3cfdc459dc13a31ef3210abdd3ab4cc2b38fb | c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd | /google/ads/googleads/v4/googleads-py/google/ads/googleads/v4/enums/types/product_bidding_category_level.py | 136f73cb4b5a918c667515478fd95aaa94f7a0f1 | [
"Apache-2.0"
] | permissive | dizcology/googleapis-gen | 74a72b655fba2565233e5a289cfaea6dc7b91e1a | 478f36572d7bcf1dc66038d0e76b9b3fa2abae63 | refs/heads/master | 2023-06-04T15:51:18.380826 | 2021-06-16T20:42:38 | 2021-06-16T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,232 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v4.enums',
marshal='google.ads.googleads.v4',
manifest={
'ProductBiddingCategoryLevelEnum',
},
)
class ProductBiddingCategoryLevelEnum(proto.Message):
r"""Level of a product bidding category. """
class ProductBiddingCategoryLevel(proto.Enum):
r"""Enum describing the level of the product bidding category."""
UNSPECIFIED = 0
UNKNOWN = 1
LEVEL1 = 2
LEVEL2 = 3
LEVEL3 = 4
LEVEL4 = 5
LEVEL5 = 6
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
08824881bc68f2ddf1fee1b25916cd115d4df279 | aec59723a3dd0d3356a4ce426dc0fc381a4d3157 | /catalog/model/pricing.py | 020f6e8a724428673e0662dd1b10eba1af0e2087 | [] | no_license | Guya-LTD/catalog | f44e31593637e22b3b2a2869a387e29875986f7c | 632b3c3766e2600275c0a18db6378b2d38e3c463 | refs/heads/master | 2023-02-11T19:03:36.796812 | 2021-01-08T14:12:06 | 2021-01-08T14:12:06 | 275,332,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 859 | py | # -*- coding: utf-8 -*-
"""Copyright Header Details
Copyright
---------
Copyright (C) Guya , PLC - All Rights Reserved (As Of Pending...)
Unauthorized copying of this file, via any medium is strictly prohibited
Proprietary and confidential
LICENSE
-------
This file is subject to the terms and conditions defined in
file 'LICENSE.txt', which is part of this source code package.
Authors
-------
* [Simon Belete](https://github.com/Simonbelete)
Project
-------
* Name:
- Guya E-commerce & Guya Express
* Sub Project Name:
- Catalog Service
* Description
- Catlog Catalog Service
"""
"""Package details
Application features:
--------------------
Python 3.7
Flask
PEP-8 for code style
Entity.
"""
class Pricing:
"""A Base Model Representation of Pricing Entity."""
pass | [
"[email protected]"
] | |
14f648102f5ede6ed0cbfd6da4036fb02e0e97b3 | 8983b099a27d124b17fc20d4e9b5ec2f0bf8be25 | /altair/schema/_interface/named_channels.py | d2d7c77e95eadb00163c13a153019fb543b03f86 | [
"BSD-3-Clause"
] | permissive | princessd8251/altair | a7afa0745291f82215fbda6a477e369f59fcf294 | 387c575ee0410e7ac804273a0f2e5574f4cca26f | refs/heads/master | 2021-01-16T21:41:40.935679 | 2017-08-10T16:36:05 | 2017-08-10T16:36:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 984 | py | # -*- coding: utf-8 -*-
# Auto-generated file: do not modify directly
# - altair version info: v1.2.0-98-g8a98636
# - date: 2017-08-09 12:14:26
from . import channel_wrappers
class Color(channel_wrappers.ChannelWithLegend):
pass
class Column(channel_wrappers.PositionChannel):
pass
class Detail(channel_wrappers.Field):
pass
class Label(channel_wrappers.Field):
pass
class Opacity(channel_wrappers.ChannelWithLegend):
pass
class Order(channel_wrappers.OrderChannel):
pass
class Path(channel_wrappers.OrderChannel):
pass
class Row(channel_wrappers.PositionChannel):
pass
class Shape(channel_wrappers.ChannelWithLegend):
pass
class Size(channel_wrappers.ChannelWithLegend):
pass
class Text(channel_wrappers.Field):
pass
class X(channel_wrappers.PositionChannel):
pass
class X2(channel_wrappers.Field):
pass
class Y(channel_wrappers.PositionChannel):
pass
class Y2(channel_wrappers.Field):
pass
| [
"[email protected]"
] | |
e073a8419eda5bafad84588f1124d089f124d4cd | 5864e86954a221d52d4fa83a607c71bacf201c5a | /carbon/common/lib/markdown/extensions/tables.py | f613f9a67f1f99e646124dad4f9a5fdff380870a | [] | no_license | connoryang/1v1dec | e9a2303a01e5a26bf14159112b112be81a6560fd | 404f2cebf13b311e754d45206008918881496370 | refs/heads/master | 2021-05-04T02:34:59.627529 | 2016-10-19T08:56:26 | 2016-10-19T08:56:26 | 71,334,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,302 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\carbon\common\lib\markdown\extensions\tables.py
import markdown
from markdown.util import etree
class TableProcessor(markdown.blockprocessors.BlockProcessor):
def test(self, parent, block):
rows = block.split('\n')
return len(rows) > 2 and '|' in rows[0] and '|' in rows[1] and '-' in rows[1] and rows[1].strip()[0] in ('|', ':', '-')
def run(self, parent, blocks):
block = blocks.pop(0).split('\n')
header = block[0].strip()
seperator = block[1].strip()
rows = block[2:]
border = False
if header.startswith('|'):
border = True
align = []
for c in self._split_row(seperator, border):
if c.startswith(':') and c.endswith(':'):
align.append('center')
elif c.startswith(':'):
align.append('left')
elif c.endswith(':'):
align.append('right')
else:
align.append(None)
table = etree.SubElement(parent, 'table')
thead = etree.SubElement(table, 'thead')
self._build_row(header, thead, align, border)
tbody = etree.SubElement(table, 'tbody')
for row in rows:
self._build_row(row.strip(), tbody, align, border)
def _build_row(self, row, parent, align, border):
tr = etree.SubElement(parent, 'tr')
tag = 'td'
if parent.tag == 'thead':
tag = 'th'
cells = self._split_row(row, border)
for i, a in enumerate(align):
c = etree.SubElement(tr, tag)
try:
c.text = cells[i].strip()
except IndexError:
c.text = ''
if a:
c.set('align', a)
def _split_row(self, row, border):
if border:
if row.startswith('|'):
row = row[1:]
if row.endswith('|'):
row = row[:-1]
return row.split('|')
class TableExtension(markdown.Extension):
def extendMarkdown(self, md, md_globals):
md.parser.blockprocessors.add('table', TableProcessor(md.parser), '<hashheader')
def makeExtension(configs = {}):
return TableExtension(configs=configs)
| [
"[email protected]"
] | |
3eb6943aae1ad11db104ee00d54ed9bccbb642e4 | 855dc9fcd4170923e8723b6946c09c5cae68e079 | /what_transcode/migrations/0001_initial.py | cb61199f9d66f0b1aee0d9c062f1096d498bbdcf | [
"MIT"
] | permissive | point-source/WhatManager2 | 3fc72976402ac40d132aef0deffd8bcfbd209703 | ddbce0fa1ff4e1fc44bfa726c4f7eace4adbe8a9 | refs/heads/master | 2023-01-27T11:39:43.861041 | 2019-02-24T17:51:24 | 2019-02-24T17:51:24 | 210,232,561 | 1 | 0 | MIT | 2019-09-23T00:21:54 | 2019-09-23T00:21:53 | null | UTF-8 | Python | false | false | 985 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('home', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='TranscodeRequest',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False,
auto_created=True, primary_key=True)),
('requested_by_ip', models.TextField()),
('requested_by_what_user', models.TextField()),
('date_requested', models.DateTimeField(auto_now_add=True)),
('date_completed', models.DateTimeField(null=True)),
('celery_task_id', models.TextField(null=True)),
('what_torrent', models.ForeignKey(to='home.WhatTorrent')),
],
options={
},
bases=(models.Model,),
),
]
| [
"[email protected]"
] | |
e8ecf7fc0963b49fbee6320bd113e8f851195674 | add161c6e8d86dc8448d4f3d4b61a173a3a4543a | /fuglu/src/fuglu/plugins/icap.py | 7c45e6ac0df6497843ea46eb04e299795b1f6fe7 | [
"Apache-2.0"
] | permissive | sporkman/fuglu | 9a578746a52308d618a6edcd7abeb4c50fb0f6fc | 1b458147a93ed17927e0fe16debd80b6f690d11b | refs/heads/master | 2021-01-12T22:30:50.575560 | 2015-04-06T10:07:51 | 2015-04-06T10:07:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,724 | py | # Copyright 2009-2015 Oli Schacher
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# http://vaibhavkulkarni.wordpress.com/2007/11/19/a-icap-client-code-in-c-to-virus-scan-a-file-using-symantec-scan-server/
from fuglu.shared import ScannerPlugin, string_to_actioncode, DEFER, DUNNO, actioncode_to_string, Suspect, apply_template
import socket
import os
class ICAPPlugin(ScannerPlugin):
"""ICAP Antivirus Plugin
This plugin allows Antivirus Scanning over the ICAP Protocol (http://tools.ietf.org/html/rfc3507 )
supported by some AV Scanners like Symantec and Sophos. For sophos, however, it is recommended to use the native SSSP Protocol.
Prerequisites: requires an ICAP capable antivirus engine somewhere in your network
"""
def __init__(self, config, section=None):
ScannerPlugin.__init__(self, config, section)
self.requiredvars = {
'host': {
'default': 'localhost',
'description': 'hostname where the ICAP server runs',
},
'port': {
'default': '1344',
'description': "tcp port or path to unix socket",
},
'timeout': {
'default': '10',
'description': 'socket timeout',
},
'maxsize': {
'default': '22000000',
'description': "maximum message size, larger messages will not be scanned. ",
},
'retries': {
'default': '3',
'description': 'how often should fuglu retry the connection before giving up',
},
'virusaction': {
'default': 'DEFAULTVIRUSACTION',
'description': "action if infection is detected (DUNNO, REJECT, DELETE)",
},
'problemaction': {
'default': 'DEFER',
'description': "action if there is a problem (DUNNO, DEFER)",
},
'rejectmessage': {
'default': 'threat detected: ${virusname}',
'description': "reject message template if running in pre-queue mode and virusaction=REJECT",
},
'service': {
'default': 'AVSCAN',
'description': 'ICAP Av scan service, usually AVSCAN (sophos, symantec)',
},
'enginename': {
'default': 'icap-generic',
'description': "name of the virus engine behind the icap service. used to inform other plugins. can be anything like 'sophos', 'symantec', ...",
},
}
self.logger = self._logger()
def __str__(self):
return "ICAP AV"
def _problemcode(self):
retcode = string_to_actioncode(
self.config.get(self.section, 'problemaction'), self.config)
if retcode != None:
return retcode
else:
# in case of invalid problem action
return DEFER
def examine(self, suspect):
enginename = self.config.get(self.section, 'enginename')
if suspect.size > self.config.getint(self.section, 'maxsize'):
self.logger.info('Not scanning - message too big')
return
content = suspect.get_source()
for i in range(0, self.config.getint(self.section, 'retries')):
try:
viruses = self.scan_stream(content)
if viruses != None:
self.logger.info(
"Virus found in message from %s : %s" % (suspect.from_address, viruses))
suspect.tags['virus'][enginename] = True
suspect.tags['%s.virus' % enginename] = viruses
suspect.debug('viruses found in message : %s' % viruses)
else:
suspect.tags['virus'][enginename] = False
if viruses != None:
virusaction = self.config.get(self.section, 'virusaction')
actioncode = string_to_actioncode(virusaction, self.config)
firstinfected, firstvirusname = viruses.items()[0]
values = dict(
infectedfile=firstinfected, virusname=firstvirusname)
message = apply_template(
self.config.get(self.section, 'rejectmessage'), suspect, values)
return actioncode, message
return DUNNO
except Exception, e:
self.logger.warning("Error encountered while contacting ICAP server (try %s of %s): %s" % (
i + 1, self.config.getint(self.section, 'retries'), str(e)))
self.logger.error("ICAP scan failed after %s retries" %
self.config.getint(self.section, 'retries'))
content = None
return self._problemcode()
def scan_stream(self, buf):
"""
Scan a buffer
buffer (string) : buffer to scan
return either :
- (dict) : {filename1: "virusname"}
- None if no virus found
"""
s = self.__init_socket__()
dr = {}
CRLF = "\r\n"
host = self.config.get(self.section, 'host')
port = self.config.get(self.section, 'port')
service = self.config.get(self.section, 'service')
buflen = len(buf)
# in theory, these fake headers are optional according to the ICAP errata
# and sophos docs
# but better be safe than sorry
fakerequestheader = "GET http://localhost/message.eml HTTP/1.1" + CRLF
fakerequestheader += "Host: localhost" + CRLF
fakerequestheader += CRLF
fakereqlen = len(fakerequestheader)
fakeresponseheader = "HTTP/1.1 200 OK" + CRLF
fakeresponseheader += "Content-Type: message/rfc822" + CRLF
fakeresponseheader += "Content-Length: " + str(buflen) + CRLF
fakeresponseheader += CRLF
fakeresplen = len(fakeresponseheader)
bodyparthexlen = hex(buflen)[2:]
bodypart = bodyparthexlen + CRLF
bodypart += buf + CRLF
bodypart += "0" + CRLF
hdrstart = 0
responsestart = fakereqlen
bodystart = fakereqlen + fakeresplen
# now that we know the length of the fake request/response, we can
# build the ICAP header
icapheader = ""
icapheader += "RESPMOD icap://%s:%s/%s ICAP/1.0 %s" % (
host, port, service, CRLF)
icapheader += "Host: " + host + CRLF
icapheader += "Allow: 204" + CRLF
icapheader += "Encapsulated: req-hdr=%s, res-hdr=%s, res-body=%s%s" % (
hdrstart, responsestart, bodystart, CRLF)
icapheader += CRLF
everything = icapheader + fakerequestheader + \
fakeresponseheader + bodypart + CRLF
s.sendall(everything)
result = s.recv(20000)
s.close()
sheader = "X-Violations-Found:"
if sheader.lower() in result.lower():
lines = result.split('\n')
lineidx = 0
for line in lines:
if sheader.lower() in line.lower():
numfound = int(line[len(sheader):])
# for each found virus, get 4 lines
for vircount in range(numfound):
infectedfile = lines[
lineidx + vircount * 4 + 1].strip()
infection = lines[lineidx + vircount * 4 + 2].strip()
dr[infectedfile] = infection
break
lineidx += 1
if dr == {}:
return None
else:
return dr
def __init_socket__(self):
icap_HOST = self.config.get(self.section, 'host')
unixsocket = False
try:
iport = int(self.config.get(self.section, 'port'))
except ValueError:
unixsocket = True
if unixsocket:
sock = self.config.get(self.section, 'port')
if not os.path.exists(sock):
raise Exception("unix socket %s not found" % sock)
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.settimeout(self.config.getint(self.section, 'timeout'))
try:
s.connect(sock)
except socket.error:
raise Exception(
'Could not reach ICAP server using unix socket %s' % sock)
else:
icap_PORT = int(self.config.get(self.section, 'port'))
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(self.config.getint(self.section, 'timeout'))
try:
s.connect((icap_HOST, icap_PORT))
except socket.error:
raise Exception(
'Could not reach ICAP server using network (%s, %s)' % (icap_HOST, icap_PORT))
return s
def lint(self):
viract = self.config.get(self.section, 'virusaction')
print "Virusaction: %s" % actioncode_to_string(string_to_actioncode(viract, self.config))
allok = (self.checkConfig() and self.lint_eicar())
return allok
def lint_eicar(self):
stream = """Date: Mon, 08 Sep 2008 17:33:54 +0200
To: [email protected]
From: [email protected]
Subject: test eicar attachment
X-Mailer: swaks v20061116.0 jetmore.org/john/code/#swaks
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="----=_MIME_BOUNDARY_000_12140"
------=_MIME_BOUNDARY_000_12140
Content-Type: text/plain
Eicar test
------=_MIME_BOUNDARY_000_12140
Content-Type: application/octet-stream
Content-Transfer-Encoding: BASE64
Content-Disposition: attachment
UEsDBAoAAAAAAGQ7WyUjS4psRgAAAEYAAAAJAAAAZWljYXIuY29tWDVPIVAlQEFQWzRcUFpYNTQo
UF4pN0NDKTd9JEVJQ0FSLVNUQU5EQVJELUFOVElWSVJVUy1URVNULUZJTEUhJEgrSCoNClBLAQIU
AAoAAAAAAGQ7WyUjS4psRgAAAEYAAAAJAAAAAAAAAAEAIAD/gQAAAABlaWNhci5jb21QSwUGAAAA
AAEAAQA3AAAAbQAAAAAA
------=_MIME_BOUNDARY_000_12140--"""
result = self.scan_stream(stream)
if result == None:
print "EICAR Test virus not found!"
return False
print "ICAP server found virus", result
return True
| [
"[email protected]"
] | |
5e9cf5ae03e925ad4d818c9b0637c412bbc60146 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02709/s022509829.py | dd9fa602873f6ee74e43f9bacf44dd9a2eee3894 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | import sys
input = sys.stdin.readline
from collections import deque
N = int(input())
#A = list(map(int, input().split()))
A = [(a, i) for i, a in enumerate(map(int, input().split()))]
A = sorted(A, reverse=True)
values = []
num_indcies = {}
for i, a in enumerate(A):
if not a in num_indcies:
num_indcies[a] = [i]
values.append(a)
else:
num_indcies[a].append(i)
values = sorted(values, reverse=True)
ans = 0
# indexの配列
dp_indices = []
for v in values:
dp_indices.extend(num_indcies[v])
dp = [[0] * (N+1) for _ in range(N+1)]
for no, (a, pos) in enumerate(A):
for i in range(no+1):
j = no - i
#k = dp_indices[i+j-2]
#a = A[k]
dp[i+1][j] = max(dp[i+1][j], dp[i][j] + a * (pos -i))
dp[i][j+1] = max(dp[i][j+1], dp[i][j] + a * abs(pos - (N-1-j)))
ans = 0
for i in range(1, N+1):
ans = max(ans, dp[i][N-i])
print(ans)
| [
"[email protected]"
] | |
d4821e8c303ca78433ad4c3a05246646382bc1c9 | 2c15283e394a392f3a2939753f47f767c30a7f3d | /PyBabel/bo.py | 4608880d037e6eefcd1e4538e9c34d261b581c0c | [] | no_license | amdens-sci/AutoDockTools_py3 | 06ae1c2273a848efdec5e1a96906489c89f5c4ab | 28ed47085026b2ac3edd12f75624b22c4db9fcd1 | refs/heads/master | 2023-08-19T02:59:14.843541 | 2021-03-23T17:18:43 | 2021-03-23T17:18:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,861 | py | # ##################################################################################################
# Disclaimer #
# This file is a python3 translation of AutoDockTools (v.1.5.7) #
# Modifications made by Valdes-Tresanco MS (https://github.com/Valdes-Tresanco-MS) #
# Tested by Valdes-Tresanco-MS and Valdes-Tresanco ME #
# There is no guarantee that it works like the original distribution, #
# but feel free to tell us if you get any difference to correct the code. #
# #
# Please use this cite the original reference. #
# If you think my work helps you, just keep this note intact on your program. #
# #
# Modification date: 10/5/20 22:05 #
# #
# ##################################################################################################
#############################################################################
#
# Author: Michel F. SANNER
# Reimplemented from Babel v1.6 from Pat Walters and Math Stahl
#
# Copyright: M. Sanner TSRI 2000
#
#############################################################################
"""
This file implements the BondOrder class that can be used to compute
bond order.
Before a BondOrder object can be used, atoms must have been assigned
a type see (AtomHybridization in types.py).
Bond order can be calculated using 2 different methods depending on whether
rings have been identified previously or not. Babel decides to use the first
method for molecules with more than 200 atoms and the second one else.
example:
>>> from PyBabel.atomTypes import AtomHybridization
>>> from PyBabel.cycle import RingFinder
>>> atype = AtomHybridization()
>>> atype.assignHybridization(atoms)
>>> bo = BondOrder()
>>> bo.assignBondOrder( atoms, bonds )
or
>>> atype = AtomHybridization()
>>> atype.assignHybridization(atoms)
>>> rings = RingFinder()
>>> rings.findRings(allAtoms, bonds)
>>> bo = BondOrder()
>>> bo.assignBondOrder( atoms, bonds, rings )
atoms has to be a list of atom objects
Atom:
a.coords : 3-sequence of floats
a.bonds : list of Bond objects
babel_type: string
babel_atomic_number: int
Bond:
b.atom1 : instance of Atom
b.atom2 : instance of Atom
after completion each bond has a 'bondOrder' attribute (integer)
reimplmentation of Babel1.6 in Python by Michel Sanner April 2000
Original code by W. Patrick Walters and Matthew T. Stahl
"""
from .atomTypes import TypeConverter
from .babelElements import babel_elements
from .util import *
SINGLE_DOUBLE_CUTOFF = 0.95
# SINGLE_DOUBLE_CUTOFF = 0.955
DOUBLE_TRIPLE_CUTOFF = 0.81
class BondOrder:
""" """
def assignBondOrder(self, atoms, bonds, rings=None):
""" """
if not rings:
self.assign_bond_order1(atoms, bonds)
else:
self.assign_bond_order2(atoms, bonds, rings)
def assign_bond_order1(self, atoms, bonds):
""" """
hyb_val = [0, 3, 2, 1]
converter = TypeConverter("HYB")
for a in atoms:
hyb = converter.convert(a.babel_type, 'dummy')
a._redo = hyb_val[int(hyb)]
# print a.full_name(), a.babel_type, hyb, a._redo
for b in bonds:
# initialize bondOrder attribute
if b.bondOrder is None:
b.bondOrder = 1
sum_code = b.atom1._redo + b.atom2._redo
# print b, sum_code
if sum_code == 6:
b.bondOrder = 3
elif sum_code == 4:
b.bondOrder = 2
else:
b.bondOrder = 1
if self.is_carboxyl(b):
b.bondOrder = 2
if b.bondOrder < 1 or b.bondOrder > 3:
print("Bond %s is wierd - Bond order is %d\n" % (b, b.bondOrder))
self.check_for_conjugation(atoms)
# cleanup
for a in atoms:
delattr(a, '_redo')
def is_carboxyl(self, bond):
""" """
c_end = 0
o_end = 0
check = 0
if bond.atom1.babel_type == "Cac" and bond.atom2.babel_type == 'O':
c_end = bond.atom1
o_end = bond.atom2
check = 1
if bond.atom2.babel_type == "Cac" and bond.atom1.babel_type == 'O':
check = 1
c_end = bond.atom2
o_end = bond.atom1
if check and len(o_end.bonds) == 1:
return 1
else:
return 0
def check_for_conjugation(self, atoms):
""" """
for a in atoms:
# if a.full_name()=='1crn: :ASN14:CG': raise
for b1 in a.bonds:
if b1.bondOrder <= 1:
continue
for b2 in a.bonds:
if b1 == b2:
continue
if b2.bondOrder <= 1:
continue
if len(b2.atom1.bonds) > 1 and len(b2.atom2.bonds) > 1:
b2.bondOrder = 1
def check_for_carbonyl(self, atom):
""" """
for b in atom.bonds:
bonded_atom = b.atom1
if bonded_atom == atom:
bonded_atom = b.atom2
if bonded_atom.babel_type == "O2" or bonded_atom.babel_type == "S2":
return 3
return 2
def assign_bond_order2(self, atoms, bonds, rings):
""" """
self.rings = rings
for a in atoms:
if hasattr(a, 'rings'):
a._redo = 1
else:
a._redo = 0
a._dot = 0
a._dbatom = 0
self.assign_hybrid_radii(atoms)
self.estimate_bond_order2(bonds)
for ring in self.rings.rings:
if len(ring['atoms']) == 5:
self.process_5_ring(ring)
for b in bonds:
# initialize bondOrder attribute
if b.bondOrder is None:
b.bondOrder = 1
b._dbbond = 0
if b.bondOrder == 2:
if len(b.atom1.bonds) > 1 and b.atom1.babel_type[0] == 'O':
b.bondOrder = 1
elif len(b.atom2.bonds) > 1 and b.atom2.babel_type[0] == 'O':
b.bondOrder = 1
for b in bonds:
if b.bondOrder == 2:
if len(b.atom1.bonds) == 1 and b.atom1.babel_type[0] == 'N':
b.bondOrder = 1
elif len(b.atom2.bonds) == 1 and b.atom2.babel_type[0] == 'N':
b.bondOrder = 1
## for b in bonds:
## if b.bondOrder > 1:
## print "%3d %3d"%(atoms.index(b.atom1)+1, atoms.index(b.atom2)+1)
for b in bonds:
if b.bondOrder > 1:
a1 = b.atom1
if a1._redo and self.check_for_carbonyl(a1) != 3:
a1._dot = 1
a2 = b.atom2
if a2._redo and self.check_for_carbonyl(a2) != 3:
a2._dot = 1
if len(a1.bonds) == 1 or len(a2.bonds) == 1:
a1._dot = 0
a2._dot = 0
## for a in atoms:
## if a._dot==1:
## print atoms.index(a)+1, a._dot
for a in atoms:
if a.babel_type == "Npl" and len(a.bonds) == 3:
a._dot = 0
## for a in atoms:
## if a._dot==1:
## print atoms.index(a)+1, a._dot
self.atoms = atoms
self.cycles = 0
self.bondStack = []
self.bondChoice = []
# for PYTHON interpreters after 1.5 the recursion depth is limited
# this method can exceed the default recursion depth
import sys
if float(sys.version[:3]) > 1.5:
sys.setrecursionlimit(20000)
self.connect_the_dots(0, 0)
for b in self.bondStack:
if b.bondOrder > 1:
b._dbbond = 1
b.atom1._dbatom = 1
b.atom2._dbatom = 1
for b in bonds:
if b.atom1.babel_type == "O2" or b.atom2.babel_type == "O2":
b._dbbond = 1
b.atom1._dbatom = 1
b.atom2._dbatom = 1
elif b.atom1.babel_type == "O-" and len(b.atom1.bonds) == 1:
b._dbbond = 1
b.atom1._dbatom = 1
b.atom2._dbatom = 1
elif b.atom2.babel_type == "O-" and len(b.atom2.bonds) == 1:
b._dbbond = 1
b.atom1._dbatom = 1
b.atom2._dbatom = 1
for a in atoms:
a._dot = 0
for b in bonds:
if b.bondOrder > 1:
a1 = b.atom1
a2 = b.atom2
if a1._dbatom == 0 and a2._dbatom == 0:
a1._dot = 1
a2._dot = 1
self.bondStack = []
self.bondChoice = []
self.connect_the_dots(0, 0)
for b in self.bondStack:
if b.bondOrder > 1:
b._dbbond = 1
## for b in bo.bondStack:
## a1 = b.atom1
## a2 = b.atom2
## print a1.number, a2.number, a1._dot, a2._dot, b.bondOrder
for b in self.bondStack:
b._dbbond = 1
for b in bonds:
if not b._dbbond:
b.bondOrder = 1
for a in atoms:
a._redo = 0
for b in a.bonds:
a._redo = a._redo + b.bondOrder
if (a.babel_atomic_number == 6 or a.babel_atomic_number) == 7 and \
a._redo > 4:
for b in a.bonds:
conn = None
if b.bondOrder == 2:
b.bondOrder = 1
# cleanup
for a in atoms:
delattr(a, '_dot')
delattr(a, '_dbatom')
delattr(a, '_redo')
for b in bonds:
delattr(b, '_dbbond')
delattr(self, 'atoms')
delattr(self, 'cycles')
delattr(self, 'rings')
## for b in bonds:
## n = b.atom1.number
## print "%4i %4i %2i"%(n, b.atom2.number, b.bondOrder)
def connect_the_dots(self, atom, start):
""" """
a = self.atoms[atom]
if start == len(a.bonds):
return
# print 'AAA', atom+1
if a._dot:
done = 0
i = start
for b in a.bonds[start:]:
con = b.atom1
if con == a:
con = b.atom2
if con._dot:
self.bondStack.append(b)
self.bondChoice.append(0)
if a == b.atom1:
self.bondChoice[-1] = i + 1
else:
self.bondChoice[-1] = -i - 1
a._dot = a._dot - 1
con._dot = con._dot - 1
done = 1
break
i = i + 1
if not done and len(self.bondStack):
b = self.bondStack[-1]
if self.bondChoice[-1] > 0:
new_atm = b.atom1
else:
new_atm = b.atom2
choice_bnd = abs(self.bondChoice[-1])
self.bondChoice = self.bondChoice[:-1]
self.bondStack = self.bondStack[:-1]
b.atom1._dot = b.atom1._dot + 1
b.atom2._dot = b.atom2._dot + 1
# print 'BBB', self.atoms.index(new_atm)+1
self.connect_the_dots(self.atoms.index(new_atm), choice_bnd)
if self.cycles > 10000:
# print 'EEE'
return
if atom + 1 == len(self.atoms):
# print 'DDD'
return
else:
self.cycles = self.cycles + 1
# print 'CCC', atom+2
self.connect_the_dots(atom + 1, 0)
def get_bond(self, bonds, a1, a2):
""" """
for b in bonds:
if (b.atom1 == a1 and b.atom2 == a2) or (b.atom1 == a2 and b.atom2 == a1):
return b
    def process_5_ring(self, ring):
        """Assign bond orders around a 5-membered ring.

        For each ring bond, when the torsion angle about that bond is nearly
        planar (|t| < 7 degrees), set it to a single bond and promote it to a
        double bond if it is short relative to the summed bond-order radii.
        """
        atoms = ring['atoms']
        for i in range(5):
            a1 = atoms[i]
            a2 = atoms[(i + 1) % 5]
            # Torsion defined by the ring neighbours on either side of the bond.
            t = torsion_angle(atoms[i - 1].coords, a1.coords,
                              a2.coords, atoms[(i + 2) % 5].coords)
            if math.fabs(t) < 7.0:
                bond = self.get_bond(ring['bonds'], a1, a2)
                bond.bondOrder = 1
                dist = distance(a1.coords, a2.coords)
                cov_sum = a1.babel_bond_ord_rad + a2.babel_bond_ord_rad
                ratio = dist / cov_sum
                if ratio < SINGLE_DOUBLE_CUTOFF:
                    bond.bondOrder = 2
def estimate_bond_order2(self, bonds):
""" """
converter = TypeConverter("HYB")
for b in bonds:
bo = 1
a1 = b.atom1
a2 = b.atom2
dist = distance(a1.coords, a2.coords)
cov_sum = a1.babel_bond_ord_rad + a2.babel_bond_ord_rad
ratio = dist / cov_sum
start_type = converter.convert(a1.babel_type, "all_caps")
end_type = converter.convert(a2.babel_type, "all_caps")
if ratio <= DOUBLE_TRIPLE_CUTOFF:
if start_type[0] == '1' and end_type[0] == '1':
bo = 3
elif ratio <= SINGLE_DOUBLE_CUTOFF:
if start_type[0] == '2' and end_type[0] == '2':
bo = 2
b.bondOrder = bo
def assign_hybrid_radii(self, atoms):
""" """
converter = TypeConverter("XYZ")
for a in atoms:
atm_type = converter.convert(a.babel_type, 'zero')
if atm_type == 0:
atm_type = a.babel_type
atm_type = converter.clean_atom_type(atm_type)
a.babel_cov_rad = babel_elements[atm_type]['cov_rad']
a.babel_bond_ord_rad = babel_elements[atm_type]['bond_ord_rad']
a.babel_max_bonds = babel_elements[atm_type]['max_bonds']
if __name__ == '__main__':
import sys
from .atomTypes import AtomHybridization
from .cycle import RingFinder
from MolKit.pdbParser import NewPdbParser
parser = NewPdbParser("/tsri/pdb/struct/%s.pdb" % sys.argv[1])
mols = parser.parse()
mol = mols[0]
mol.buildBondsByDistance()
allAtoms = mol.chains.residues.atoms
bonds = allAtoms.bonds[0]
print("assigning atom types")
babel = AtomHybridization()
babel.assignHybridization(allAtoms)
print("looking for rings")
rings = RingFinder()
rings.findRings(allAtoms, bonds)
print("assigning bond order")
bo = BondOrder()
# pdb.run("bo.assignBondOrder(allAtoms, bonds)")
bo.assignBondOrder(allAtoms, bonds)
for b in bonds:
if b.bondOrder > 1:
a1 = b.atom1
a2 = b.atom2
print('%-20s %-20s %d' % (a1.full_name(), a2.full_name(),
b.bondOrder))
| [
"[email protected]"
] | |
3d76924803db335c9cb94bb42f4444f162c2d2ae | 936f72b46215b89b277ffd57256e54f727ce1ac5 | /spark-comp04/token.py | 3147a73cbc6b3be806e113977983bf177f1a4f32 | [] | no_license | luizirber/dc-compilers | 91dc99097d628339b53b20a0c0f2a6255a599b7a | 4a47e786583c5f50cac2ac3a35de195f7be7a735 | refs/heads/master | 2016-09-06T11:27:51.815748 | 2012-07-03T01:28:26 | 2012-07-03T01:28:26 | 41,540 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | class Token(object):
def __init__(self, type, attr=None, lineno='???'):
self.type = type
self.attr = attr
self.lineno = lineno
def __cmp__(self, o):
return cmp(self.type, o)
def __repr__(self):
return self.attr or self.type
| [
"[email protected]"
] | |
f3822c56be1305e7b55915ab88f6b4e8ff7f9704 | 62587160029c7c79b5d11f16e8beae4afa1c4834 | /webpages/island_scraper_kyero/island_scraper/middlewares.py | f34dd9c19c21b5524d2483086acae265764a8f49 | [] | no_license | LukaszMalucha/Scrapy-Collection | b11dcf2c09f33d190e506559d978e4f3b77f9f5a | 586f23b90aa984c22ea8f84eba664db9649ed780 | refs/heads/master | 2022-12-14T15:06:00.868322 | 2021-07-27T12:09:07 | 2021-07-27T12:09:07 | 144,448,351 | 3 | 0 | null | 2022-11-22T03:16:19 | 2018-08-12T07:55:05 | Python | UTF-8 | Python | false | false | 3,611 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class IslandScraperSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class IslandScraperDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| [
"[email protected]"
] | |
6544fcf260d6f8112c79a5e3a5ec70a10575a277 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/pg_1425+219/sdB_PG_1425+219_lc.py | e8ee83c9f091b54b888664988d5fb0c6cd57aee1 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[216.986042,21.632814], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_PG_1425+219 /sdB_PG_1425+219_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
48b1cfe1f2c159159035fd8b8781a2df3fb2ffde | b11a5afd6682fe003445431ab60a9273a8680c23 | /language/nqg/tasks/spider/write_dataset.py | b2ed9f1018cf872e2b4933c9712c698deaeb8e52 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | Srividya-me/language | a874b11783e94da7747fc9a1b0ae1661cd5c9d4a | 61fa7260ac7d690d11ef72ca863e45a37c0bdc80 | refs/heads/master | 2023-08-28T10:30:59.688879 | 2021-11-12T22:31:56 | 2021-11-13T01:04:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,111 | py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Write Spider dataset in TSV format."""
import json
from absl import app
from absl import flags
from language.nqg.tasks import tsv_utils
from language.nqg.tasks.spider import database_constants
from tensorflow.io import gfile
FLAGS = flags.FLAGS
flags.DEFINE_string("examples", "", "Path to Spider json examples.")
flags.DEFINE_string("output", "", "Output tsv file.")
flags.DEFINE_bool(
"filter_by_database", True,
"Whether to only select examples for databases used for the Spider-SSP"
"setting proposed in the paper. Should be False to follow the standard"
"Spider-XSP setting.")
def normalize_whitespace(source):
tokens = source.split()
return " ".join(tokens)
def load_json(filepath):
with gfile.GFile(filepath, "r") as reader:
text = reader.read()
return json.loads(text)
def main(unused_argv):
examples_json = load_json(FLAGS.examples)
examples = []
for example_json in examples_json:
database = example_json["db_id"]
source = example_json["question"]
target = example_json["query"]
# Optionally skip if database not in set of databases with >= 50 examples.
if (FLAGS.filter_by_database and
database not in database_constants.DATABASES):
continue
# Prepend database.
source = "%s: %s" % (database, source)
target = normalize_whitespace(target)
examples.append((source.lower(), target.lower()))
tsv_utils.write_tsv(examples, FLAGS.output)
if __name__ == "__main__":
app.run(main)
| [
"[email protected]"
] | |
4dee5e0f7b4cc51baf47bb8c3e8933d77c641c85 | c47340ae6bcac6002961cc2c6d2fecb353c1e502 | /controlm_py/models/fts_general_settings.py | 9eab4959b0d808a9b66815d288744bfd62ea5263 | [
"MIT"
] | permissive | rafaeldelrey/controlm_py | 6d9f56b8b6e72750f329d85b932ace6c41002cbd | ed1eb648d1d23e587321227217cbfcc5065535ab | refs/heads/main | 2023-04-23T09:01:32.024725 | 2021-05-19T00:25:53 | 2021-05-19T00:25:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,131 | py | # coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.115
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class FtsGeneralSettings(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'home_directory': 'str',
'multiple_login_allowed': 'bool',
'max_open_sessions': 'int',
'max_login_failures': 'int',
'delay_after_login_failure': 'int',
'throttling_activated': 'bool',
'max_simultaneous_uploads': 'int',
'max_simultaneous_downloads': 'int'
}
attribute_map = {
'home_directory': 'homeDirectory',
'multiple_login_allowed': 'multipleLoginAllowed',
'max_open_sessions': 'maxOpenSessions',
'max_login_failures': 'maxLoginFailures',
'delay_after_login_failure': 'delayAfterLoginFailure',
'throttling_activated': 'throttlingActivated',
'max_simultaneous_uploads': 'maxSimultaneousUploads',
'max_simultaneous_downloads': 'maxSimultaneousDownloads'
}
def __init__(self, home_directory=None, multiple_login_allowed=None, max_open_sessions=None, max_login_failures=None, delay_after_login_failure=None, throttling_activated=None, max_simultaneous_uploads=None, max_simultaneous_downloads=None): # noqa: E501
"""FtsGeneralSettings - a model defined in Swagger""" # noqa: E501
self._home_directory = None
self._multiple_login_allowed = None
self._max_open_sessions = None
self._max_login_failures = None
self._delay_after_login_failure = None
self._throttling_activated = None
self._max_simultaneous_uploads = None
self._max_simultaneous_downloads = None
self.discriminator = None
if home_directory is not None:
self.home_directory = home_directory
if multiple_login_allowed is not None:
self.multiple_login_allowed = multiple_login_allowed
if max_open_sessions is not None:
self.max_open_sessions = max_open_sessions
if max_login_failures is not None:
self.max_login_failures = max_login_failures
if delay_after_login_failure is not None:
self.delay_after_login_failure = delay_after_login_failure
if throttling_activated is not None:
self.throttling_activated = throttling_activated
if max_simultaneous_uploads is not None:
self.max_simultaneous_uploads = max_simultaneous_uploads
if max_simultaneous_downloads is not None:
self.max_simultaneous_downloads = max_simultaneous_downloads
@property
def home_directory(self):
"""Gets the home_directory of this FtsGeneralSettings. # noqa: E501
Root path where transferred files are stored. If you want to use a different directory for each logged in user, you must add /${userName} to the path. # noqa: E501
:return: The home_directory of this FtsGeneralSettings. # noqa: E501
:rtype: str
"""
return self._home_directory
@home_directory.setter
def home_directory(self, home_directory):
"""Sets the home_directory of this FtsGeneralSettings.
Root path where transferred files are stored. If you want to use a different directory for each logged in user, you must add /${userName} to the path. # noqa: E501
:param home_directory: The home_directory of this FtsGeneralSettings. # noqa: E501
:type: str
"""
self._home_directory = home_directory
@property
def multiple_login_allowed(self):
"""Gets the multiple_login_allowed of this FtsGeneralSettings. # noqa: E501
Allow multiple open sessions # noqa: E501
:return: The multiple_login_allowed of this FtsGeneralSettings. # noqa: E501
:rtype: bool
"""
return self._multiple_login_allowed
@multiple_login_allowed.setter
def multiple_login_allowed(self, multiple_login_allowed):
"""Sets the multiple_login_allowed of this FtsGeneralSettings.
Allow multiple open sessions # noqa: E501
:param multiple_login_allowed: The multiple_login_allowed of this FtsGeneralSettings. # noqa: E501
:type: bool
"""
self._multiple_login_allowed = multiple_login_allowed
@property
def max_open_sessions(self):
"""Gets the max_open_sessions of this FtsGeneralSettings. # noqa: E501
Maximum concurrent open sessions # noqa: E501
:return: The max_open_sessions of this FtsGeneralSettings. # noqa: E501
:rtype: int
"""
return self._max_open_sessions
@max_open_sessions.setter
def max_open_sessions(self, max_open_sessions):
"""Sets the max_open_sessions of this FtsGeneralSettings.
Maximum concurrent open sessions # noqa: E501
:param max_open_sessions: The max_open_sessions of this FtsGeneralSettings. # noqa: E501
:type: int
"""
self._max_open_sessions = max_open_sessions
@property
def max_login_failures(self):
"""Gets the max_login_failures of this FtsGeneralSettings. # noqa: E501
Number of retries before the server closes the connection # noqa: E501
:return: The max_login_failures of this FtsGeneralSettings. # noqa: E501
:rtype: int
"""
return self._max_login_failures
@max_login_failures.setter
def max_login_failures(self, max_login_failures):
"""Sets the max_login_failures of this FtsGeneralSettings.
Number of retries before the server closes the connection # noqa: E501
:param max_login_failures: The max_login_failures of this FtsGeneralSettings. # noqa: E501
:type: int
"""
self._max_login_failures = max_login_failures
@property
def delay_after_login_failure(self):
"""Gets the delay_after_login_failure of this FtsGeneralSettings. # noqa: E501
        Waiting time, in seconds, before the user may attempt another login # noqa: E501
:return: The delay_after_login_failure of this FtsGeneralSettings. # noqa: E501
:rtype: int
"""
return self._delay_after_login_failure
@delay_after_login_failure.setter
def delay_after_login_failure(self, delay_after_login_failure):
"""Sets the delay_after_login_failure of this FtsGeneralSettings.
        Waiting time, in seconds, before the user may attempt another login # noqa: E501
:param delay_after_login_failure: The delay_after_login_failure of this FtsGeneralSettings. # noqa: E501
:type: int
"""
self._delay_after_login_failure = delay_after_login_failure
@property
def throttling_activated(self):
"""Gets the throttling_activated of this FtsGeneralSettings. # noqa: E501
Allow bandwidth throttling # noqa: E501
:return: The throttling_activated of this FtsGeneralSettings. # noqa: E501
:rtype: bool
"""
return self._throttling_activated
@throttling_activated.setter
def throttling_activated(self, throttling_activated):
"""Sets the throttling_activated of this FtsGeneralSettings.
Allow bandwidth throttling # noqa: E501
:param throttling_activated: The throttling_activated of this FtsGeneralSettings. # noqa: E501
:type: bool
"""
self._throttling_activated = throttling_activated
@property
def max_simultaneous_uploads(self):
"""Gets the max_simultaneous_uploads of this FtsGeneralSettings. # noqa: E501
        Maximum simultaneous uploads # noqa: E501
:return: The max_simultaneous_uploads of this FtsGeneralSettings. # noqa: E501
:rtype: int
"""
return self._max_simultaneous_uploads
@max_simultaneous_uploads.setter
def max_simultaneous_uploads(self, max_simultaneous_uploads):
"""Sets the max_simultaneous_uploads of this FtsGeneralSettings.
        Maximum simultaneous uploads # noqa: E501
:param max_simultaneous_uploads: The max_simultaneous_uploads of this FtsGeneralSettings. # noqa: E501
:type: int
"""
self._max_simultaneous_uploads = max_simultaneous_uploads
@property
def max_simultaneous_downloads(self):
"""Gets the max_simultaneous_downloads of this FtsGeneralSettings. # noqa: E501
        Maximum simultaneous downloads # noqa: E501
:return: The max_simultaneous_downloads of this FtsGeneralSettings. # noqa: E501
:rtype: int
"""
return self._max_simultaneous_downloads
@max_simultaneous_downloads.setter
def max_simultaneous_downloads(self, max_simultaneous_downloads):
"""Sets the max_simultaneous_downloads of this FtsGeneralSettings.
        Maximum simultaneous downloads # noqa: E501
:param max_simultaneous_downloads: The max_simultaneous_downloads of this FtsGeneralSettings. # noqa: E501
:type: int
"""
self._max_simultaneous_downloads = max_simultaneous_downloads
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(FtsGeneralSettings, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FtsGeneralSettings):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
6ff66a5e7100cbdd1877f359622be88b41e19b2c | c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce | /flask/flaskenv/Lib/site-packages/keras_applications/inception_v3.py | 1b825c0ce4aea562e468b337a5843f63810f57d5 | [] | no_license | AhsonAslam/webapi | 54cf7466aac4685da1105f9fb84c686e38f92121 | 1b2bfa4614e7afdc57c9210b0674506ea70b20b5 | refs/heads/master | 2020-07-27T06:05:36.057953 | 2019-09-17T06:35:33 | 2019-09-17T06:35:33 | 208,895,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:6bdeecc0c5e0341451f5d87e17d12c89a210b6161e1b066aca6e02bc425b2abf
size 14598
| [
"github@cuba12345"
] | github@cuba12345 |
8d6b7100e1ca9bc1aa8edd46c91935f9aebbd87e | 3a1be455fc5e117bd8792ed46c59793f8b29a01f | /python/paddle/distributed/sharding/group_sharded.py | 6fd4caa7b4a5c41e73fcf95ac50d0253bb3e7c79 | [
"Apache-2.0"
] | permissive | liyancas/Paddle | 42d5e7c71c37b4e63bf54e6e31e82e40aef044ce | 98303291d27cb831b19111d82793159cbe9a85ca | refs/heads/develop | 2022-05-21T03:27:16.497238 | 2022-04-01T00:52:17 | 2022-04-01T00:52:17 | 72,499,865 | 0 | 0 | Apache-2.0 | 2022-02-11T08:16:37 | 2016-11-01T03:17:41 | Python | UTF-8 | Python | false | false | 9,839 | py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
from enum import Enum
import paddle
from paddle.optimizer import Optimizer
from paddle.distributed.utils import get_logger
from paddle.distributed.fleet.meta_optimizers.dygraph_optimizer.sharding_optimizer_stage2 import ShardingOptimizerStage2
from paddle.distributed.fleet.meta_parallel.sharding.sharding_stage2 import ShardingStage2
from paddle.distributed.fleet.meta_parallel.sharding.sharding_stage3 import ShardingStage3
from paddle.distributed.fleet.meta_parallel.sharding.sharding_utils import ShardingScaler
logger_ = get_logger(logging.INFO)
def group_sharded_parallel(model,
optimizer,
level,
scaler=None,
group=None,
offload=False,
sync_buffers=False,
buffer_max_size=2**23,
segment_size=2**20,
sync_comm=False):
"""
    group_sharded_parallel applies group-sharded configuration to the given model, optimizer and GradScaler. The level argument accepts three strings, 'os', 'os_g' and 'p_g_os', corresponding to three usage scenarios: optimizer-state segmentation, optimizer-state + gradient segmentation, and parameter + gradient + optimizer-state segmentation.
    In practice, optimizer-state + gradient segmentation is a further optimization of optimizer-state segmentation, so 'os_g' can also be used where plain optimizer-state segmentation is wanted.
Args:
model (Layer): The layer to be wrapped with group_sharded_parallel.
optimizer (Optimizer): The optimizer to be wrapped with group_sharded_parallel.
level (str): The different level of the group sharded. Such as `os`, `os_g`, `p_g_os`.
scaler (GradScaler, optional): If AMP is used, you need to pass GradScaler. Defaults to None, indicating that GradScaler is not used.
group (Group, optional): The group instance. Defaults to None, indicating that the default environment group is used.
offload (bool, optional): Whether to use the offload function. Defaults to False, which means that the offload function is not used.
sync_buffers (bool, optional): Whether to broadcast model buffers. It is generally used when there are registered model buffers. Defaults to False, indicating that model buffers are not used.
buffer_max_size (int, optional): The max size of the buffer used to integrate gradient in `os_g`. The larger the size, the more GPU memory will be used. Defaults to 2**23, which means that the dimension of the buffer is 2**23.
segment_size (int, optional): The smallest size of parameter to be sharded in `p_g_os`. Defaults to 2**20, indicating that the dimension of the minimum segmented parameter is 2**20.
sync_comm (bool, optional): Whether to use synchronous communication, only in `p_g_os` used. Defaults to False, indicating that asynchronous communication is used.
Returns:
model: A wrapper for group sharded given model.
optimizer: A wrapper for group sharded given optimizer.
scaler: A wrapper for group sharded given scaler.
Examples:
.. code-block:: python
# required: distributed
import paddle
from paddle.fluid.dygraph.nn import Linear
from paddle.distributed import fleet
from paddle.distributed.sharding import group_sharded_parallel
fleet.init(is_collective=True)
group = paddle.distributed.new_group([0, 1])
model = Linear(1000, 1000)
clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0)
            optimizer = paddle.optimizer.AdamW(learning_rate=0.001, parameters=model.parameters(), weight_decay=0.00001, grad_clip=clip)
            scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
            # wrap sharding model, optimizer and scaler
            model, optimizer, scaler = group_sharded_parallel(model, optimizer, "os_g", scaler=scaler)
img, label = data
label.stop_gradient = True
img.stop_gradient = True
out = model(img)
loss = paddle.nn.functional.cross_entropy(input=out, label=label)
loss.backward()
optimizer.step()
optimizer.clear_grad()
"""
    # check option types
assert isinstance(
model,
paddle.nn.Layer), "The model must be the instance of paddle.nn.Layer."
assert isinstance(
optimizer, Optimizer
), "The optimizer must be the instance of paddle.optimizer.Optimizer."
assert level in ['os', 'os_g', 'p_g_os'
], "The level must be os, os_g or p_g_os."
def check_dtype(param):
return param.dtype == paddle.float16
params_fp16 = list(filter(check_dtype, model.parameters()))
if scaler is None and len(params_fp16) > 0:
raise ValueError("Please enter the correct scaler.")
# convert model/optimizer/scaler
if level in ['os', 'os_g']:
logger_.info("*" * 30)
logger_.info("Sharded level os uses sharded level os_g achieved now.")
logger_.info("*" * 30)
optimizer = ShardingOptimizerStage2(
params=model.parameters(),
optim=optimizer,
group=group,
offload=offload)
model = ShardingStage2(
model,
optimizer,
group=group,
sync_buffers=sync_buffers,
buffer_max_size=buffer_max_size)
elif level == 'p_g_os':
model = ShardingStage3(
model,
optimizer=optimizer,
group=group,
sync_buffers=sync_buffers,
segment_size=segment_size,
offload=offload,
sync_comm=sync_comm)
else:
raise ValueError("Please enter the correct level.")
if params_fp16 and isinstance(scaler, paddle.amp.GradScaler):
scaler = ShardingScaler(scaler)
logger_.info("*" * 30)
logger_.info(
"If there is a communication hang using group sharded, please check whether the communication operations of each process are unified."
)
logger_.info("*" * 30)
return model, optimizer, scaler
def save_group_sharded_model(model, output, optimizer=None):
"""
Group sharded encapsulated model and optimizer state saving module.
.. note::
        If the model is saved with save_group_sharded_model, then when loading it again you need to set the model and optimizer state before using group_sharded_parallel.
Args:
model (Layer): A wrapper for group sharded given model.
output (str): Save directory.
optimizer (Optimizer, optional): Group sharded encapsulated optimizer. Defaults to None, indicating that the optimizer state is not saved.
Examples:
.. code-block:: python
# required: distributed
import paddle
from paddle.fluid.dygraph.nn import Linear
from paddle.distributed import fleet
from paddle.distributed.sharding import group_sharded_parallel, save_group_sharded_model
fleet.init(is_collective=True)
group = paddle.distributed.new_group([0, 1])
model = Linear(1000, 1000)
clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0)
            optimizer = paddle.optimizer.AdamW(learning_rate=0.001, parameters=model.parameters(), weight_decay=0.00001, grad_clip=clip)
            scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
            # wrap sharding model, optimizer and scaler
            model, optimizer, scaler = group_sharded_parallel(model, optimizer, "os_g", scaler=scaler)
img, label = data
label.stop_gradient = True
img.stop_gradient = True
out = model(img)
loss = paddle.nn.functional.cross_entropy(input=out, label=label)
loss.backward()
optimizer.step()
optimizer.clear_grad()
# save model and optimizer state_dict
            save_group_sharded_model(model, output=output_dir, optimizer=optimizer)
"""
logger_.info(
"==========Begin to save group sharded model and optimizer==========")
assert not os.path.isfile(
output
), "Saving directory ({}) should be a directory, not a file".format(output)
os.makedirs(output, exist_ok=True)
output_model = os.path.join(output, "model.pdmodel")
if isinstance(model, ShardingStage2):
paddle.save(model._layer.state_dict(), output_model)
elif isinstance(model, ShardingStage3):
convert2cpu = True if model._offload else False
model.get_all_parameters(convert2cpu=convert2cpu)
paddle.save(model._layer.state_dict(), output_model)
else:
raise ValueError(
"Please use the layer which is wrapped with group_sharded_parallel.")
if optimizer is not None:
assert hasattr(
optimizer, "_optim"
), "Please use the optimizer which is wrapped with group_sharded_parallel."
output_opt = os.path.join(output, "model.pdopt")
paddle.save(optimizer._optim.state_dict(), output_opt)
logger_.info(
"==========End to save group sharded model and optimizer==========")
| [
"[email protected]"
] | |
568aa59ae896f8dcad1d6c4c19a117a22a0ff63c | c4d05bf624ce277b35d83ba8ba9636f26043280e | /project/urls.py | d6e90307036ceed43e1f6355ce2dc672ebb0e233 | [
"Apache-2.0"
] | permissive | DrMartiner/kaptilo_back | 2366b3a2b5c9bd9dc57c9091ff5fd0025963668d | df7f716030edbb1a70388fcbb808b0985dabefbf | refs/heads/main | 2023-04-09T03:12:52.274388 | 2021-03-22T09:48:39 | 2021-03-22T09:48:39 | 349,943,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.urls import path, include
from apps.link.views import OriginalLinkRedirectView
admin.site.site_header = "Kaptilo"
admin.site.site_title = "Kaptilo"
admin.site.index_title = "Welcome to Kaptilo admin-panel"
urlpatterns = [
path("<str:uuid>/", OriginalLinkRedirectView.as_view(), name="original-link-redirect"),
path("api/v1/", include(("apps.api.urls", "apps.api"), namespace="api_v1")),
path("admin/super-sec/", admin.site.urls),
path("admin/", include("admin_honeypot.urls", namespace="admin_honeypot")),
]
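# Note: "<str:uuid>/" is declared first and matches any single-segment URL, so a request
# to "admin/" is resolved by OriginalLinkRedirectView instead of the admin_honeypot
# include below it; only multi-segment paths such as "admin/super-sec/" and "api/v1/..."
# reach the later patterns.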
if settings.DEBUG:
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
] | |
668963624d3086f1b1dd35cf080200af75bf8736 | 191a7f83d964f74a2b3c7faeb4fc47d9c63d521f | /.history/main_20210523152045.py | 6d7861a88a7d86a28f1d8d675b4416ba674fb3c2 | [] | no_license | AndreLiu1225/Kinder-Values-Survey | 2a317feee8d5b17c27da2b2116742656e35d8ab9 | 090c27da0c822abb7dfc0ec6e13ae1b3dcb7bbf3 | refs/heads/master | 2023-05-03T00:26:00.481423 | 2021-06-04T03:24:19 | 2021-06-04T03:24:19 | 371,989,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,795 | py | from flask import Flask, render_template, redirect, url_for
from flask_wtf import FlaskForm
from wtforms import StringField, TextField, SubmitField, IntegerField, SelectField, RadioField
from wtforms.validators import DataRequired, Email, EqualTo, Length, ValidationError
app = Flask(__name__)
app.config['SECRET_KEY'] = "0c8973c8a5e001bb0c816a7b56c84f3a"
class MCQ(FlaskForm):
age = IntegerField("Please enter your age", validators=[DataRequired()])
profession = StringField("What is your profession?", validators=[DataRequired(), Length(max=30)])
power = RadioField("Do you desire a higher social status and dominance over others?", choices=[('Yes', 'It is my priority'), ('No', 'It is not my priority')])
tradition = RadioField("Do you care preservingabout tradition", choices=[('Yes', 'It is my priority'), ('No', 'It is not my priority')])
achievement = RadioField("Do you desire a higher social status and dominance over others?", choices=[('Yes', 'It is my priority'), ('No', 'It is not my priority')])
stimulation = RadioField("Do you desire a higher social status and dominance over others?", choices=[('Yes', 'It is my priority'), ('No', 'It is not my priority')])
hedonism = RadioField("Do you desire a higher social status and dominance over others?", choices=[('Yes', 'It is my priority'), ('No', 'It is not my priority')])
conformity = RadioField("Do you desire a higher social status and dominance over others?", choices=[('Yes', 'It is my priority'), ('No', 'It is not my priority')])
self_direction = RadioField("Do you desire a higher social status and dominance over others?", choices=[('Yes', 'It is my priority'), ('No', 'It is not my priority')])
submit = SubmitField("Submit")
if __name__ == "__main__":
app.run(debug=True)
| [
"[email protected]"
] | |
8065d754386fc0b3762e05f4fc04a7f53121086e | 9da6c375dbf1af87622a2ba0fb773e8f513d8021 | /cli/bak.20200512-local/abcombo.py | a267f8c6d9d445c64cdd848a3d93c27eb4e147ce | [] | no_license | wri/tree_canopy_fcn | a80a9971403f6ca2548d44146ed08aa22d7d559e | 78f742e4e26e34008417468f73413643edde801e | refs/heads/master | 2022-10-11T03:25:41.503263 | 2020-06-16T12:39:21 | 2020-06-16T12:39:21 | 236,492,565 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,895 | py | import os,sys
PROJECT_DIR='/home/ericp/tree_canopy_fcn/repo'
sys.path.append(PROJECT_DIR)
from pprint import pprint
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch_kit.loss import MaskedLoss
import torch_kit.functional as F
from torch_kit.optimizers.radam import RAdam
import pytorch_models.deeplab.model as dm
import pytorch_models.unet.model as um
from utils.dataloader import HeightIndexDataset, CATEGORY_BOUNDS
from config import BUILTUP_CATEGORY_THRESHOLDS
#
# RUN CONFIG
#
BATCH_SIZE=8
DEFAULT_OPTIMIZER='adam'
LRS=[1e-3,1e-4]
NB_CATEGORIES=len(CATEGORY_BOUNDS)+1
# # AB STATS: ALL
# MEANS=[100.83741572079242, 100.4938850966076, 86.63500986931308, 118.72746674454453]
# STDEVS=[42.098045003124774, 39.07388735786421, 39.629813116928815, 34.72351480486876]
# DSETS_PATH='../datasets/los_angeles-plieades-lidar_USGS_LPC_CA_LosAngeles_2016_LAS_2018.STATS.csv'
# AB STATS: 2015,16 Train/valid
MEANS=[94.79936157686979, 92.8912348691044, 80.50194782393349, 108.14889758142212]
STDEVS=[36.37876660224377, 33.22686387734999, 33.30808192430284, 30.075380846943716]
DSETS_PATH='../datasets/los_angeles-plieades_naip-lidar_USGS_LPC_CA_LosAngeles_2016_LAS_2018.STATS.csv'
YEAR_MAX=2016
# # NAIP STATS: ALL (<2017)
# MEANS=[106.47083152919251, 104.25520495313522, 98.61836143687523, 119.95594400425841]
# STDEVS=[38.23711386806666, 34.410688920150264, 31.468324931640534, 31.831786730471276]
# DSET_PATH=f'{PROJECT_DIR}/datasets/los_angeles-naip-lidar_USGS_LPC_CA_LosAngeles_2016_LAS_2018.STATS.csv'
# # NAIP ONLY
# IBNDS={
# '4': { 'min': 0 }, # ndvi
# '5': { 'min': -0.35} # ndwi
# }
# # PLIEDES INPUT
IBNDS=None
#
# TORCH_KIT CLI
#
def model(**cfig):
_header('model',cfig)
model_type=cfig.pop('type','dlv3p')
cfig['out_ch']=cfig.get('out_ch',NB_CATEGORIES)
if model_type=='dlv3p':
mod=dm.DeeplabV3plus(**cfig)
elif model_type=='unet':
mod=um.UNet(**cfig)
else:
raise ValueError(f'model_type ({model_type}) not implemented')
if torch.cuda.is_available():
mod=mod.cuda()
return mod
def criterion(**cfig):
ignore_index=cfig.get('ignore_index')
weights=cfig.get('weights')
print("criterion:",ignore_index,weights)
if weights:
weights=torch.Tensor(weights)
if torch.cuda.is_available():
weights=weights.cuda()
if ignore_index is not None:
criterion=nn.CrossEntropyLoss(weight=weights,ignore_index=ignore_index)
# criterion=MaskedLoss(
# weight=weights,
# loss_type='ce',
# mask_value=ignore_index )
else:
criterion=nn.CrossEntropyLoss(weight=weights)
return criterion
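# Usage note (an assumption based on the usual segmentation setup rather than this
# repo's call sites): the returned criterion expects raw logits of shape
# (N, num_classes, H, W) and integer targets of shape (N, H, W); when ignore_index is
# set, target pixels equal to that value are excluded from the loss.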
def optimizer(**cfig):
_header('optimizer',cfig)
opt_name=cfig.get('name',DEFAULT_OPTIMIZER)
if opt_name=='adam':
optimizer=torch.optim.Adam
elif opt_name=='radam':
optimizer=RAdam
else:
ValueError(f'optimizer "{opt_name}" not implemented')
return optimizer
def loaders(**cfig):
"""
"""
# INITAL DATASET HANDLING
dsets_df=pd.read_csv(DSETS_PATH)
train_df=dsets_df[dsets_df.dset_type=='train']
valid_df=dsets_df[dsets_df.dset_type=='valid']
train_df=train_df[train_df.input_year<=YEAR_MAX].iloc[1:6*8+1]
valid_df=valid_df[valid_df.input_year<=YEAR_MAX]
example_path=train_df.rgbn_path.iloc[0]
#
# on with the show
#
dev=cfig.get('dev')
vmap=cfig.get('vmap')
batch_size=cfig.get('batch_size',BATCH_SIZE)
band_indices=['ndvi']
augment=cfig.get('augment',True)
shuffle=cfig.get('shuffle',True)
no_data_value=cfig.get('no_data_value',False)
cropping=cfig.get('cropping',None)
float_cropping=cfig.get('float_cropping',None)
update_version=cfig.get('update_version',False)
print('AUGMENT:',augment)
print('SHUFFLE:',shuffle)
print('BATCH_SIZE:',batch_size)
print('NO DATA VALUE:',no_data_value)
print('CROPPING:',cropping)
print('FLOAT CROPPING:',float_cropping)
if (train_df.shape[0]>=batch_size*8) and (valid_df.shape[0]>=batch_size*2):
if dev:
train_df=train_df.sample(batch_size*8)
valid_df=valid_df.sample(batch_size*2)
dl_train=HeightIndexDataset.loader(
batch_size=batch_size,
# input_bands=[0,1,2],
# input_band_count=3,
band_indices=['ndvi'],
category_bounds=HeightIndexDataset.NAIP_GREEN,
input_bounds=IBNDS,
dataframe=train_df,
means=MEANS,
stdevs=STDEVS,
no_data_value=no_data_value,
cropping=cropping,
float_cropping=float_cropping,
example_path=example_path,
augment=augment,
train_mode=True,
target_dtype=np.int,
shuffle_data=shuffle)
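        # NOTE: the early return below hands back (dl_train, None); the validation-loader
        # construction that follows is unreachable as written.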
return dl_train, None
dl_valid=HeightIndexDataset.loader(
batch_size=batch_size,
# input_bands=[0,1,2],
# input_band_count=3,
band_indices=['ndvi'],
category_bounds=HeightIndexDataset.NAIP_GREEN,
input_bounds=IBNDS,
dataframe=valid_df,
means=MEANS,
stdevs=STDEVS,
no_data_value=no_data_value,
cropping=cropping,
float_cropping=float_cropping,
example_path=example_path,
augment=augment,
train_mode=True,
target_dtype=np.int,
shuffle_data=shuffle)
print("SIZE:",train_df.shape[0],valid_df.shape[0])
return dl_train, dl_valid
else:
print('NOT ENOUGH DATA',train_df.shape[0],valid_df.shape[0],batch_size*8,batch_size*30)
return False, False
#
# HELPERS
#
def _header(title,cfig=None):
print('='*100)
print(title)
print('-'*100)
if cfig:
pprint(cfig)
| [
"[email protected]"
] | |
583f60ff287cee838ecb0f535047399292eeab50 | 653eaef652627b155569b5fe9ab9bb3607fc1e78 | /alg/ganite/ganite.py | 1e5aaedc8c7c7720ded1a6bf6cee744f9f7711a5 | [
"BSD-3-Clause"
] | permissive | IlyaTrofimov/mlforhealthlabpub | 11ab86a83bd2ffd2574364a956b322b0c62406ae | 190cbad2faae9e559ffe7a68143df7f747d70adc | refs/heads/main | 2023-04-16T03:58:38.423288 | 2021-04-21T10:22:43 | 2021-04-21T10:22:43 | 358,528,623 | 0 | 0 | NOASSERTION | 2021-04-16T08:25:26 | 2021-04-16T08:25:25 | null | UTF-8 | Python | false | false | 12,408 | py | '''
GANITE:
Jinsung Yoon 10/11/2017
'''
import tensorflow as tf
import numpy as np
from tqdm import tqdm
import argparse
import os
import json
import pandas as pd
import initpath_alg
initpath_alg.init_sys_path()
import utilmlab
def init_arg():
parser = argparse.ArgumentParser()
parser.add_argument("--alpha", default=1, type=float)
parser.add_argument("--kk", default=10, type=int)
parser.add_argument("--it", default=10000, type=int)
parser.add_argument("-o", default='./result.json')
parser.add_argument('-ocsv')
parser.add_argument("--trainx", default="trainx.csv")
parser.add_argument("--trainy", default="trainy.csv")
parser.add_argument("--traint")
parser.add_argument("--testx", default="testx.csv")
parser.add_argument("--testy", default="testy.csv")
parser.add_argument("--testt")
return parser.parse_args()
#%% Performance Metrics
def Perf_RPol_ATT(Test_T, Test_Y, Output_Y):
# RPol
# Decision of Output_Y
hat_t = np.sign(Output_Y[:,1] - Output_Y[:,0])
hat_t = (0.5*(hat_t + 1))
new_hat_t = np.abs(1-hat_t)
# Intersection
idx1 = hat_t * Test_T
idx0 = new_hat_t * (1-Test_T)
# RPol Computation
RPol1 = (np.sum(idx1 * Test_Y)/(np.sum(idx1)+1e-8)) * np.mean(hat_t)
RPol0 = (np.sum(idx0 * Test_Y)/(np.sum(idx0)+1e-8)) * np.mean(new_hat_t)
RPol = 1 - (RPol1 + RPol0)
# ATT
# Original ATT
ATT_value = np.sum(Test_T * Test_Y) / (np.sum(Test_T) + 1e-8) - np.sum((1-Test_T) * Test_Y) / (np.sum(1-Test_T) + 1e-8)
# Estimated ATT
ATT_estimate = np.sum(Test_T * (Output_Y[:,1] - Output_Y[:,0]) ) / (np.sum(Test_T) + 1e-8)
# Final ATT
ATT = np.abs( ATT_value - ATT_estimate )
print('pol0:{} pol1:{} pol:{} mean hat:{} mean new hat:{} ATT:{}'.format(RPol0, RPol1, RPol, np.mean(hat_t), np.mean(new_hat_t), ATT))
return [RPol, ATT]
def PEHE(y, hat_y):
e_PEHE = tf.reduce_mean( tf.squared_difference( (y[:,1]-y[:,0]), (hat_y[:,1] - hat_y[:,0]) ))
return e_PEHE
def ATE(y, hat_y):
e_PEHE = tf.abs( tf.reduce_mean( y[:,1]-y[:,0] ) - tf.reduce_mean( hat_y[:,1]-hat_y[:,0] ) )
return e_PEHE
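# Column convention assumed by the metrics above: column 0 of y / hat_y is the outcome
# under no treatment and column 1 the outcome under treatment, so hat_y[:,1] - hat_y[:,0]
# is the estimated individual treatment effect.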
def xavier_init(size):
in_dim = size[0]
xavier_stddev = 1. / tf.sqrt(in_dim / 2.)
return tf.random_normal(shape = size, stddev = xavier_stddev)
# 3.1 Generator
def generator(x,t,y):
inputs = tf.concat(axis = 1, values = [x,t,y])
G_h1 = tf.nn.relu(tf.matmul(inputs, G_W1) + G_b1)
G_h2 = tf.nn.relu(tf.matmul(G_h1, G_W2) + G_b2)
G_h31 = tf.nn.relu(tf.matmul(G_h2, G_W31) + G_b31)
G_prob1 = (tf.matmul(G_h31, G_W32) + G_b32)
G_h41 = tf.nn.relu(tf.matmul(G_h2, G_W41) + G_b41)
G_prob2 = (tf.matmul(G_h41, G_W42) + G_b42)
G_prob = tf.nn.sigmoid(tf.concat(axis = 1, values = [G_prob1, G_prob2]))
return G_prob
# 3.2. Discriminator
def discriminator(x,t,y,hat_y):
# Factual & Counterfactual outcomes concatenate
inp0 = (1.-t) * y + t * tf.reshape(hat_y[:,0], [-1,1])
inp1 = t * y + (1.-t) * tf.reshape(hat_y[:,1], [-1,1])
inputs = tf.concat(axis = 1, values = [x,inp0,inp1])
D_h1 = tf.nn.relu(tf.matmul(inputs, D_W1) + D_b1)
D_h2 = tf.nn.relu(tf.matmul(D_h1, D_W2) + D_b2)
D_logit = tf.matmul(D_h2, D_W3) + D_b3
return D_logit
# 3.3. Inference Nets
def inference(x):
I_h1 = tf.nn.relu(tf.matmul(x, I_W1) + I_b1)
I_h2 = tf.nn.relu(tf.matmul(I_h1, I_W2) + I_b2)
I_h31 = tf.nn.relu(tf.matmul(I_h2, I_W31) + I_b31)
I_prob1 = (tf.matmul(I_h31, I_W32) + I_b32)
I_h41 = tf.nn.relu(tf.matmul(I_h2, I_W41) + I_b41)
I_prob2 = (tf.matmul(I_h41, I_W42) + I_b42)
I_prob = tf.nn.sigmoid(tf.concat(axis = 1, values = [I_prob1, I_prob2]))
return I_prob
# Random sample generator for Z and R
def sample_Z(m, n):
return np.random.uniform(-1., 1., size = [m, n])
def sample_X(X, size):
start_idx = np.random.randint(0, X.shape[0], size)
return start_idx
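# Note: np.random.randint draws indices with replacement, so a minibatch produced by
# sample_X may contain duplicate rows.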
if __name__ == '__main__':
args = init_arg()
fn_trainx, fn_trainy, fn_traint = args.trainx, args.trainy, args.traint
fn_testx, fn_testy, fn_testt = args.testx, args.testy, args.testt
Train_X = pd.read_csv(fn_trainx).values
Train_Y = pd.read_csv(fn_trainy).values
Train_T = pd.read_csv(fn_traint).values if fn_traint is not None else None
Test_X = pd.read_csv(fn_testx).values
Test_Y = pd.read_csv(fn_testy).values
Test_T = pd.read_csv(fn_testt).values if fn_testt is not None else None
dim_outcome = Test_Y.shape[1]
fn_json = args.o
fn_csv = args.ocsv
num_iterations = args.it
mb_size = 256
alpha = args.alpha
num_kk = args.kk
Train_No = len(Train_X)
Test_No = len(Test_X)
Dim = len(Train_X[0])
H_Dim1 = int(Dim)
H_Dim2 = int(Dim)
tf.reset_default_graph()
#%% 1. Input
# 1.1. Feature (X)
X = tf.placeholder(tf.float32, shape = [None, Dim])
# 1.2. Treatment (T)
T = tf.placeholder(tf.float32, shape = [None, 1])
# 1.3. Outcome (Y)
Y = tf.placeholder(tf.float32, shape = [None, 1])
# 1.6. Test Outcome (Y_T) - Potential outcome
# Y_T = tf.placeholder(tf.float32, shape = [None, 2]) # Twins
# Y_T = tf.placeholder(tf.float32, shape = [None, 1]) # Jobs
Y_T = tf.placeholder(tf.float32, shape = [None, dim_outcome])
#%% 2. layer construction
# 2.1 Generator Layer
G_W1 = tf.Variable(xavier_init([(Dim+2), H_Dim1])) # Inputs: X + Treatment (1) + Factual Outcome (1) + Random Vector (Z)
G_b1 = tf.Variable(tf.zeros(shape = [H_Dim1]))
G_W2 = tf.Variable(xavier_init([H_Dim1, H_Dim2]))
G_b2 = tf.Variable(tf.zeros(shape = [H_Dim2]))
G_W31 = tf.Variable(xavier_init([H_Dim2, H_Dim2]))
G_b31 = tf.Variable(tf.zeros(shape = [H_Dim2])) # Output: Estimated Potential Outcomes
G_W32 = tf.Variable(xavier_init([H_Dim2, 1]))
G_b32 = tf.Variable(tf.zeros(shape = [1])) # Output: Estimated Potential Outcomes
G_W41 = tf.Variable(xavier_init([H_Dim2, H_Dim2]))
G_b41 = tf.Variable(tf.zeros(shape = [H_Dim2])) # Output: Estimated Potential Outcomes
G_W42 = tf.Variable(xavier_init([H_Dim2, 1]))
G_b42 = tf.Variable(tf.zeros(shape = [1])) # Output: Estimated Potential Outcomes
theta_G = [G_W1, G_W2, G_W31, G_W32, G_W41, G_W42, G_b1, G_b2, G_b31, G_b32, G_b41, G_b42]
# 2.2 Discriminator
D_W1 = tf.Variable(xavier_init([(Dim+2), H_Dim1])) # Inputs: X + Factual Outcomes + Estimated Counterfactual Outcomes
D_b1 = tf.Variable(tf.zeros(shape = [H_Dim1]))
D_W2 = tf.Variable(xavier_init([H_Dim1, H_Dim2]))
D_b2 = tf.Variable(tf.zeros(shape = [H_Dim2]))
D_W3 = tf.Variable(xavier_init([H_Dim2, 1]))
D_b3 = tf.Variable(tf.zeros(shape = [1]))
theta_D = [D_W1, D_W2, D_W3, D_b1, D_b2, D_b3]
# 2.3 Inference Layer
I_W1 = tf.Variable(xavier_init([(Dim), H_Dim1]))
I_b1 = tf.Variable(tf.zeros(shape = [H_Dim1]))
I_W2 = tf.Variable(xavier_init([H_Dim1, H_Dim2]))
I_b2 = tf.Variable(tf.zeros(shape = [H_Dim2]))
I_W31 = tf.Variable(xavier_init([H_Dim2, H_Dim2]))
I_b31 = tf.Variable(tf.zeros(shape = [H_Dim2]))
I_W32 = tf.Variable(xavier_init([H_Dim2, 1]))
I_b32 = tf.Variable(tf.zeros(shape = [1]))
I_W41 = tf.Variable(xavier_init([H_Dim2, H_Dim2]))
I_b41 = tf.Variable(tf.zeros(shape = [H_Dim2]))
I_W42 = tf.Variable(xavier_init([H_Dim2, 1]))
I_b42 = tf.Variable(tf.zeros(shape = [1]))
theta_I = [I_W1, I_W2, I_W31, I_W32, I_W41, I_W42, I_b1, I_b2, I_b31, I_b32, I_b41, I_b42]
#%% Structure
# 1. Generator
Tilde = generator(X,T,Y)
# 2. Discriminator
D_logit = discriminator(X,T,Y,Tilde)
# 3. Inference function
Hat = inference(X)
#%% Loss
# 1. Discriminator loss
#D_loss = -tf.reduce_mean(T * tf.log(D_prob + 1e-8) + (1. -T) * tf.log(1. - D_prob + 1e-8))
D_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels = T, logits = D_logit ))
# 2. Generator loss
G_loss_GAN = -D_loss
G_loss_R = tf.reduce_mean(tf.losses.mean_squared_error(Y, (T * tf.reshape(Tilde[:,1],[-1,1]) + (1. - T) * tf.reshape(Tilde[:,0],[-1,1]) )))
G_loss = G_loss_R + alpha * G_loss_GAN
# 4. Inference loss
I_loss1 = tf.reduce_mean(tf.losses.mean_squared_error((T) * Y + (1-T) * tf.reshape(Tilde[:,1],[-1,1]), tf.reshape(Hat[:,1],[-1,1]) ))
I_loss2 = tf.reduce_mean(tf.losses.mean_squared_error((1-T) * Y + (T) * tf.reshape(Tilde[:,0],[-1,1]), tf.reshape(Hat[:,0],[-1,1]) ))
I_loss = I_loss1 + I_loss2
# Loss Followup
if Test_T is None:
Hat_Y = Hat
Loss1 = PEHE(Y_T, Hat_Y)
Loss2 = ATE(Y_T, Hat_Y)
#%% Solver
G_solver = tf.train.AdamOptimizer().minimize(G_loss, var_list=theta_G)
D_solver = tf.train.AdamOptimizer().minimize(D_loss, var_list=theta_D)
I_solver = tf.train.AdamOptimizer().minimize(I_loss, var_list=theta_I)
#%% Sessions
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Iterations
# Train G and D first
for it in tqdm(range(num_iterations)):
for kk in range(num_kk):
idx_mb = sample_X(Train_X, mb_size)
X_mb = Train_X[idx_mb,:]
T_mb = np.reshape(Train_T[idx_mb], [mb_size,1])
Y_mb = np.reshape(Train_Y[idx_mb], [mb_size,1])
_, D_loss_curr = sess.run([D_solver, D_loss], feed_dict = {X: X_mb, T: T_mb, Y: Y_mb})
idx_mb = sample_X(Train_X, mb_size)
X_mb = Train_X[idx_mb,:]
T_mb = np.reshape(Train_T[idx_mb], [mb_size,1])
Y_mb = np.reshape(Train_Y[idx_mb], [mb_size,1])
_, G_loss_curr, Tilde_curr = sess.run([G_solver, G_loss, Tilde], feed_dict = {X: X_mb, T: T_mb, Y: Y_mb})
#%% Testing
if it % 100 == 0:
print('Iter: {}'.format(it))
print('D_loss: {:.4}'.format((D_loss_curr)))
print('G_loss: {:.4}'.format((G_loss_curr)))
print()
# Train I and ID
result = {}
for it in tqdm(range(num_iterations)):
idx_mb = sample_X(Train_X, mb_size)
X_mb = Train_X[idx_mb,:]
T_mb = np.reshape(Train_T[idx_mb], [mb_size,1])
Y_mb = np.reshape(Train_Y[idx_mb], [mb_size,1])
_, I_loss_curr = sess.run([I_solver, I_loss], feed_dict = {X: X_mb, T: T_mb, Y: Y_mb})
#%% Testing
if it % 100 == 0:
result = {
'alpha': alpha,
'kk': num_kk
}
if Test_T is not None:
Hat_curr = sess.run([Hat], feed_dict = {X: Test_X})[0]
[R_Pol_Out, B] = Perf_RPol_ATT(Test_T, Test_Y, Hat_curr)
print('Iter: {}'.format(it))
print('I_loss: {:.4}'.format((I_loss_curr)))
print('R_Pol_Out: {:.4}'.format(R_Pol_Out))
print('')
result['R_Pol_Out'] = float(R_Pol_Out)
else:
New_X_mb = Test_X
Y_T_mb = Test_Y
Loss1_curr, Loss2_curr, Hat_curr = sess.run([Loss1, Loss2, Hat], feed_dict = {X: New_X_mb, Y_T: Y_T_mb})
print('Iter: {}'.format(it))
print('I_loss: {:.4}'.format((I_loss_curr)))
print('Loss_PEHE_Out: {:.4}'.format(np.sqrt(Loss1_curr)))
print('Loss_ATE_Out: {:.4}'.format(Loss2_curr))
print('')
result['Loss_PEHE_Out'] = float(np.sqrt(Loss1_curr))
result['Loss_ATE_Out'] = float(Loss2_curr)
with open(fn_json, "w") as fp:
json.dump(result, fp)
if fn_csv is not None:
Hat_curr = sess.run([Hat], feed_dict = {X: Test_X})[0]
if Test_T is not None:
[R_Pol_Out, B] = Perf_RPol_ATT(Test_T, Test_Y, Hat_curr)
df = pd.DataFrame(Hat_curr, columns=['A', 'B'])
df.to_csv(fn_csv, index=False)
odir = os.path.dirname(fn_csv)
df_test_X = pd.DataFrame(Test_X)
df_test_X.to_csv('{}/testx.csv'.format(odir), index=False)
df_test_Y = pd.DataFrame(Test_Y)
df_test_Y.to_csv('{}/testy.csv'.format(odir), index=False)
if Test_T is not None:
df_test_T = pd.DataFrame(Test_T)
fn_test1 = '{}/testt.csv'.format(odir)
df_test_T.to_csv(fn_test1, index=False)
| [
"[email protected]"
] | |
f8184270f36e3f165d97bbb247f6f0b508fc5810 | ba7d84b4b85be8c3221468527757e264e64616b9 | /tests/hammytest.py | b5f03afc22f1e60ade3aca0eb505d0bf88fd3fe8 | [] | no_license | gomesr/timetracker | c18eb4b6f33e08eadd72971216b16560ef085aa1 | ce57a0791727a3b06e4b167fbeb3cb3e558ff2f1 | refs/heads/master | 2021-01-22T23:58:20.247393 | 2010-12-12T01:16:54 | 2010-12-12T01:16:54 | 1,130,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 675 | py |
import unittest
from trackers.hammy import HamsterTracker
from hamster import client
class HammyTest(unittest.TestCase):
def setUp(self):
self.tracker = HamsterTracker()
    def test_create_100_activities(self):
tags = []
ids = []
try:
for i in range(1,100):
                ids.append(self.tracker.start("activity-%d" % i,
                                              "",
                                              "some elaborate description",
                                              tags))
finally:
# clean up!
for id in ids:
self.tracker.storage.remove_fact(id)
| [
"[email protected]"
] | |
dd1953d6927d29066068ea81328364dee75a86e6 | bbf1ae079309eca11270422d3f0d259d1515d430 | /numerical-tours/python/todo/solutions/wavelet_2_haar2d.py | 7ec8c89d23ba2108e274a13521844d6ad479f593 | [
"BSD-2-Clause"
] | permissive | ZichaoDi/Di_MATLABTool | 5e6a67b613c4bcf4d904ddc47c2744b4bcea4885 | c071291c63685c236f507b2cb893c0316ab6415c | refs/heads/master | 2021-08-11T07:28:34.286526 | 2021-08-04T18:26:46 | 2021-08-04T18:26:46 | 149,222,333 | 9 | 5 | null | null | null | null | UTF-8 | Python | false | false | 2,522 | py | def exo1():
"""
    Implement a full wavelet transform that iteratively extracts wavelet
coefficients, by repeating these steps. Take care of choosing the
correct number of steps.
"""
Jmin = 0
fw = f
for j in J: -1: Jmin:
fw(1: 2^(j + 1), 1: 2^(j + 1)) = haar(fw(1: 2^(j + 1), 1: 2^(j + 1)))
%
j1 = J-j
if j1 <4
A = fw(1: 2^(j + 1), 1: 2^(j + 1))
imageplot(A(1: 2^j, 2^j + 1: 2^(j + 1)), ['Horizontal, j = ' num2str(j)], 3, 4, j1 + 1)
imageplot(A(2^j + 1: 2^(j + 1), 1: 2^j), ['Vertical, j = ' num2str(j)], 3, 4, j1 + 5)
imageplot(A(2^j + 1: 2^(j + 1), 2^j + 1: 2^(j + 1)), ['Diagonal, j = ' num2str(j)], 3, 4, j1 + 9)
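# Illustrative sketch (not from the original numerical-tours solution): one level of an
# orthonormal 2D Haar analysis step written with plain NumPy. The helper `haar` used
# above, its normalization, and the exact sub-band naming are toolbox details assumed
# here, so treat this only as a standalone reference for the average/difference idea
# with the coarse image in the top-left quadrant.
import numpy as np

def haar_step(A):
    # Split the 2n x 2n image into its four polyphase components.
    a = A[0::2, 0::2]
    b = A[0::2, 1::2]
    c = A[1::2, 0::2]
    d = A[1::2, 1::2]
    coarse = (a + b + c + d) / 2.0      # low-pass / low-pass
    detail_h = (a - b + c - d) / 2.0    # differences along rows
    detail_v = (a + b - c - d) / 2.0    # differences along columns
    detail_d = (a - b - c + d) / 2.0    # diagonal differences
    # Same layout as in exo1: coarse top-left, details in the other quadrants.
    return np.block([[coarse, detail_h], [detail_v, detail_d]])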
def exo2():
"""
Write the inverse wavelet transform that computes $f_1$ from
coefficients |fW|.
"""
f1 = fw
for j in Jmin: J:
s = 1: 2^j; t = 2^j + 1: 2^(j + 1); u = 1: 2^(j + 1)
f1(u, u) = ihaar(f1(s, s), f1(s, t), f1(t, s), f1(t, t))
%
j1 = J-j
if j1 >0 & j1 <5
A = f1(1: 2^(j + 1), 1: 2^(j + 1))
subplot(2, 2, j1)
imageplot(A, ['Partial reconstruction, j = ' num2str(j)])
def exo3():
"""
Display the reconstructed signal obtained from |fw1|, for a decreasing cut-off scale $j$.
"""
jlist = J-(1: 4)
fw = perform_haar_transf(f, 1, + 1)
for i in 1: length(jlist):
j = jlist(i)
fw1 = zeros(n); fw1(1: 2^j, 1: 2^j) = fw(1: 2^j, 1: 2^j)
f1 = perform_haar_transf(fw1, 1, -1)
% display
subplot(2, 2, i)
imageplot(f1)
title(strcat(['j = ' num2str(j) ', SNR = ' num2str(snr(f, f1), 3) 'dB']))
def exo4():
"""
Find the threshold $T$ so that the number of remaining coefficients in
|fwT| is a fixed number $m$. Use this threshold to compute |fwT| and then display
the corresponding approximation $f_1$ of $f$. Try for an increasing number $m$ of coeffiients.
"""
m_list = round([.005 .01 .05 .1]*N); % number of kept coefficients
fw = perform_haar_transf(f, 1, + 1)
for i in 1: length(m_list):
m = m_list(i)
% select threshold
v = sort(abs(fw(: )))
if v(1) <v(N)
v = reverse(v)
T = v(m)
fwT = fw .* (abs(fw) >= T)
% inverse
f1 = perform_haar_transf(fwT, 1, -1)
% display
subplot(2, 2, i)
imageplot(f1)
title(strcat(['m = ' num2str(m) ', SNR = ' num2str(snr(f, f1), 3) 'dB']))
| [
"[email protected]"
] | |
286cc8c250f2c2b4030ffc5e75d7d1213b47a934 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_yens.py | f7c90d82f8fc7ae9864e4492c2449f9c31d5b2f4 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py |
from xai.brain.wordbase.nouns._yen import _YEN
# class header
class _YENS(_YEN, ):
def __init__(self,):
_YEN.__init__(self)
self.name = "YENS"
self.specie = 'nouns'
self.basic = "yen"
self.jsondata = {}
| [
"[email protected]"
] | |
a31d0693760097d9ec0bfc62e4a5c4d7383c09ab | 378b200007c5d3633572b61eb3dd2180748086b7 | /chefsBackEnd/chefsBackEnd/asgi.py | d077d3550da2054b45a48c64401ec50a84113e40 | [] | no_license | jgartsu12/chefs-table-backend | 4163c2c9a2bb586d4432c332238682bf282ef967 | 71611cf17aa457f8bc9a7ec7d853c570062d22fb | refs/heads/master | 2022-12-16T04:22:30.954831 | 2020-07-08T19:24:37 | 2020-07-08T19:24:37 | 251,097,796 | 1 | 0 | null | 2022-12-08T10:13:44 | 2020-03-29T17:59:15 | Python | UTF-8 | Python | false | false | 401 | py | """
ASGI config for chefsBackEnd project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'chefsBackEnd.settings')
application = get_asgi_application()
| [
"[email protected]"
] | |
764c228e5a8b115f7ca60c1480fdff36b20ab047 | 8a3726abfc9cb72d8ccf7d32b18edabf8d16b630 | /18/a.py | 32847a4eb7fdc71ad694396872b27a628860cf2a | [] | no_license | alex-stephens/aoc2015 | 48a46efc1a888ea2d451a5938fc404d26e96e1a0 | ccc1c85f8da7a0585003b2e4f99f3f1def35ec0b | refs/heads/master | 2023-02-05T23:02:19.148138 | 2020-12-27T19:16:47 | 2020-12-27T19:16:47 | 324,579,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | grid = [list(line.strip()) for line in open('input.txt').readlines()]
rows, cols = len(grid), len(grid[0])
def count_neighbours(i, j):
rmin, rmax = max(i-1, 0), min(i+1, rows-1)
cmin, cmax = max(j-1, 0), min(j+1, cols-1)
ans = 0
for r in range(rmin, rmax+1):
for c in range(cmin, cmax+1):
if (r,c) == (i,j):
continue
ans += 1 if grid[r][c] == '#' else 0
return ans
it = 100
for i in range(it):
new_grid = [['x' for _ in range(cols)] for _ in range(rows)]
for r in range(rows):
for c in range(cols):
count = count_neighbours(r,c)
if grid[r][c] == '#' and (count != 2 and count != 3):
new_grid[r][c] = '.'
elif grid[r][c] == '.' and count == 3:
new_grid[r][c] = '#'
else:
new_grid[r][c] = grid[r][c]
grid = [list(x) for x in new_grid]
# print('--------------------------')
# for g in grid:
# print(''.join(g))
print(sum([''.join(r).count('#') for r in grid])) | [
"[email protected]"
] | |
05c5693d3b24a5c3fd147316f1f2cfeaba19014b | 5c39f5ac529e9f292ba0e4965fd684d4c6eefe8a | /migrations/0001_initial.py | 8570a25dfd79013e6c9c3202871e7bdc877c28d4 | [] | no_license | joshianshul2/csv_db | 6d24dec8bdcd8f00115a8729d5036beb47994d0e | e7215002c0a2fb8cadd0d4087b8651b1ec9e30ea | refs/heads/master | 2023-04-21T19:46:56.941399 | 2021-05-11T17:29:38 | 2021-05-11T17:29:38 | 356,846,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,962 | py | # Generated by Django 3.2 on 2021-04-07 05:30
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AvgMaster',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('county', models.CharField(max_length=255)),
('state', models.CharField(max_length=255)),
('NetPrAr', models.FloatField(default=0.0)),
('Rate', models.FloatField()),
('UserPercentage', models.FloatField(default=0.0)),
('FinaleValue', models.FloatField(default=0.0)),
('accountId', models.BigIntegerField()),
('acres', models.FloatField()),
('adTargetingCountyId', models.BigIntegerField()),
('address', models.CharField(max_length=255)),
('baths', models.BigIntegerField()),
('beds', models.BigIntegerField()),
('brokerCompany', models.CharField(max_length=255)),
('brokerName', models.CharField(max_length=255)),
('Url', models.URLField(max_length=255)),
('city', models.CharField(max_length=255)),
('cityID', models.BigIntegerField()),
('companyLogoDocumentId', models.BigIntegerField()),
('countyId', models.BigIntegerField()),
('description', models.TextField(max_length=255)),
('hasHouse', models.BooleanField()),
('hasVideo', models.BooleanField()),
('hasVirtualTour', models.BigIntegerField()),
('imageCount', models.BigIntegerField()),
('imageAltTextDisplay', models.CharField(max_length=255)),
('isHeadlineAd', models.BooleanField()),
('lwPropertyId', models.BigIntegerField()),
('isALC', models.BigIntegerField()),
('latitude', models.FloatField()),
('longitude', models.FloatField()),
('price', models.FloatField()),
('types', models.TextField(max_length=255)),
('status', models.CharField(max_length=20)),
('status1', models.CharField(max_length=255)),
('zip', models.BigIntegerField()),
('Descrpt', models.TextField(default='!', max_length=255)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='PropertyMaster',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('accountId', models.BigIntegerField()),
('acres', models.FloatField()),
('adTargetingCountyId', models.BigIntegerField()),
('address', models.CharField(max_length=255)),
('baths', models.BigIntegerField()),
('beds', models.BigIntegerField()),
('brokerCompany', models.CharField(max_length=255)),
('brokerName', models.CharField(max_length=255)),
('Url', models.URLField(max_length=255)),
('city', models.CharField(max_length=255)),
('cityID', models.BigIntegerField()),
('companyLogoDocumentId', models.BigIntegerField()),
('county', models.CharField(max_length=255)),
('countyId', models.BigIntegerField()),
('description', models.TextField(max_length=255)),
('hasHouse', models.BooleanField()),
('hasVideo', models.BooleanField()),
('hasVirtualTour', models.BigIntegerField()),
('imageCount', models.BigIntegerField()),
('imageAltTextDisplay', models.CharField(max_length=255)),
('isHeadlineAd', models.BooleanField()),
('lwPropertyId', models.BigIntegerField()),
('isALC', models.BigIntegerField()),
('latitude', models.FloatField()),
('longitude', models.FloatField()),
('price', models.FloatField()),
('types', models.TextField(max_length=255)),
('state', models.CharField(max_length=255)),
('status', models.CharField(max_length=20)),
('status1', models.CharField(max_length=255)),
('zip', models.BigIntegerField()),
('Rate', models.FloatField()),
('NetPrAr', models.FloatField(default=0.0)),
('Descrpt', models.TextField(default='!', max_length=255)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='StatusMaster',
fields=[
('status', models.IntegerField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=255)),
('last_name', models.CharField(max_length=255)),
('email', models.CharField(max_length=255)),
('password', models.CharField(max_length=255)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
]
# File: itchatmp/controllers/mpapi/mp/messages.py (repo sysuzyq/itchatmp, MIT license)
''' This package is for mass texting in wechat mp
1. What can we send?
- IMAGE, VOICE, VIDEO, TEXT, NEWS, CARD
2. How to send them?
- we use send_some / send_all method
        `send_some(msgType, mediaId, additionalDict, targetIdList)`
      - for msgs like text and card, just pass the content as mediaId
      - for files like image, voice and video, we need to upload them first
        `upload(fileType, fileDir, additionalDict, permanent)`
      - for news, you need to form them first and upload them to get a mediaId
`create_news(newsDict, permanent)`
for images used in news, you need to turn them into url first
`get_image_url(openedFile)`
- SPECIAL WARNING: video is a little bit **STRANGE**
when uploading or sending, you need to pass additionalDict to method
`{"title" :VIDEO_TITLE, "introduction" :INTRODUCTION}`
    3. I also listed the API list for you:
- SENDING
send_some
send_all
preview
- MSG MANAGING
delete
get
- TEMP MATERIAL MANAGING
upload
download
      - PERMANENT MATERIAL MANAGING
get_material
delete_material
get_materialcount
batchget_material
- FORM NEWS
create_news
update_news
get_image_url
'''
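# Usage sketch (kept as comments so importing this module has no side effects):
# upload a local image as temporary material, then broadcast it to all followers.
# The file path 'demo.png' is only a placeholder and error handling is minimal.
#
#     from itchatmp.content import IMAGE
#     r = upload(IMAGE, 'demo.png')          # dict-like ReturnValue with 'media_id' on success
#     if r and 'media_id' in r:
#         send_all(IMAGE, r['media_id'])     # mass-send to every follower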
import logging, json, os, mimetypes, io, re
from ..requests import requests
from .common import access_token
from itchatmp.utils import retry, encode_send_dict
from itchatmp.config import SERVER_URL
from itchatmp.content import (
IMAGE, VOICE, VIDEO, THUMB, TEXT, NEWS, CARD)
from itchatmp.returnvalues import ReturnValue
logger = logging.getLogger('itchatmp')
@access_token
def send_some(msgType, mediaId, additionalDict={},
targetIdList=[], partyIdList=[], tagIdList=[],
agentId=None, accessToken=None):
msgDict = __form_send_dict(msgType, mediaId, additionalDict)
if not msgDict: return msgDict
if not isinstance(targetIdList, list) or len(targetIdList) < 2:
return ReturnValue({'errcode': 40130})
msgDict['touser'] = targetIdList
r = requests.post('%s/cgi-bin/message/mass/send?access_token=%s' %
(SERVER_URL, accessToken), data=encode_send_dict(msgDict))
def _wrap_result(result):
return ReturnValue(result.json())
r._wrap_result = _wrap_result
return r
@access_token
def send_all(msgType, mediaId, additionalDict={},
tagId=None, agentId=None, accessToken=None):
msgDict = __form_send_dict(msgType, mediaId, additionalDict)
if not msgDict: return msgDict
if tagId is None:
msgDict['filter'] = {'is_to_all': True, 'tag_id': 0}
else:
msgDict['filter'] = {'is_to_all': False, 'tag_id': tagId}
r = requests.post('%s/cgi-bin/message/mass/sendall?access_token=%s' %
(SERVER_URL, accessToken), data=encode_send_dict(msgDict))
def _wrap_result(result):
result = ReturnValue(result.json())
if 'media_id' in result: result['errcode'] = 0
return result
r._wrap_result = _wrap_result
return r
@access_token
def preview(msgType, mediaId, additionalDict={},
toUserId=None, toWxAccount=None, accessToken=None):
msgDict = __form_send_dict(msgType, mediaId, additionalDict)
if not msgDict: return msgDict
if (toUserId or toWxAccount) is None:
return ReturnValue({'errcode': -10003})
else:
if toUserId is not None: msgDict['touser'] = toUserId
if toWxAccount is not None: msgDict['towxname'] = toWxAccount
r = requests.post('%s/cgi-bin/message/mass/preview?access_token=%s' %
(SERVER_URL, accessToken), data=encode_send_dict(msgDict))
def _wrap_result(result):
return ReturnValue(result.json())
r._wrap_result = _wrap_result
return r
@access_token
def form_video_id(mediaId, additionalDict, accessToken=None):
''' in theory video needs another method to get media_id for sending '''
additionalDict['media_id'] = mediaId
additionalDict['description'] = additionalDict['introduction']
# requests.packages.urllib3.disable_warnings()
url = 'https://file.api.weixin.qq.com/cgi-bin/media/uploadvideo' \
'?access_token=%s' % accessToken
r = requests.post(url, data=encode_send_dict(additionalDict))
# verify=False).json()
# I don't know why this is a fake ssl
def _wrap_result(result):
result = ReturnValue(result.json())
if 'media_id' in result:
result['errcode'] = 0
return result
r._wrap_result = _wrap_result
return r
def __form_send_dict(msgType, mediaId, additionalDict):
if not msgType in (IMAGE, VOICE, VIDEO, TEXT, NEWS, CARD):
return ReturnValue({'errcode': 40004,})
elif msgType == VIDEO:
mediaId = form_video_id(mediaId, additionalDict)['media_id']
if not mediaId: return mediaId
return {
NEWS: {'mpnews':{'media_id': mediaId}, 'msgtype': 'mpnews'},
TEXT: {'text': {'content': mediaId}, 'msgtype': 'text'},
VOICE: {'voice': {'media_id': mediaId}, 'msgtype': 'voice'},
IMAGE: {'image': {'media_id': mediaId}, 'msgtype': 'image'},
VIDEO: {'mpvideo':{'media_id': mediaId,
'title': additionalDict.get('title', ''),
'description': additionalDict.get('introduction', '')},
'msgtype': 'mpvideo'},
CARD: {'wxcard': {'card_id': mediaId}, 'msgtype': 'wxcard'},
}[msgType]
@access_token
def delete(msgId, accessToken=None):
r = requests.post('%s/cgi-bin/message/mass/delete?access_token=%s' %
(SERVER_URL, accessToken), data={'msg_id': msgId})
def _wrap_result(result):
return ReturnValue(result.json())
r._wrap_result = _wrap_result
return r
@access_token
def get(msgId, accessToken=None):
r = requests.post('%s/cgi-bin/message/mass/get?access_token=%s' %
(SERVER_URL, accessToken), data={'msg_id': int(msgId)})
def _wrap_result(result):
return ReturnValue(result.json())
r._wrap_result = _wrap_result
return r
@access_token
def upload(fileType, fileDir, additionalDict={}, permanent=False, accessToken=None):
if additionalDict: # format additionalDict
for key in ('description',):
if key in additionalDict and isinstance(additionalDict[key], dict):
for k, v in additionalDict[key].items():
if k not in additionalDict:
additionalDict[k] = v
additionalDict = {k.lower().replace('_', ''): v
for k, v in additionalDict.items()}
if 'introduction' in additionalDict:
additionalDict['description'] = additionalDict['introduction']
if not fileType in (IMAGE, VOICE, VIDEO, THUMB):
return ReturnValue({'errcode': 40004,})
elif fileType == VIDEO and permanent and not ('title' in additionalDict
and 'description' in additionalDict):
return ReturnValue({'errcode': -10003, 'errmsg':
'additionalDict for type VIDEO should be: ' +
"{'Title' : 'title', 'Description' :'des'}"})
try:
with open(fileDir, 'rb') as f:
file_ = f.read()
except:
return ReturnValue({'errcode': -10004,})
fileName = 'file' + os.path.splitext(fileDir)[1]
if hasattr(fileName, 'decode'):
fileName = fileName.decode('utf8', 'replace')
fileMime = mimetypes.guess_type(fileName)[0] or 'application/octet-stream'
if permanent:
url = '%s/cgi-bin/material/add_material?access_token=%s&type=%s'
else:
url = '%s/cgi-bin/media/upload?access_token=%s&type=%s'
files = {'media': (fileName, file_, fileMime), }
if fileType == VIDEO and permanent:
files['description'] = (None, encode_send_dict({
'title': additionalDict['title'],
'introduction': additionalDict['description'], }
), 'application/json')
r = requests.post(url % (SERVER_URL, accessToken, fileType),
files=files)
def _wrap_result(result):
result = ReturnValue(result.json())
if 'media_id' in result:
result['errcode'] = 0
else:
for k in result:
if 'media_id' in k:
result['media_id'] = result[k]
result['errcode'] = 0
break
return result
r._wrap_result = _wrap_result
return r
@access_token
def download(mediaId, accessToken=None):
r = requests.get('%s/cgi-bin/media/get?access_token=%s&media_id=%s' %
(SERVER_URL, accessToken, mediaId), stream=True)
def _wrap_result(result):
if 'text/plain' in result.headers['Content-Type']:
j = result.json()
if 'down_url' in j or 'news_item' in j:
j['errcode'] = 0
return ReturnValue(j)
else:
tempStorage = io.BytesIO()
for block in result.iter_content(1024):
tempStorage.write(block)
basicDict = {'File': tempStorage, 'errcode': 0}
if 'Content-disposition' in result.headers:
match = re.search('filename="(.*?)"', result.headers['Content-disposition'])
if match:
basicDict['FileName'] = match.group(1)
if 'Content-Type' in result.headers:
basicDict['ContentType'] = result.headers['Content-Type']
if 'Content-Length' in result.headers:
basicDict['ContentLength'] = result.headers['Content-Length']
return ReturnValue(basicDict)
r._wrap_result = _wrap_result
return r
@access_token
def get_material(mediaId, accessToken=None):
data = {'media_id': mediaId}
data = encode_send_dict(data)
if data is None: return ReturnValue({'errcode': -10001})
r = requests.post('%s/cgi-bin/material/get_material?access_token=%s' %
(SERVER_URL, accessToken), data=data, stream=True)
def _wrap_result(result):
if 'text/plain' in result.headers['Content-Type']:
j = result.json()
if 'down_url' in j or 'news_item' in j:
j['errcode'] = 0
return ReturnValue(j)
else:
tempStorage = io.BytesIO()
for block in result.iter_content(1024):
tempStorage.write(block)
basicDict = {'File': tempStorage, 'errcode': 0}
if 'Content-disposition' in result.headers:
match = re.search('filename="(.*?)"', result.headers['Content-disposition'])
if match:
basicDict['FileName'] = match.group(1)
if 'Content-Type' in result.headers:
basicDict['ContentType'] = result.headers['Content-Type']
if 'Content-Length' in result.headers:
basicDict['ContentLength'] = result.headers['Content-Length']
return ReturnValue(basicDict)
r._wrap_result = _wrap_result
return r
@access_token
def delete_material(mediaId, accessToken=None):
r = requests.post('%s/cgi-bin/material/del_material?access_token=%s' %
(SERVER_URL, accessToken), data={'msg_id': mediaId})
def _wrap_result(result):
return ReturnValue(result.json())
r._wrap_result = _wrap_result
return r
@access_token
def get_material_count(accessToken=None):
r = requests.get('%s/cgi-bin/material/get_materialcount?access_token=%s'
% (SERVER_URL, accessToken))
def _wrap_result(result):
result = ReturnValue(result.json())
if 'voice_count' in result:
result['errcode'] = 0
return result
r._wrap_result = _wrap_result
return r
@access_token
def batchget_material(fileType, offset=0, count=20, accessToken=None):
if not fileType in (IMAGE, VOICE, VIDEO, THUMB):
return ReturnValue({'errcode': 40004,})
if 20 < count: count = 20
data = {'type': fileType,
'offset': offset,
'count': count, }
data = encode_send_dict(data)
if data is None: return ReturnValue({'errcode': -10001})
r = requests.post('%s/cgi-bin/material/batchget_material?access_token=%s'
% (SERVER_URL, accessToken), data=data)
def _wrap_result(result):
result = ReturnValue(result.json())
if 'total_count' in result: result['errcode'] = 0
return result
r._wrap_result = _wrap_result
return r
@access_token
def create_news(newsDict, permanent=False, accessToken=None):
if permanent:
url = '%s/cgi-bin/material/add_news?access_token=%s'
else:
url = '%s/cgi-bin/media/uploadnews?access_token=%s'
r = requests.post(url % (SERVER_URL, accessToken),
data=encode_send_dict(newsDict))
def _wrap_result(result):
result = ReturnValue(result.json())
if 'media_id' in result: result['errcode'] = 0
return result
r._wrap_result = _wrap_result
return r
@access_token
def update_news(mediaId, newsDict, index=0, accessToken=None):
data = {
'media_id': mediaId,
'index': index,
'articles': newsDict, }
data = encode_send_dict(data)
if data is None: return ReturnValue({'errcode': -10001})
r = requests.post('%s/cgi-bin/material/update_news?access_token=%s' %
(SERVER_URL, accessToken), data=data)
def _wrap_result(result):
return ReturnValue(result.json())
r._wrap_result = _wrap_result
return r
@access_token
def get_image_url(openedFile, accessToken=None):
r = requests.post('%s/cgi-bin/media/uploadimg?access_token=%s' %
(SERVER_URL, accessToken), files={'file': openedFile})
def _wrap_result(result):
result = ReturnValue(result.json())
if 'url' in result: result['errcode'] = 0
return result
r._wrap_result = _wrap_result
return r
@access_token
def get_autoreply(accessToken=None):
r = requests.post('%s/cgi-bin/get_current_autoreply_info?access_token=%s' %
(SERVER_URL, accessToken))
def _wrap_result(result):
result = ReturnValue(result.json())
if 'is_autoreply_open' in result:
result['errcode'] = 0
return result
r._wrap_result = _wrap_result
return r
# File: nova/tests/keymgr/fake.py (repo bopopescu/Python_Stuff, Apache-2.0 license)
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Justin Santa Barbara
# Copyright 2012 OpenStack LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of a fake key manager."""
from nova.keymgr import mock_key_mgr
def fake_api():
return mock_key_mgr.MockKeyManager()
# File: db_app/migrations/0003_message_post_user_who_liked.py (repo MTaylorfullStack/python_july_20)
# Generated by Django 2.2 on 2020-07-29 00:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('db_app', '0002_message_post'),
]
operations = [
migrations.AddField(
model_name='message_post',
name='user_who_liked',
field=models.ManyToManyField(related_name='liked_post', to='db_app.User'),
),
]
# File: geatpy/templates/soeas/GA/studGA/soea_psy_studGA_templet.py (repo Passion-long/geatpy, MIT license)
# -*- coding: utf-8 -*-
import numpy as np
import geatpy as ea # import the geatpy library
from sys import path as paths
from os import path
paths.append(path.split(path.split(path.realpath(__file__))[0])[0])
class soea_psy_studGA_templet(ea.SoeaAlgorithm):
"""
    soea_psy_studGA_templet.py - Polysomy Stud GA templet (multi-chromosome stud GA template)

    Template notes:
        This template is the multi-chromosome version of the built-in template
        soea_studGA_templet, so the population object it works on is an instance of
        PsyPopulation, the multi-chromosome population class that supports mixed encodings.

    Algorithm description:
        This template implements the stud genetic algorithm; see reference [1] for the
        details of the algorithm flow.

    Usage notes:
        The objective function called by this template has the form aimFunc(pop), where
        pop is a population object representing one population. The Phen attribute of pop
        (the phenotype of the population's chromosomes) is equivalent to the matrix formed
        by the decision variables of all individuals. From Phen the function computes the
        matrix of objective values of all individuals and assigns it to pop.ObjV. If there
        are constraints, the constraint-violation matrix CV is computed and assigned to
        pop.CV (see the Geatpy data structures). The function returns nothing: the objective
        values are stored in pop.ObjV and the constraint violations in pop.CV.
        For example, if population is a population object, calling aimFunc(population)
        computes the objective values; population.ObjV then holds them and population.CV
        holds the constraint-violation matrix.
        If your problem does not follow this convention, modify this template or define a
        new one.

    References:
        [1] Khatib W , Fleming P J . The stud GA: A mini revolution?[C]// International
        Conference on Parallel Problem Solving from Nature. Springer, Berlin, Heidelberg, 1998.
"""
def __init__(self, problem, population):
        ea.SoeaAlgorithm.__init__(self, problem, population) # call the parent class constructor first
        if str(type(population)) != "<class 'PsyPopulation.PsyPopulation'>":
            raise RuntimeError('The population object passed in must be of type PsyPopulation')
        self.name = 'psy-studGA'
        self.problem = problem
        self.population = population
        self.selFunc = 'tour' # tournament selection operator
        # With multiple chromosomes we need one recombination operator and one mutation operator
        # per chromosome, and therefore one recombination and one mutation probability each.
        self.recFuncs = []
        self.mutFuncs = []
        self.pcs = []
        self.pms = []
        for i in range(population.ChromNum):
            if population.Encodings[i] == 'P':
                self.recFuncs.append('xovpmx') # partially matched crossover
                self.mutFuncs.append('mutinv') # inversion mutation of chromosome segments
            else:
                self.recFuncs.append('xovdp') # two-point crossover
                if population.Encodings[i] == 'BG':
                    self.mutFuncs.append('mutbin') # binary mutation
                elif population.Encodings[i] == 'RI':
                    self.mutFuncs.append('mutbga') # mutation operator from the breeder GA
                else:
                    raise RuntimeError("Encoding must be 'BG', 'RI' or 'P'.")
            self.pcs.append(1) # recombination probability
            self.pms.append(1) # mutation probability for the whole chromosome
def run(self):
        #========================== initial setup ==========================
        population = self.population
        NIND = population.sizes
        self.initialization() # initialize some dynamic parameters of the algorithm template
        #========================= prepare to evolve ========================
        population.initChrom(NIND) # initialize the population chromosome matrices (includes decoding; see the PsyPopulation class source)
        self.problem.aimFunc(population) # compute the objective values of the population
        population.FitnV = ea.scaling(self.problem.maxormins * population.ObjV, population.CV) # compute fitness
        self.evalsNum = population.sizes # record the number of evaluations
        #========================== start evolving ==========================
        while self.terminated(population) == False:
            bestIdx = np.argmax(population.FitnV, axis = 0) # index of the best individual of this generation; axis=0 makes a vector be returned
            studPop = population[np.tile(bestIdx, (NIND//2))] # copy the best individual NIND//2 times to form the "stud population"
            restPop = population[np.where(np.array(range(NIND)) != bestIdx)[0]] # the population of all individuals except the elite one
            # select individuals that will later be mated with the stud population
            tempPop = restPop[ea.selecting(self.selFunc, restPop.FitnV, (NIND - studPop.sizes))]
            # merge the stud population with the selected individuals
            population = studPop + tempPop
            # evolutionary operators: recombine and mutate each chromosome encoding separately
            for i in range(population.ChromNum):
                population.Chroms[i] = ea.recombin(self.recFuncs[i], population.Chroms[i], self.pcs[i]) # recombination
                population.Chroms[i] = ea.mutate(self.mutFuncs[i], population.Encodings[i], population.Chroms[i], population.Fields[i], self.pms[i]) # mutation
            # compute the objective values of the evolved individuals
            population.Phen = population.decoding() # decode the chromosomes
            self.problem.aimFunc(population)
            self.evalsNum += population.sizes # update the number of evaluations
            population.FitnV = ea.scaling(self.problem.maxormins * population.ObjV, population.CV) # compute fitness
        return self.finishing(population) # call finishing to do the final bookkeeping and return the result
# File: dist/weewx-4.10.0/bin/weewx/drivers/cc3000.py (repo tomdotorg/docker-weewx; GPL/Apache licensed)
#!/usr/bin/env python
#
# Copyright 2014 Matthew Wall
# See the file LICENSE.txt for your rights.
"""Driver for CC3000 data logger
http://www.rainwise.com/products/attachments/6832/20110518125531.pdf
There are a few variants:
CC-3000_ - __
| |
| 41 = 418 MHz
| 42 = 433 MHz
| __ = 2.4 GHz (LR compatible)
R = serial (RS232, RS485)
_ = USB 2.0
The CC3000 communicates using FTDI USB serial bridge. The CC3000R has both
RS-232 and RS-485 serial ports, only one of which may be used at a time.
A long range (LR) version transmits up to 2 km using 2.4GHz.
The RS232 communicates using 115200 N-8-1
The instrument cluster contains a DIP switch control with values 0-3 and a
default of 0. This setting prevents interference when there are multiple
weather stations within radio range.
The CC3000 includes a temperature sensor - that is the source of inTemp. The
manual indicates that the CC3000 should run for 3 or 4 hours before applying
any calibration to offset the heat generated by CC3000 electronics.
The CC3000 uses 4 AA batteries to maintain its clock. Use only rechargeable
NiMH batteries.
The logger contains 2MB of memory, with a capacity of 49834 records (over 11
months of data at a 10 minute logging interval). The exact capacity depends
on the sensors; the basic sensor record is 42 bytes.
The logger does not delete old records when it fills up; once the logger is
full, new data are lost. So the driver must periodically clear the logger
memory.
This driver does not support hardware record_generation. It does support
catchup on startup.
If you request many history records then interrupt the receive, the logger will
continue to send history records until it sends all that were requested. As a
result, any queries made while the logger is still sending will fail.
The rainwise rain bucket measures 0.01 inches per tip. The logger firmware
automatically converts the bucket tip count to the measure of rain in ENGLISH
or METRIC units.
The historical records (DOWNLOAD), as well as current readings (NOW) track
the amount of rain since midnight; i.e., DOWNLOAD records rain value resets to 0
at midnight and NOW records do the same.
The RAIN=? returns a rain counter that only resets with the RAIN=RESET command.
This counter isn't used by weewx. Also, RAIN=RESET doesn't just reset this
counter, it also resets the daily rain count.
Logger uses the following units:
ENGLISH METRIC
wind mph m/s
rain inch mm
pressure inHg mbar
temperature F C
The CC3000 has the habit of failing to execute about 1 in 6000
commands. That's the bad news. The good news is that the
condition is easily detected and the driver can recover in about 1s.
The telltale sign of failure is that the first read after sending
the command (to read the echo of the command) times out. As such,
the timeout is set to 1s. If the timeout is hit, the buffers
are flushed and the command is retried. Oh, and there is one
more peculiar part to this. On the retry, the command is echoed
as an empty string. That empty string is expected on the retry
and execution continues.
weewx includes a logwatch script that makes it easy to see the above
behavior in action. In the snippet below, 3 NOW commands and one
TIME=? were retried successfully. The Retry Info section shows
that all succeeded on the second try.
--------------------- weewx Begin ------------------------
average station clock skew: 0.0666250000000001
min: -0.53 max: 0.65 samples: 160
counts:
archive: records added 988
cc3000: NOW cmd echo timed out 3
cc3000: NOW echoed as empty string 3
cc3000: NOW successful retries 3
cc3000: TIME=? cmd echo timed out 1
cc3000: TIME=? echoed as empty string 1
cc3000: TIME=? successful retries 1
....
cc3000 Retry Info:
Dec 29 00:50:04 ella weewx[24145] INFO weewx.drivers.cc3000: TIME=?: Retry worked. Total tries: 2
Dec 29 04:46:21 ella weewx[24145] INFO weewx.drivers.cc3000: NOW: Retry worked. Total tries: 2
Dec 29 08:31:11 ella weewx[22295] INFO weewx.drivers.cc3000: NOW: Retry worked. Total tries: 2
Dec 29 08:50:51 ella weewx[22295] INFO weewx.drivers.cc3000: NOW: Retry worked. Total tries: 2
....
---------------------- weewx End -------------------------
Clearing memory on the CC3000 takes about 12s. As such, the 1s
timeout mentioned above won't work for this command. Consequently,
when executing MEM=CLEAR, the timeout is set to 20s. Should this
command fail, rather than losing 1 second retrying, 20 seconds
will be lost.
The CC3000 very rarely stops returning observation values.
[Observed once in 28 months of operation over two devices.]
Operation returns to normal after the CC3000 is rebooted.
This driver now reboots when this situation is detected.
If this happens, the log will show:
INFO weewx.drivers.cc3000: No data from sensors, rebooting.
INFO weewx.drivers.cc3000: Back from a reboot:
INFO weewx.drivers.cc3000: ....................
INFO weewx.drivers.cc3000:
INFO weewx.drivers.cc3000: Rainwise CC-3000 Version: 1.3 Build 022 Dec 02 2016
INFO weewx.drivers.cc3000: Flash ID 202015
INFO weewx.drivers.cc3000: Initializing memory...OK.
This driver was tested with:
Rainwise CC-3000 Version: 1.3 Build 022 Dec 02 2016
Earlier versions of this driver were tested with:
Rainwise CC-3000 Version: 1.3 Build 006 Sep 04 2013
Rainwise CC-3000 Version: 1.3 Build 016 Aug 21 2014
"""
# FIXME: Come up with a way to deal with firmware inconsistencies. if we do
# a strict protocol where we wait for an OK response, but one version of
# the firmware responds whereas another version does not, this leads to
# comm problems. specializing the code to handle quirks of each
# firmware version is not desirable.
# UPDATE: As of 0.30, the driver does a flush of the serial buffer before
# doing any command. The problem detailed above (OK not being returned)
# was probably because the timeout was too short for the MEM=CLEAR
# command. That command gets a longer timeout in version 0.30.
# FIXME: Figure out why system log messages are lost. When reading from the logger
# there are many messages to the log that just do not show up, or msgs
# that appear in one run but not in a second, identical run. I suspect
# that system log cannot handle the load? or its buffer is not big enough?
# Update:
# With debug=0, this has never been observed in v1.3 Build 22 Dec 02 2016.
# With debug=1, tailing the log looks like everything is running, but no
# attempt was made to compare log data between runs. Observations on
# NUC7i5 running Debian Buster.
from __future__ import with_statement
from __future__ import absolute_import
from __future__ import print_function
import datetime
import logging
import math
import serial
import string
import sys
import time
from six import byte2int
from six import PY2
from six.moves import input
import weeutil.weeutil
import weewx.drivers
import weewx.wxformulas
from weeutil.weeutil import to_int
from weewx.crc16 import crc16
log = logging.getLogger(__name__)
DRIVER_NAME = 'CC3000'
DRIVER_VERSION = '0.40'
def loader(config_dict, engine):
return CC3000Driver(**config_dict[DRIVER_NAME])
def configurator_loader(config_dict):
return CC3000Configurator()
def confeditor_loader():
return CC3000ConfEditor()
DEBUG_SERIAL = 0
DEBUG_CHECKSUM = 0
DEBUG_OPENCLOSE = 0
class ChecksumError(weewx.WeeWxIOError):
def __init__(self, msg):
weewx.WeeWxIOError.__init__(self, msg)
class ChecksumMismatch(ChecksumError):
def __init__(self, a, b, buf=None):
msg = "Checksum mismatch: 0x%04x != 0x%04x" % (a, b)
if buf is not None:
msg = "%s (%s)" % (msg, buf)
ChecksumError.__init__(self, msg)
class BadCRC(ChecksumError):
def __init__(self, a, b, buf=None):
msg = "Bad CRC: 0x%04x != '%s'" % (a, b)
if buf is not None:
msg = "%s (%s)" % (msg, buf)
ChecksumError.__init__(self, msg)
class CC3000Configurator(weewx.drivers.AbstractConfigurator):
def add_options(self, parser):
super(CC3000Configurator, self).add_options(parser)
parser.add_option("--info", dest="info", action="store_true",
help="display weather station configuration")
parser.add_option("--current", dest="current", action="store_true",
help="display current weather readings")
parser.add_option("--history", dest="nrecords", type=int, metavar="N",
help="display N records (0 for all records)")
parser.add_option("--history-since", dest="nminutes", metavar="N",
type=int, help="display records since N minutes ago")
parser.add_option("--clear-memory", dest="clear", action="store_true",
help="clear station memory")
parser.add_option("--get-header", dest="gethead", action="store_true",
help="display data header")
parser.add_option("--get-rain", dest="getrain", action="store_true",
help="get the rain counter")
parser.add_option("--reset-rain", dest="resetrain", action="store_true",
help="reset the rain counter")
parser.add_option("--get-max", dest="getmax", action="store_true",
help="get the max values observed")
parser.add_option("--reset-max", dest="resetmax", action="store_true",
help="reset the max counters")
parser.add_option("--get-min", dest="getmin", action="store_true",
help="get the min values observed")
parser.add_option("--reset-min", dest="resetmin", action="store_true",
help="reset the min counters")
parser.add_option("--get-clock", dest="getclock", action="store_true",
help="display station clock")
parser.add_option("--set-clock", dest="setclock", action="store_true",
help="set station clock to computer time")
parser.add_option("--get-interval", dest="getint", action="store_true",
help="display logger archive interval, in seconds")
parser.add_option("--set-interval", dest="interval", metavar="N",
type=int,
help="set logging interval to N seconds")
parser.add_option("--get-units", dest="getunits", action="store_true",
help="show units of logger")
parser.add_option("--set-units", dest="units", metavar="UNITS",
help="set units to METRIC or ENGLISH")
parser.add_option('--get-dst', dest='getdst', action='store_true',
help='display daylight savings settings')
parser.add_option('--set-dst', dest='setdst',
metavar='mm/dd HH:MM,mm/dd HH:MM,[MM]M',
help='set daylight savings start, end, and amount')
parser.add_option("--get-channel", dest="getch", action="store_true",
help="display the station channel")
parser.add_option("--set-channel", dest="ch", metavar="CHANNEL",
type=int,
help="set the station channel")
def do_options(self, options, parser, config_dict, prompt): # @UnusedVariable
self.driver = CC3000Driver(**config_dict[DRIVER_NAME])
if options.current:
print(self.driver.get_current())
elif options.nrecords is not None:
for r in self.driver.station.gen_records(options.nrecords):
print(r)
elif options.nminutes is not None:
since_ts = time.mktime((datetime.datetime.now()-datetime.timedelta(
minutes=options.nminutes)).timetuple())
for r in self.driver.gen_records_since_ts(since_ts):
print(r)
elif options.clear:
self.clear_memory(options.noprompt)
elif options.gethead:
print(self.driver.station.get_header())
elif options.getrain:
print(self.driver.station.get_rain())
elif options.resetrain:
self.reset_rain(options.noprompt)
elif options.getmax:
print(self.driver.station.get_max())
elif options.resetmax:
self.reset_max(options.noprompt)
elif options.getmin:
print(self.driver.station.get_min())
elif options.resetmin:
self.reset_min(options.noprompt)
elif options.getclock:
print(self.driver.station.get_time())
elif options.setclock:
self.set_clock(options.noprompt)
elif options.getdst:
print(self.driver.station.get_dst())
elif options.setdst:
self.set_dst(options.setdst, options.noprompt)
elif options.getint:
print(self.driver.station.get_interval() * 60)
elif options.interval is not None:
self.set_interval(options.interval / 60, options.noprompt)
elif options.getunits:
print(self.driver.station.get_units())
elif options.units is not None:
self.set_units(options.units, options.noprompt)
elif options.getch:
print(self.driver.station.get_channel())
elif options.ch is not None:
self.set_channel(options.ch, options.noprompt)
else:
print("Firmware:", self.driver.station.get_version())
print("Time:", self.driver.station.get_time())
print("DST:", self.driver.station.get_dst())
print("Units:", self.driver.station.get_units())
print("Memory:", self.driver.station.get_memory_status())
print("Interval:", self.driver.station.get_interval() * 60)
print("Channel:", self.driver.station.get_channel())
print("Charger:", self.driver.station.get_charger())
print("Baro:", self.driver.station.get_baro())
print("Rain:", self.driver.station.get_rain())
print("HEADER:", self.driver.station.get_header())
print("MAX:", self.driver.station.get_max())
print("MIN:", self.driver.station.get_min())
self.driver.closePort()
def clear_memory(self, noprompt):
print(self.driver.station.get_memory_status())
ans = weeutil.weeutil.y_or_n("Clear console memory (y/n)? ",
noprompt)
if ans == 'y':
print('Clearing memory (takes approx. 12s)')
self.driver.station.clear_memory()
print(self.driver.station.get_memory_status())
else:
print("Clear memory cancelled.")
def reset_rain(self, noprompt):
print(self.driver.station.get_rain())
ans = weeutil.weeutil.y_or_n("Reset rain counter (y/n)? ",
noprompt)
if ans == 'y':
print('Resetting rain counter')
self.driver.station.reset_rain()
print(self.driver.station.get_rain())
else:
print("Reset rain cancelled.")
def reset_max(self, noprompt):
print(self.driver.station.get_max())
ans = weeutil.weeutil.y_or_n("Reset max counters (y/n)? ",
noprompt)
if ans == 'y':
print('Resetting max counters')
self.driver.station.reset_max()
print(self.driver.station.get_max())
else:
print("Reset max cancelled.")
def reset_min(self, noprompt):
print(self.driver.station.get_min())
ans = weeutil.weeutil.y_or_n("Reset min counters (y/n)? ",
noprompt)
if ans == 'y':
print('Resetting min counters')
self.driver.station.reset_min()
print(self.driver.station.get_min())
else:
print("Reset min cancelled.")
def set_interval(self, interval, noprompt):
if interval < 0 or 60 < interval:
raise ValueError("Logger interval must be 0-60 minutes")
print("Interval is", self.driver.station.get_interval(), " minutes.")
ans = weeutil.weeutil.y_or_n("Set interval to %d minutes (y/n)? " % interval,
noprompt)
if ans == 'y':
print("Setting interval to %d minutes" % interval)
self.driver.station.set_interval(interval)
print("Interval is now", self.driver.station.get_interval())
else:
print("Set interval cancelled.")
def set_clock(self, noprompt):
print("Station clock is", self.driver.station.get_time())
print("Current time is", datetime.datetime.now())
ans = weeutil.weeutil.y_or_n("Set station time to current time (y/n)? ",
noprompt)
if ans == 'y':
print("Setting station clock to %s" % datetime.datetime.now())
self.driver.station.set_time()
print("Station clock is now", self.driver.station.get_time())
else:
print("Set clock cancelled.")
def set_units(self, units, noprompt):
if units.lower() not in ['metric', 'english']:
raise ValueError("Units must be METRIC or ENGLISH")
print("Station units is", self.driver.station.get_units())
ans = weeutil.weeutil.y_or_n("Set station units to %s (y/n)? " % units,
noprompt)
if ans == 'y':
print("Setting station units to %s" % units)
self.driver.station.set_units(units)
print("Station units is now", self.driver.station.get_units())
else:
print("Set units cancelled.")
def set_dst(self, dst, noprompt):
if dst != '0' and len(dst.split(',')) != 3:
raise ValueError("DST must be 0 (disabled) or start, stop, amount "
"with the format mm/dd HH:MM, mm/dd HH:MM, [MM]M")
print("Station DST is", self.driver.station.get_dst())
ans = weeutil.weeutil.y_or_n("Set station DST to %s (y/n)? " % dst,
noprompt)
if ans == 'y':
print("Setting station DST to %s" % dst)
self.driver.station.set_dst(dst)
print("Station DST is now", self.driver.station.get_dst())
else:
print("Set DST cancelled.")
def set_channel(self, ch, noprompt):
if ch not in [0, 1, 2, 3]:
raise ValueError("Channel must be one of 0, 1, 2, or 3")
print("Station channel is", self.driver.station.get_channel())
ans = weeutil.weeutil.y_or_n("Set station channel to %s (y/n)? " % ch,
noprompt)
if ans == 'y':
print("Setting station channel to %s" % ch)
self.driver.station.set_channel(ch)
print("Station channel is now", self.driver.station.get_channel())
else:
print("Set channel cancelled.")
class CC3000Driver(weewx.drivers.AbstractDevice):
"""weewx driver that communicates with a RainWise CC3000 data logger."""
# map rainwise names to database schema names
DEFAULT_SENSOR_MAP = {
'dateTime': 'TIMESTAMP',
'outTemp': 'TEMP OUT',
'outHumidity': 'HUMIDITY',
'windDir': 'WIND DIRECTION',
'windSpeed': 'WIND SPEED',
'windGust': 'WIND GUST',
'pressure': 'PRESSURE',
'inTemp': 'TEMP IN',
'extraTemp1': 'TEMP 1',
'extraTemp2': 'TEMP 2',
'day_rain_total': 'RAIN',
'supplyVoltage': 'STATION BATTERY',
'consBatteryVoltage': 'BATTERY BACKUP',
'radiation': 'SOLAR RADIATION',
'UV': 'UV INDEX',
}
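    # A user-supplied sensor_map in weewx.conf is merged on top of these defaults in
    # __init__ (self.sensor_map.update), so single mappings can be overridden without
    # re-listing the whole table.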
def __init__(self, **stn_dict):
log.info('Driver version is %s' % DRIVER_VERSION)
global DEBUG_SERIAL
DEBUG_SERIAL = int(stn_dict.get('debug_serial', 0))
global DEBUG_CHECKSUM
DEBUG_CHECKSUM = int(stn_dict.get('debug_checksum', 0))
global DEBUG_OPENCLOSE
DEBUG_OPENCLOSE = int(stn_dict.get('debug_openclose', 0))
self.max_tries = int(stn_dict.get('max_tries', 5))
self.model = stn_dict.get('model', 'CC3000')
port = stn_dict.get('port', CC3000.DEFAULT_PORT)
log.info('Using serial port %s' % port)
self.polling_interval = float(stn_dict.get('polling_interval', 2))
log.info('Polling interval is %s seconds' % self.polling_interval)
self.use_station_time = weeutil.weeutil.to_bool(
stn_dict.get('use_station_time', True))
log.info('Using %s time for loop packets' %
('station' if self.use_station_time else 'computer'))
# start with the default sensormap, then augment with user-specified
self.sensor_map = dict(self.DEFAULT_SENSOR_MAP)
if 'sensor_map' in stn_dict:
self.sensor_map.update(stn_dict['sensor_map'])
log.info('Sensor map is %s' % self.sensor_map)
# periodically check the logger memory, then clear it if necessary.
# these track the last time a check was made, and how often to make
# the checks. threshold of None indicates do not clear logger.
self.logger_threshold = to_int(
stn_dict.get('logger_threshold', 0))
self.last_mem_check = 0
self.mem_interval = 7 * 24 * 3600
if self.logger_threshold != 0:
log.info('Clear logger at %s records' % self.logger_threshold)
# track the last rain counter value so we can determine deltas
self.last_rain = None
self.station = CC3000(port)
self.station.open()
# report the station configuration
settings = self._init_station_with_retries(self.station, self.max_tries)
log.info('Firmware: %s' % settings['firmware'])
self.arcint = settings['arcint']
log.info('Archive interval: %s' % self.arcint)
self.header = settings['header']
log.info('Header: %s' % self.header)
self.units = weewx.METRICWX if settings['units'] == 'METRIC' else weewx.US
log.info('Units: %s' % settings['units'])
log.info('Channel: %s' % settings['channel'])
log.info('Charger status: %s' % settings['charger'])
log.info('Memory: %s' % self.station.get_memory_status())
def time_to_next_poll(self):
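        # Polls are aligned to multiples of polling_interval.  For example, with
        # polling_interval=2 and now=100.7: next_poll_event = int(100.7/2)*2 + 2 = 102,
        # so this returns 1.3 seconds.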
now = time.time()
next_poll_event = int(now / self.polling_interval) * self.polling_interval + self.polling_interval
log.debug('now: %f, polling_interval: %d, next_poll_event: %f' % (now, self.polling_interval, next_poll_event))
secs_to_poll = next_poll_event - now
log.debug('Next polling event in %f seconds' % secs_to_poll)
return secs_to_poll
def genLoopPackets(self):
cmd_mode = True
if self.polling_interval == 0:
self.station.set_auto()
cmd_mode = False
reboot_attempted = False
ntries = 0
while ntries < self.max_tries:
ntries += 1
try:
# Poll on polling_interval boundaries.
if self.polling_interval != 0:
time.sleep(self.time_to_next_poll())
values = self.station.get_current_data(cmd_mode)
now = int(time.time())
ntries = 0
log.debug("Values: %s" % values)
if values:
packet = self._parse_current(
values, self.header, self.sensor_map)
log.debug("Parsed: %s" % packet)
if packet and 'dateTime' in packet:
if not self.use_station_time:
packet['dateTime'] = int(time.time() + 0.5)
packet['usUnits'] = self.units
if 'day_rain_total' in packet:
packet['rain'] = self._rain_total_to_delta(
packet['day_rain_total'], self.last_rain)
self.last_rain = packet['day_rain_total']
else:
log.debug("No rain in packet: %s" % packet)
log.debug("Packet: %s" % packet)
yield packet
else:
if not reboot_attempted:
# To be on the safe side, max of one reboot per execution.
reboot_attempted = True
log.info("No data from sensors, rebooting.")
startup_msgs = self.station.reboot()
log.info("Back from a reboot:")
for line in startup_msgs:
log.info(line)
# periodically check memory, clear if necessary
if time.time() - self.last_mem_check > self.mem_interval:
nrec = self.station.get_history_usage()
self.last_mem_check = time.time()
if nrec is None:
log.info("Memory check: Cannot determine memory usage")
else:
log.info("Logger is at %d records, "
"logger clearing threshold is %d" %
(nrec, self.logger_threshold))
if self.logger_threshold != 0 and nrec >= self.logger_threshold:
log.info("Clearing all records from logger")
self.station.clear_memory()
except (serial.serialutil.SerialException, weewx.WeeWxIOError) as e:
log.error("Failed attempt %d of %d to get data: %s" %
(ntries, self.max_tries, e))
else:
msg = "Max retries (%d) exceeded" % self.max_tries
log.error(msg)
raise weewx.RetriesExceeded(msg)
def genStartupRecords(self, since_ts):
"""Return archive records from the data logger. Download all records
then return the subset since the indicated timestamp.
Assumptions:
- the units are consistent for the entire history.
- the archive interval is constant for entire history.
- the HDR for archive records is the same as current HDR
"""
log.debug("GenStartupRecords: since_ts=%s" % since_ts)
log.info('Downloading new records (if any).')
last_rain = None
new_records = 0
for pkt in self.gen_records_since_ts(since_ts):
log.debug("Packet: %s" % pkt)
pkt['usUnits'] = self.units
pkt['interval'] = self.arcint
if 'day_rain_total' in pkt:
pkt['rain'] = self._rain_total_to_delta(
pkt['day_rain_total'], last_rain)
last_rain = pkt['day_rain_total']
else:
log.debug("No rain in record: %s" % r)
log.debug("Packet: %s" % pkt)
new_records += 1
yield pkt
log.info('Downloaded %d new records.' % new_records)
def gen_records_since_ts(self, since_ts):
return self.station.gen_records_since_ts(self.header, self.sensor_map, since_ts)
@property
def hardware_name(self):
return self.model
@property
def archive_interval(self):
return self.arcint
def getTime(self):
try:
v = self.station.get_time()
return _to_ts(v)
except ValueError as e:
log.error("getTime failed: %s" % e)
return 0
def setTime(self):
self.station.set_time()
@staticmethod
def _init_station_with_retries(station, max_tries):
for cnt in range(max_tries):
try:
return CC3000Driver._init_station(station)
except (serial.serialutil.SerialException, weewx.WeeWxIOError) as e:
log.error("Failed attempt %d of %d to initialize station: %s" %
(cnt + 1, max_tries, e))
else:
raise weewx.RetriesExceeded("Max retries (%d) exceeded while initializing station" % max_tries)
@staticmethod
def _init_station(station):
station.flush()
station.wakeup()
station.set_echo()
settings = dict()
settings['firmware'] = station.get_version()
settings['arcint'] = station.get_interval() * 60 # arcint is in seconds
settings['header'] = CC3000Driver._parse_header(station.get_header())
settings['units'] = station.get_units()
settings['channel'] = station.get_channel()
settings['charger'] = station.get_charger()
return settings
@staticmethod
def _rain_total_to_delta(rain_total, last_rain):
# calculate the rain delta between the current and previous rain totals.
return weewx.wxformulas.calculate_rain(rain_total, last_rain)
@staticmethod
def _parse_current(values, header, sensor_map):
return CC3000Driver._parse_values(values, header, sensor_map,
"%Y/%m/%d %H:%M:%S")
@staticmethod
def _parse_values(values, header, sensor_map, fmt):
"""parse the values and map them into the schema names. if there is
a failure for any one value, then the entire record fails."""
pkt = dict()
if len(values) != len(header) + 1:
log.info("Values/header mismatch: %s %s" % (values, header))
return pkt
for i, v in enumerate(values):
if i >= len(header):
continue
label = None
for m in sensor_map:
if sensor_map[m] == header[i]:
label = m
if label is None:
continue
try:
if header[i] == 'TIMESTAMP':
pkt[label] = _to_ts(v, fmt)
else:
pkt[label] = float(v)
except ValueError as e:
log.error("Parse failed for '%s' '%s': %s (idx=%s values=%s)" %
(header[i], v, e, i, values))
return dict()
return pkt
@staticmethod
def _parse_header(header):
h = []
for v in header:
if v == 'HDR' or v[0:1] == '!':
continue
h.append(v.replace('"', ''))
return h
def get_current(self):
data = self.station.get_current_data()
return self._parse_current(data, self.header, self.sensor_map)
def _to_ts(tstr, fmt="%Y/%m/%d %H:%M:%S"):
return time.mktime(time.strptime(tstr, fmt))
def _format_bytes(buf):
# byte2int not necessary in PY3 and will raise an exception
# if used ("int object is not subscriptable")
if PY2:
return ' '.join(['%0.2X' % byte2int(c) for c in buf])
return ' '.join(['%0.2X' % c for c in buf])
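# Responses that carry a checksum look like "<payload>!<CCCC>", where <CCCC> is the
# CRC-16 of <payload> rendered as 4 hex digits.  _check_crc() recomputes the CRC over
# everything before the '!' and compares it to the hex value after it, raising
# ChecksumMismatch on a mismatch or BadCRC when the hex field cannot be parsed.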
def _check_crc(buf):
idx = buf.find(b'!')
if idx < 0:
return
a = 0
b = 0
cs = b''
try:
cs = buf[idx+1:idx+5]
if DEBUG_CHECKSUM:
log.debug("Found checksum at %d: %s" % (idx, cs))
a = crc16(buf[0:idx]) # calculate checksum
if DEBUG_CHECKSUM:
log.debug("Calculated checksum %x" % a)
b = int(cs, 16) # checksum provided in data
if a != b:
raise ChecksumMismatch(a, b, buf)
except ValueError as e:
raise BadCRC(a, cs, buf)
class CC3000(object):
DEFAULT_PORT = '/dev/ttyUSB0'
def __init__(self, port):
self.port = port
self.baudrate = 115200
self.timeout = 1 # seconds for everyting except MEM=CLEAR
# MEM=CLEAR of even two records needs a timeout of 13 or more. 20 is probably safe.
# flush cmd echo value
# 0.000022 0.000037 12.819934 0.000084
# 0.000018 0.000036 12.852024 0.000088
self.mem_clear_timeout = 20 # reopen w/ bigger timeout for MEM=CLEAR
self.serial_port = None
def __enter__(self):
self.open()
return self
def __exit__(self, _, value, traceback):
self.close()
def open(self, timeoutOverride=None):
if DEBUG_OPENCLOSE:
log.debug("Open serial port %s" % self.port)
to = timeoutOverride if timeoutOverride is not None else self.timeout
self.serial_port = serial.Serial(self.port, self.baudrate,
timeout=to)
def close(self):
if self.serial_port is not None:
if DEBUG_OPENCLOSE:
log.debug("Close serial port %s" % self.port)
self.serial_port.close()
self.serial_port = None
def write(self, data):
if not PY2:
# Encode could perhaps fail on bad user input (DST?).
# If so, this will be handled later when it is observed that the
# command does not do what is expected.
data = data.encode('ascii', 'ignore')
if DEBUG_SERIAL:
log.debug("Write: '%s'" % data)
n = self.serial_port.write(data)
if n is not None and n != len(data):
raise weewx.WeeWxIOError("Write expected %d chars, sent %d" %
(len(data), n))
def read(self):
"""The station sends CR NL before and after any response. Some
responses have a 4-byte CRC checksum at the end, indicated with an
exclamation. Not every response has a checksum.
"""
data = self.serial_port.readline()
if DEBUG_SERIAL:
log.debug("Read: '%s' (%s)" % (data, _format_bytes(data)))
data = data.strip()
_check_crc(data)
if not PY2:
# CRC passed, so this is unlikely.
# Ignore as irregular data will be handled later.
data = data.decode('ascii', 'ignore')
return data
def flush(self):
self.flush_input()
self.flush_output()
def flush_input(self):
log.debug("Flush input buffer")
self.serial_port.flushInput()
def flush_output(self):
log.debug("Flush output buffer")
self.serial_port.flushOutput()
def queued_bytes(self):
return self.serial_port.inWaiting()
def send_cmd(self, cmd):
"""Any command must be terminated with a CR"""
self.write("%s\r" % cmd)
def command(self, cmd):
# Sample timings for first fifteen NOW commands after startup.
# Flush CMD ECHO VALUE
# -------- -------- -------- --------
# 0.000021 0.000054 0.041557 0.001364
# 0.000063 0.000109 0.040432 0.001666
# 0.000120 0.000123 0.024272 0.016871
# 0.000120 0.000127 0.025148 0.016657
# 0.000119 0.000126 0.024966 0.016665
# 0.000130 0.000142 0.041037 0.001791
# 0.000120 0.000126 0.023533 0.017023
# 0.000120 0.000137 0.024336 0.016747
# 0.000117 0.000133 0.026254 0.016684
# 0.000120 0.000140 0.025014 0.016739
# 0.000121 0.000134 0.024801 0.016779
# 0.000120 0.000141 0.024635 0.016906
# 0.000118 0.000129 0.024354 0.016894
# 0.000120 0.000133 0.024214 0.016861
# 0.000118 0.000122 0.024599 0.016865
# MEM=CLEAR needs a longer timeout. >12s to clear a small number of records has been observed.
# It also appears to be highly variable. The two examples below are from two different CC3000s.
#
# In this example, clearing at 11,595 records took > 6s.
# Aug 18 06:46:21 charlemagne weewx[684]: cc3000: logger is at 11595 records, logger clearing threshold is 10000
# Aug 18 06:46:21 charlemagne weewx[684]: cc3000: clearing all records from logger
# Aug 18 06:46:21 charlemagne weewx[684]: cc3000: MEM=CLEAR: The resetting of timeout to 20 took 0.000779 seconds.
# Aug 18 06:46:28 charlemagne weewx[684]: cc3000: MEM=CLEAR: times: 0.000016 0.000118 6.281638 0.000076
# Aug 18 06:46:28 charlemagne weewx[684]: cc3000: MEM=CLEAR: The resetting of timeout to 1 took 0.001444 seconds.
#
# In this example, clearing at 11,475 records took > 12s.
# Aug 18 07:17:14 ella weewx[615]: cc3000: logger is at 11475 records, logger clearing threshold is 10000
# Aug 18 07:17:14 ella weewx[615]: cc3000: clearing all records from logger
# Aug 18 07:17:14 ella weewx[615]: cc3000: MEM=CLEAR: The resetting of timeout to 20 took 0.001586 seconds.
# Aug 18 07:17:27 ella weewx[615]: cc3000: MEM=CLEAR: times: 0.000020 0.000058 12.459346 0.000092
# Aug 18 07:17:27 ella weewx[615]: cc3000: MEM=CLEAR: The resetting of timeout to 1 took 0.001755 seconds.
#
# Here, clearing 90 records took very close to 13 seconds.
# Aug 18 14:46:00 ella weewx[24602]: cc3000: logger is at 91 records, logger clearing threshold is 90
# Aug 18 14:46:00 ella weewx[24602]: cc3000: clearing all records from logger
# Aug 18 14:46:00 ella weewx[24602]: cc3000: MEM=CLEAR: The resetting of timeout to 20 took 0.000821 seconds.
# Aug 18 14:46:13 ella weewx[24602]: cc3000: MEM=CLEAR: times: 0.000037 0.000061 12.970494 0.000084
# Aug 18 14:46:13 ella weewx[24602]: cc3000: MEM=CLEAR: The resetting of timeout to 1 took 0.001416 seconds.
reset_timeout = False
# MEM=CLEAR needs a much larger timeout value. Reopen with that larger timeout and reset below.
#
# Closing and reopening with a different timeout is quick:
# Aug 18 07:17:14 ella weewx[615]: cc3000: MEM=CLEAR: The resetting of timeout to 20 took 0.001586 seconds.
# Aug 18 07:17:27 ella weewx[615]: cc3000: MEM=CLEAR: The resetting of timeout to 1 took 0.001755 seconds.
if cmd == 'MEM=CLEAR':
reset_timeout = True # Reopen with default timeout in finally.
t1 = time.time()
self.close()
self.open(self.mem_clear_timeout)
t2 = time.time()
close_open_time = t2 - t1
log.info("%s: The resetting of timeout to %d took %f seconds." % (cmd, self.mem_clear_timeout, close_open_time))
try:
return self.exec_cmd_with_retries(cmd)
finally:
if reset_timeout:
t1 = time.time()
self.close()
self.open()
reset_timeout = True
t2 = time.time()
close_open_time = t2 - t1
log.info("%s: The resetting of timeout to %d took %f seconds." % (cmd, self.timeout, close_open_time))
def exec_cmd_with_retries(self, cmd):
"""Send cmd. Time the reading of the echoed command. If the measured
time is >= timeout, the cc3000 is borked. The input and output buffers
will be flushed and the command retried. Try up to 10 times.
It practice, one retry does the trick.
cc3000s.
"""
attempts = 0
while attempts < 10:
attempts += 1
t1 = time.time()
self.flush() # flush
t2 = time.time()
flush_time = t2 - t1
self.send_cmd(cmd) # send cmd
t3 = time.time()
cmd_time = t3 - t2
data = self.read() # read the cmd echo
t4 = time.time()
echo_time = t4 - t3
if ((cmd != 'MEM=CLEAR' and echo_time >= self.timeout)
or (cmd == 'MEM=CLEAR' and echo_time >= self.mem_clear_timeout)):
# The command timed out reading back the echo of the command.
# No need to read the values as it will also time out.
# Log it and retry. In practice, the retry always works.
log.info("%s: times: %f %f %f -retrying-" %
(cmd, flush_time, cmd_time, echo_time))
log.info('%s: Reading cmd echo timed out (%f seconds), retrying.' %
(cmd, echo_time))
# Retrying setting the time must be special cased as now a little
# more than one second has passed. As such, redo the command
# with the current time.
if cmd.startswith("TIME=") and cmd != "TIME=?":
cmd = self._compose_set_time_command()
# Retry
else:
# Success, the reading of the echoed command did not time out.
break
if data != cmd and attempts > 1:
# After retrying, the cmd always echoes back as an empty string.
if data == '':
log.info("%s: Accepting empty string as cmd echo." % cmd)
else:
raise weewx.WeeWxIOError(
"command: Command failed: cmd='%s' reply='%s'" % (cmd, data))
t5 = time.time()
retval = self.read()
t6 = time.time()
value_time = t6 - t5
if cmd == 'MEM=CLEAR':
log.info("%s: times: %f %f %f %f" %
(cmd, flush_time, cmd_time, echo_time, value_time))
if attempts > 1:
if retval != '':
log.info("%s: Retry worked. Total tries: %d" % (cmd, attempts))
else:
log.info("%s: Retry failed." % cmd)
log.info("%s: times: %f %f %f %f" %
(cmd, flush_time, cmd_time, echo_time, value_time))
return retval
def get_version(self):
log.debug("Get firmware version")
return self.command("VERSION")
def reboot(self):
# Reboot outputs the following (after the reboot):
# ....................
# <blank line>
# Rainwise CC-3000 Version: 1.3 Build 022 Dec 02 2016
# Flash ID 202015
# Initializing memory...OK.
log.debug("Rebooting CC3000.")
self.send_cmd("REBOOT")
time.sleep(5)
dots = self.read()
blank = self.read()
ver = self.read()
flash_id = self.read()
init_msg = self.read()
return [dots, blank, ver, flash_id, init_msg]
# give the station some time to wake up. when we first hit it with a
# command, it often responds with an empty string. then subsequent
# commands get the proper response. so for a first command, send something
# innocuous and wait a bit. hopefully subsequent commands will then work.
# NOTE: This happens periodically and does not appear to be related to
# "waking up". Getter commands now retry, so removing the sleep.
def wakeup(self):
self.command('ECHO=?')
def set_echo(self, cmd='ON'):
log.debug("Set echo to %s" % cmd)
data = self.command('ECHO=%s' % cmd)
if data != 'OK':
raise weewx.WeeWxIOError("Set ECHO failed: %s" % data)
def get_header(self):
log.debug("Get header")
data = self.command("HEADER")
cols = data.split(',')
if cols[0] != 'HDR':
raise weewx.WeeWxIOError("Expected HDR, got %s" % cols[0])
return cols
def set_auto(self):
# auto does not echo the command
self.send_cmd("AUTO")
def get_current_data(self, send_now=True):
data = ''
if send_now:
data = self.command("NOW")
else:
data = self.read()
if data == 'NO DATA' or data == 'NO DATA RECEIVED':
log.debug("No data from sensors")
return []
return data.split(',')
def get_time(self):
# unlike all of the other accessor methods, the TIME command returns
# OK after it returns the requested parameter. so we have to pop the
# OK off the serial so it does not trip up other commands.
log.debug("Get time")
tstr = self.command("TIME=?")
if tstr not in ['ERROR', 'OK']:
data = self.read()
if data != 'OK':
raise weewx.WeeWxIOError("Failed to get time: %s, %s" % (tstr, data))
return tstr
@staticmethod
def _compose_set_time_command():
ts = time.time()
tstr = time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(ts))
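# tstr comes out like "2016/12/02 08:35:00" (example value derived from the
# format string above), so the station is sent e.g. "TIME=2016/12/02 08:35:00".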
log.info("Set time to %s (%s)" % (tstr, ts))
return "TIME=%s" % tstr
def set_time(self):
s = self._compose_set_time_command()
data = self.command(s)
if data != 'OK':
raise weewx.WeeWxIOError("Failed to set time to %s: %s" %
(s, data))
def get_dst(self):
log.debug("Get daylight saving")
return self.command("DST=?")
def set_dst(self, dst):
log.debug("Set DST to %s" % dst)
# Firmware 1.3 Build 022 Dec 02 2016 returns 3 lines (<input-dst>,'',OK)
data = self.command("DST=%s" % dst) # echoed input dst
if data != dst:
raise weewx.WeeWxIOError("Failed to set DST to %s: %s" %
(dst, data))
data = self.read() # read ''
if data not in ['ERROR', 'OK']:
data = self.read() # read OK
if data != 'OK':
raise weewx.WeeWxIOError("Failed to set DST to %s: %s" %
(dst, data))
def get_units(self):
log.debug("Get units")
return self.command("UNITS=?")
def set_units(self, units):
log.debug("Set units to %s" % units)
data = self.command("UNITS=%s" % units)
if data != 'OK':
raise weewx.WeeWxIOError("Failed to set units to %s: %s" %
(units, data))
def get_interval(self):
log.debug("Get logging interval")
return int(self.command("LOGINT=?"))
def set_interval(self, interval=5):
log.debug("Set logging interval to %d minutes" % interval)
data = self.command("LOGINT=%d" % interval)
if data != 'OK':
raise weewx.WeeWxIOError("Failed to set logging interval: %s" %
data)
def get_channel(self):
log.debug("Get channel")
return self.command("STATION")
def set_channel(self, channel):
log.debug("Set channel to %d" % channel)
if channel < 0 or 3 < channel:
raise ValueError("Channel must be 0-3")
data = self.command("STATION=%d" % channel)
if data != 'OK':
raise weewx.WeeWxIOError("Failed to set channel: %s" % data)
def get_charger(self):
log.debug("Get charger")
return self.command("CHARGER")
def get_baro(self):
log.debug("Get baro")
return self.command("BARO")
def set_baro(self, offset):
log.debug("Set barometer offset to %d" % offset)
if offset != '0':
parts = offset.split('.')
if (len(parts) != 2 or
(not (len(parts[0]) == 2 and len(parts[1]) == 2) and
not (len(parts[0]) == 3 and len(parts[1]) == 1))):
raise ValueError("Offset must be 0, XX.XX (inHg), or XXXX.X (mbar)")
data = self.command("BARO=%d" % offset)
if data != 'OK':
raise weewx.WeeWxIOError("Failed to set baro: %s" % data)
def get_memory_status(self):
# query for logger memory use. output is something like this:
# 6438 bytes, 111 records, 0%
log.debug("Get memory status")
return self.command("MEM=?")
def get_max(self):
log.debug("Get max values")
# Return outside temperature, humidity, pressure, wind direction,
# wind speed, rainfall (daily total), station voltage, inside
# temperature.
return self.command("MAX=?").split(',')
def reset_max(self):
log.debug("Reset max values")
data = self.command("MAX=RESET")
if data != 'OK':
raise weewx.WeeWxIOError("Failed to reset max values: %s" % data)
def get_min(self):
log.debug("Get min values")
# Return outside temperature, humidity, pressure, wind direction,
# wind speed, rainfall (ignore), station voltage, inside temperature.
return self.command("MIN=?").split(',')
def reset_min(self):
log.debug("Reset min values")
data = self.command("MIN=RESET")
if data != 'OK':
raise weewx.WeeWxIOError("Failed to reset min values: %s" % data)
def get_history_usage(self):
# return the number of records in the logger
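# e.g. a memory status of "6438 bytes, 111 records, 0%" yields 111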
s = self.get_memory_status()
if 'records' in s:
return int(s.split(',')[1].split()[0])
return None
def clear_memory(self):
log.debug("Clear memory")
data = self.command("MEM=CLEAR")
# It's a long wait for the OK. With a greatly increased timeout
# just for MEM=CLEAR, we should be able to read the OK.
if data == 'OK':
log.info("MEM=CLEAR succeeded.")
else:
raise weewx.WeeWxIOError("Failed to clear memory: %s" % data)
def get_rain(self):
log.debug("Get rain total")
# Firmware 1.3 Build 022 Dec 02 2017 returns OK after the rain count
# This is like TIME=?
rstr = self.command("RAIN")
if rstr not in ['ERROR', 'OK']:
data = self.read()
if data != 'OK':
raise weewx.WeeWxIOError("Failed to get rain: %s" % data)
return rstr
def reset_rain(self):
log.debug("Reset rain counter")
data = self.command("RAIN=RESET")
if data != 'OK':
raise weewx.WeeWxIOError("Failed to reset rain: %s" % data)
def gen_records_since_ts(self, header, sensor_map, since_ts):
if since_ts is None:
since_ts = 0.0
num_records = 0
else:
now_ts = time.mktime(datetime.datetime.now().timetuple())
nseconds = now_ts - since_ts
nminutes = math.ceil(nseconds / 60.0)
num_records = math.ceil(nminutes / float(self.get_interval()))
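# Illustrative numbers (assuming a 5-minute logging interval): a since_ts one
# hour in the past gives nseconds=3600 -> nminutes=60 -> num_records=12.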
if num_records == 0:
log.debug('gen_records_since_ts: Asking for all records.')
else:
log.debug('gen_records_since_ts: Asking for %d records.' % num_records)
for r in self.gen_records(nrec=num_records):
pkt = CC3000Driver._parse_values(r[1:], header, sensor_map, "%Y/%m/%d %H:%M")
if 'dateTime' in pkt and pkt['dateTime'] > since_ts:
yield pkt
def gen_records(self, nrec=0):
"""
Generator function for getting nrec records from the device. A value
of 0 indicates all records.
The CC3000 returns a header ('HDR,'), the archive records
we are interested in ('REC,'), daily max and min records
('MAX,', 'MIN,') as well as messages for various events such as a
reboot ('MSG,').
Things get interesting when nrec is non-zero.
DOWNLOAD=n returns the latest n records in memory. The CC3000 does
not distinguish between REC, MAX, MIN and MSG records in memory.
As such, DOWNLOAD=5 does NOT mean fetch the latest 5 REC records.
For example, if the latest 5 records include a MIN and a MAX record,
only 3 REC records will be returned (along with the MIN and MAX
records).
Given that one can't precisely ask for a given number of archive
records, a heuristic is used and errs on the side of asking for
too many records.
The heuristic for the number of records to ask for is:
the sum of:
nrec
7 * the number of days covered in the request (rounded up)
Note: One can determine the number of days from the number of
records requested because the archive interval is known.
Asking for an extra seven records per day allows for the one MIN and
one MAX records generated per day, plus a buffer for up to five MSG
records each day. Unless one is rebooting the CC3000 all day, this
will be plenty. Typically, there will be zero MSG records. Clearing
memory and rebooting actions generate MSG records. Both are uncommon.
As a result, gen_records will overshoot the records asked for, but this
is not a problem in practice. Also, if a new archive record is written
while this operation is taking place, it will be returned. As such,
the number wouldn't be precise anyway. One could work around this by
accumulating records before returning, and then returning an exact
amount, but it simply isn't worth it.
Examining the records in the CC3000 (808 records at the time of the
examination) shows the following records found:
HDR: 1 (the header record, per the spec)
REC: 800 (the archive records -- ~2.8 days worth)
MSG: 1 (A clear command that executed ~2.8 days ago:
MSG 2019/12/20 15:48 CLEAR ON COMMAND!749D)
MIN: 3 (As expected for 3 days.)
MAX: 3 (As expected for 3 days.)
Interrogating the CC3000 for a large number of records fails miserably
if, while reading the responses, the responses are parsed and added
to the database (checksum mismatches, partial records, etc.). If
these last two steps are skipped, reading from the CC3000 is very
reliable. This can be observed by asking for history with wee_config.
Observed with > 11K of records.
To address the above problem, all records are read into memory. Reading
all records into memory before parsing and inserting into the database
is very reliable. For smaller amounts of records, the reading into
memory could be skipped, but what would be the point?
"""
log.debug('gen_records(%d)' % nrec)
totrec = self.get_history_usage()
log.debug('gen_records: Requested %d latest of %d records.' % (nrec, totrec))
if nrec == 0:
num_to_ask = 0
else:
# Determine the number of records to ask for.
# See heuristic above.
num_mins_asked = nrec * self.get_interval()
num_days_asked = math.ceil(num_mins_asked / (24.0*60))
num_to_ask = nrec + 7 * num_days_asked
if num_to_ask == 0:
cmd = 'DOWNLOAD'
else:
cmd = 'DOWNLOAD=%d' % num_to_ask
log.debug('%s' % cmd)
# Note: It takes about 14s to read 1000 records into memory.
if num_to_ask == 0:
log.info('Reading all records into memory. This could take some time.')
elif num_to_ask < 1000:
log.info('Reading %d records into memory.' % num_to_ask)
else:
log.info('Reading %d records into memory. This could take some time.' % num_to_ask)
recs = []
data = self.command(cmd)
while data != 'OK':
recs.append(data)
data = self.read()
log.info('Finished reading %d records.' % len(recs))
yielded = 0
for data in recs:
values = data.split(',')
if values[0] == 'REC':
yielded += 1
yield values
elif (values[0] == 'HDR' or values[0] == 'MSG' or
values[0] == 'MIN' or values[0] == 'MAX' or
values[0].startswith('DOWNLOAD')):
pass
else:
log.error("Unexpected record '%s' (%s)" % (values[0], data))
log.debug('Downloaded %d records' % yielded)
class CC3000ConfEditor(weewx.drivers.AbstractConfEditor):
@property
def default_stanza(self):
return """
[CC3000]
# This section is for RainWise MarkIII weather stations and CC3000 logger.
# Serial port such as /dev/ttyS0, /dev/ttyUSB0, or /dev/cuaU0
port = %s
# The station model, e.g., CC3000 or CC3000R
model = CC3000
# The driver to use:
driver = weewx.drivers.cc3000
""" % (CC3000.DEFAULT_PORT,)
def prompt_for_settings(self):
print("Specify the serial port on which the station is connected, for")
print("example /dev/ttyUSB0 or /dev/ttyS0.")
port = self._prompt('port', CC3000.DEFAULT_PORT)
return {'port': port}
# define a main entry point for basic testing. invoke from the weewx root dir:
#
# PYTHONPATH=bin python -m weewx.drivers.cc3000 --help
#
# FIXME: This duplicates all of the functionality in CC3000Configurator.
# Perhaps pare this down to a version option and, by default,
# polling and printing records (a la the vantage driver).
if __name__ == '__main__':
import optparse
import weewx
import weeutil.logger
usage = """%prog [options] [--help]"""
parser = optparse.OptionParser(usage=usage)
parser.add_option('--version', action='store_true',
help='display driver version')
parser.add_option('--test-crc', dest='testcrc', action='store_true',
help='test crc')
parser.add_option('--port', metavar='PORT',
help='port to which the station is connected',
default=CC3000.DEFAULT_PORT)
parser.add_option('--get-version', dest='getver', action='store_true',
help='display firmware version')
parser.add_option('--debug', action='store_true', default=False,
help='emit additional diagnostic information')
parser.add_option('--get-status', dest='status', action='store_true',
help='display memory status')
parser.add_option('--get-channel', dest='getch', action='store_true',
help='display station channel')
parser.add_option('--set-channel', dest='setch', metavar='CHANNEL',
help='set station channel')
parser.add_option('--get-battery', dest='getbat', action='store_true',
help='display battery status')
parser.add_option('--get-current', dest='getcur', action='store_true',
help='display current data')
parser.add_option('--get-memory', dest='getmem', action='store_true',
help='display memory status')
parser.add_option('--get-records', dest='getrec', metavar='NUM_RECORDS',
help='display records from station memory')
parser.add_option('--get-header', dest='gethead', action='store_true',
help='display data header')
parser.add_option('--get-units', dest='getunits', action='store_true',
help='display units')
parser.add_option('--set-units', dest='setunits', metavar='UNITS',
help='set units to ENGLISH or METRIC')
parser.add_option('--get-time', dest='gettime', action='store_true',
help='display station time')
parser.add_option('--set-time', dest='settime', action='store_true',
help='set station time to computer time')
parser.add_option('--get-dst', dest='getdst', action='store_true',
help='display daylight savings settings')
parser.add_option('--set-dst', dest='setdst',
metavar='mm/dd HH:MM,mm/dd HH:MM,[MM]M',
help='set daylight savings start, end, and amount')
parser.add_option('--get-interval', dest='getint', action='store_true',
help='display logging interval, in seconds')
parser.add_option('--set-interval', dest='setint', metavar='INTERVAL',
type=int, help='set logging interval, in seconds')
parser.add_option('--clear-memory', dest='clear', action='store_true',
help='clear logger memory')
parser.add_option('--get-rain', dest='getrain', action='store_true',
help='get rain counter')
parser.add_option('--reset-rain', dest='resetrain', action='store_true',
help='reset rain counter')
parser.add_option('--get-max', dest='getmax', action='store_true',
help='get max counter')
parser.add_option('--reset-max', dest='resetmax', action='store_true',
help='reset max counters')
parser.add_option('--get-min', dest='getmin', action='store_true',
help='get min counter')
parser.add_option('--reset-min', dest='resetmin', action='store_true',
help='reset min counters')
parser.add_option('--poll', metavar='POLL_INTERVAL', type=int,
help='poll interval in seconds')
parser.add_option('--reboot', dest='reboot', action='store_true',
help='reboot the station')
(options, args) = parser.parse_args()
if options.version:
print("%s driver version %s" % (DRIVER_NAME, DRIVER_VERSION))
exit(0)
if options.debug:
DEBUG_SERIAL = 1
DEBUG_CHECKSUM = 1
DEBUG_OPENCLOSE = 1
weewx.debug = 1
weeutil.logger.setup('cc3000', {})
if options.testcrc:
_check_crc(b'OK')
_check_crc(b'REC,2010/01/01 14:12, 64.5, 85,29.04,349, 2.4, 4.2, 0.00, 6.21, 0.25, 73.2,!B82C')
_check_crc(b'MSG,2010/01/01 20:22,CHARGER ON,!4CED')
exit(0)
with CC3000(options.port) as s:
s.flush()
s.wakeup()
s.set_echo()
if options.getver:
print(s.get_version())
if options.reboot:
print('rebooting...')
startup_msgs = s.reboot()
for line in startup_msgs:
print(line)
if options.status:
print("Firmware:", s.get_version())
print("Time:", s.get_time())
print("DST:", s.get_dst())
print("Units:", s.get_units())
print("Memory:", s.get_memory_status())
print("Interval:", s.get_interval() * 60)
print("Channel:", s.get_channel())
print("Charger:", s.get_charger())
print("Baro:", s.get_baro())
print("Rain:", s.get_rain())
print("Max values:", s.get_max())
print("Min values:", s.get_min())
if options.getch:
print(s.get_channel())
if options.setch is not None:
s.set_channel(int(options.setch))
if options.getbat:
print(s.get_charger())
if options.getcur:
print(s.get_current_data())
if options.getmem:
print(s.get_memory_status())
if options.getrec is not None:
i = 0
for r in s.gen_records(int(options.getrec)):
print(i, r)
i += 1
if options.gethead:
print(s.get_header())
if options.getunits:
print(s.get_units())
if options.setunits:
s.set_units(options.setunits)
if options.gettime:
print(s.get_time())
if options.settime:
s.set_time()
if options.getdst:
print(s.get_dst())
if options.setdst:
s.set_dst(options.setdst)
if options.getint:
print(s.get_interval() * 60)
if options.setint:
s.set_interval(int(options.setint) / 60)
if options.clear:
s.clear_memory()
if options.getrain:
print(s.get_rain())
if options.resetrain:
print(s.reset_rain())
if options.getmax:
print(s.get_max())
if options.resetmax:
print(s.reset_max())
if options.getmin:
print(s.get_min())
if options.resetmin:
print(s.reset_min())
if options.poll is not None:
cmd_mode = True
if options.poll == 0:
cmd_mode = False
s.set_auto()
while True:
print(s.get_current_data(cmd_mode))
time.sleep(options.poll)
| [
"[email protected]"
] | |
47bf2f00c6730182259d81aeab1bf82ce408ef5d | c7115a0a1470310792b81cd097e0aa47ed095195 | /django_thoughtapi/manage.py | 5045eb05410e0449491ad1e7a92edec2a1f3c746 | [
"MIT"
] | permissive | qwergram/thoughts_api | 80818424b3755f671cfb65fcddff5c0769fa9e27 | 47e9a76cc15e30c36232b253eb0e44bb5f401482 | refs/heads/master | 2020-12-24T22:29:12.401158 | 2016-04-30T22:45:20 | 2016-04-30T22:45:20 | 57,338,528 | 0 | 0 | null | 2016-04-29T23:40:38 | 2016-04-28T22:46:59 | null | UTF-8 | Python | false | false | 260 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_thoughtapi.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
69c0bb652daa62eea8c9a6a5378fd562629cf26a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03095/s108733747.py | 8493da82e83df4f2a53a5e799e1313b9f63c0471 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,818 | py | import sys
import math
from collections import Counter
N = int(input())
S = input()
MOD = 1000000007
# baba
# a(2), b(2), ab(1), ba(3)
# baab
# a(2), b(2), ab(2), ba(2)
# The one-character case is easy: just count the distinct characters that appear
# What about length two?
# For 'ab', which 'a' do we pick? -> and then which 'b' behind it...
# Trying everything is 2^100000, no way
# Can we do DP?
# Let's design the DP
# dp[n] : the answer after looking at the first n characters
# dp[n] = dp[n-1]
# baab
# dp[0] = 1 (b)
# dp[1] = dp[0] (pick only the (b)) + 1 (pick only a) + dp[0] * 1 (ab)
# What if it's a character we haven't seen yet?
# dp[n] = dp[n-1] (don't pick it) + dp[n-1] (pick it) + 1
# What if it's a character we've already seen?
# The one-character count doesn't grow
# For longer picks, the choices of which occurrence to use grow
# ba (3)
# baa -> 3 + 1?
# dp[n] = dp[n] (unchanged) + the cases that use the last character
# If we use the last character, what happens? -> we build the rest from the other kinds of characters, but isn't that hard????
# baba
# bab gives 5: a, b(2), ab(1), ba(1)
# If we use the last character, we pick one of the b's (or pick nothing)
# whether to pick a b? * which one to pick?
# the case of picking no b (1) + the cases of picking a b (which one?)
# (1 + 2)
# abca
# abc gives 6: a, b, c, ab, ac, bc
# If we use the last character, it's the combinations of the remaining b and c
# pick b or not * pick c or not -> 4 ways?
ans = 1
counter = Counter()
counter[S[0]] += 1
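# The loop below implements the sketch above: for a character not seen before,
# ans = 2*ans + 1 (append it or not to every existing choice, plus it alone);
# for a character seen before, add prod(1 + count) over all the *other*
# characters, i.e. the combinations that can come before this occurrence.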
for ch in S[1:]:
if ch in counter:
tmp = 1
for k, cnt in counter.items():
if k == ch:
continue
tmp = (tmp * (1 + cnt)) % MOD
ans = (ans + tmp) % MOD
counter[ch] += 1
else:
ans = (2 * ans) % MOD
ans = (ans + 1) % MOD
counter[ch] += 1
print(ans)
| [
"[email protected]"
] | |
419c8a91a20a69ff1f0924b178d71876b2f3d74b | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /all-gists/1035975/snippet.py | 5f3993a8803fe9637f988459e7009fb87ae47a03 | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 13,415 | py | import ast
from cStringIO import StringIO
import sys
INFSTR = '1e308'
def interleave(inter, f, seq):
seq = iter(seq)
try:
f(next(seq))
except StopIteration:
pass
else:
for x in seq:
inter()
f(x)
class PythonToPhp:
def __init__(self, source, indent = 0):
tree = ast.parse(source)
self.code = StringIO()
self.tabstop = 2
self._indent = indent
self.dispatch(tree)
def get_code(self):
return self.code.getvalue()
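# Illustrative use (parses only under the Python 2 ast, which this gist targets,
# e.g. it uses the print statement itself):
#   PythonToPhp("print 'hi'").get_code() -> "\necho 'hi', '<br />';"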
def fill(self, text = ''):
self.code.write('\n%s%s' % (' ' * self.tabstop * self._indent, text))
def write(self, text):
self.code.write(text)
def enter(self):
self.code.write(' {')
self._indent += 1
def leave(self):
self._indent -= 1
self.fill('}')
def error(self, msg):
print msg
sys.exit()
def dispatch(self, tree):
if isinstance(tree, list):
for t in tree:
self.dispatch(t)
return
meth = getattr(self, '_%s' % tree.__class__.__name__)
return meth(tree)
########## Transform Methods ##########
def _Module(self, tree):
for stmt in tree.body:
self.dispatch(stmt)
### Statement ###
def _Expr(self, tree):
self.fill()
self.dispatch(tree.value)
self.write(';')
def _Import(self, t):
self.error('import not supported')
def _ImportFrom(self, t):
self.error('import not supported')
def _Assign(self, t):
self.fill()
for target in t.targets:
if isinstance(target, ast.Tuple):
self._lvalue_tuple(target)
else:
self.dispatch(target)
self.write(' = ')
self.dispatch(t.value)
self.write(';')
def _AugAssign(self, t):
self.fill()
self.dispatch(t.target)
name = t.op.__class__.__name__
if name == 'Pow':
self.write(' = pow(')
self.dispatch(t.target)
self.write(', ')
self.dispatch(t.value)
self.write(');')
elif name == 'FloorDiv':
self.write(' = floor(')
self.dispatch(t.target)
self.write(' / ')
self.dispatch(t.value)
self.write(');')
else:
self.write(' %s= ' % self.binop[t.op.__class__.__name__])
self.dispatch(t.value)
self.write(';')
def _Return(self, t):
self.fill('return')
if t.value:
self.write(' ')
self.dispatch(t.value)
self.write(';')
def _Pass(self, t):
self.fill(';')
def _Break(self, t):
self.fill('break;')
def _Continue(self, t):
self.fill('continue;')
def _Delete(self, t):
for target in t.targets:
self.fill('unset(')
self.dispatch(target)
self.write(');')
def _Assert(self, t):
self.fill('assert(')
self.dispatch(t.test)
self.write(');')
def _Exec(self, t):
self.fill('eval(')
self.dispatch(t.body)
self.write(');')
def _Print(self, t):
self.fill('echo ')
sep = ''
for e in t.values:
self.write(sep)
self.dispatch(e)
sep = ', '
if t.nl:
self.write(sep)
self.write("'<br />'")
self.write(';')
def _Global(self, t):
self.fill('global ')
interleave(lambda: self.write(', '), self.write, t.names)
self.write(';')
def _Yield(self, t):
self.error('yield not supported')
def _Raise(self, t):
self.error('Exceptions not supported')
def _TryExcept(self, t):
self.error('Exceptions not supported')
def _TryFinally(self, t):
self.error('Exceptions not supported')
def _ExceptHandler(self, t):
self.error('Exceptions not supported')
def _ClassDef(self, t):
self.error('Class not supported')
def _FunctionDef(self, t):
self.fill('function ' + t.name + '(')
self.dispatch(t.args)
self.write(')')
self.enter()
self.dispatch(t.body)
self.leave()
def _For(self, t):
self.fill('foreach (')
self.dispatch(t.iter)
self.write(' as ')
self.dispatch(t.target)
self.write(')')
self.enter()
self.dispatch(t.body)
self.leave()
if t.orelse:
self.error('else clause for for statement not supported')
def _If(self, t):
self.fill("if (")
self.dispatch(t.test)
self.write(')')
self.enter()
self.dispatch(t.body)
self.leave()
# collapse nested ifs into equivalent elifs.
while (t.orelse and len(t.orelse) == 1 and
isinstance(t.orelse[0], ast.If)):
t = t.orelse[0]
self.fill("elseif (")
self.dispatch(t.test)
self.write(')')
self.enter()
self.dispatch(t.body)
self.leave()
# final else
if t.orelse:
self.fill("else")
self.enter()
self.dispatch(t.orelse)
self.leave()
def _While(self, t):
self.fill("while (")
self.dispatch(t.test)
self.write(')')
self.enter()
self.dispatch(t.body)
self.leave()
if t.orelse:
self.error('else clause for while statement not supported')
def _With(self, t):
self.error('with statement not supported')
### Expression ###
def _Str(self, t):
self.write(repr(t.s))
def _Name(self, t):
if t.id == 'True':
self.write('true')
elif t.id == 'False':
self.write('false')
elif t.id == 'None':
self.write('null')
else:
self.write('$%s' % t.id)
def _Repr(self, t):
self.write('var_export(')
self.dispatch(t.value)
self.write(", true)")
def _Num(self, t):
repr_n = repr(t.n)
if repr_n.startswith('-'):
self.write('(')
self.write(repr_n.replace('inf', INFSTR))
if repr_n.startswith('-'):
self.write(')')
def _List(self, t):
self.write('array(')
interleave(lambda: self.write(", "), self.dispatch, t.elts)
self.write(')')
def _ListComp(self, t):
if len(t.generators) > 1:
self.error('multiple generators in comprehension not supported')
generator = t.generators.pop()
self._comprehension(generator, 'left')
self.dispatch(t.elt)
self._comprehension(generator, 'right')
def _comprehension(self, t, part = 'left'):
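# Emits the PHP scaffolding around a comprehension in two halves ('left'/'right').
# Roughly (illustrative): [f(x) for x in xs if g(x)] becomes
# array_filter(array_map(function($x) { return f($x); }, $xs), function($x) { return g($x); })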
if part == 'left':
if t.ifs:
self.write('array_filter(array_map(function(')
else:
self.write('array_map(function(')
self.dispatch(t.target)
self.write(') { return ')
elif part == 'right':
self.write('; }, ')
self.dispatch(t.iter)
if t.ifs:
self.write('), function(')
self.dispatch(t.target)
self.write(') { return ')
for if_clause in t.ifs:
self.dispatch(if_clause)
self.write('; })')
else:
self.write(')')
def _GeneratorExp(self, t):
if len(t.generators) > 1:
self.error('multiple generators in comprehension not supported')
generator = t.generators.pop()
self._comprehension(generator, 'left')
self.dispatch(t.elt)
self._comprehension(generator, 'right')
def _SetComp(self, t):
if len(t.generators) > 1:
self.error('multiple generators in comprehension not supported')
self.write('array_unique(')
generator = t.generators.pop()
self._comprehension(generator, 'left')
self.dispatch(t.elt)
self._comprehension(generator, 'right')
self.write(')')
def _DictComp(self, t):
self.error('dict comprehension not supported')
def _IfExp(self, t):
self.write("((")
self.dispatch(t.test)
self.write(') ? (')
self.dispatch(t.body)
self.write(') : (')
self.dispatch(t.orelse)
self.write('))')
def _Set(self, t):
assert(t.elts) # should be at least one element
self.write('array_unique(array(')
interleave(lambda: self.write(", "), self.dispatch, t.elts)
self.write('))')
def _Dict(self, t):
self.write('array(')
def write_pair(pair):
k, v = pair
self.dispatch(k)
self.write(' => ')
self.dispatch(v)
interleave(lambda: self.write(', '), write_pair, zip(t.keys, t.values))
self.write(')')
def _Tuple(self, t):
self.write('array(')
interleave(lambda: self.write(", "), self.dispatch, t.elts)
self.write(')')
def _lvalue_tuple(self, t):
self.write('list(')
interleave(lambda: self.write(", "), self.dispatch, t.elts)
self.write(')')
unop = {"Invert":"~", "Not": "!", "UAdd":"+", "USub":"-"}
def _UnaryOp(self, t):
self.write("(")
self.write(self.unop[t.op.__class__.__name__])
self.write(" ")
if isinstance(t.op, ast.USub) and isinstance(t.operand, ast.Num):
self.write("(")
self.dispatch(t.operand)
self.write(")")
else:
self.dispatch(t.operand)
self.write(")")
binop = {
"Add":"+",
"Sub":"-",
"Mult":"*",
"Div":"/",
"Mod":"%",
"LShift":"<<",
"RShift":">>",
"BitOr":"|",
"BitXor":"^",
"BitAnd":"&",
}
def _BinOp(self, t):
name = t.op.__class__.__name__
if name == 'Pow':
self.write("(pow(")
self.dispatch(t.left)
self.write(', ')
self.dispatch(t.right)
self.write('))')
elif name == 'FloorDiv':
self.write('(floor(')
self.dispatch(t.left)
self.write(' / ')
self.dispatch(t.right)
self.write('))')
elif name == 'Mod' and isinstance(t.left, ast.Str):
self.write('sprintf(')
self.dispatch(t.left)
self.write(', ')
if isinstance(t.right, ast.Str):
self.dispatch(t.right)
elif isinstance(t.right, ast.Tuple):
interleave(lambda: self.write(", "), self.dispatch, t.right.elts)
else:
self.error('impossible string subscript error')
self.write(')')
else:
self.write("(")
self.dispatch(t.left)
self.write(" " + self.binop[name] + " ")
self.dispatch(t.right)
self.write(")")
cmpops = {
"Eq":"==",
"NotEq":"!=",
"Lt":"<",
"LtE":"<=",
"Gt":">",
"GtE":">=",
"Is":"===",
"IsNot":"!==",
}
def _Compare(self, t):
name = t.ops[0].__class__.__name__
self.write("(")
if name == 'In':
comparator = t.comparators.pop()
self.write('in_array(')
self.dispatch(t.left)
self.write(', ')
self.dispatch(comparator)
self.write(') || array_key_exists(')
self.dispatch(t.left)
self.write(', ')
self.dispatch(comparator)
self.write(')')
elif name == 'NotIn':
comparator = t.comparators.pop()
self.write('!in_array(')
self.dispatch(t.left)
self.write(', ')
self.dispatch(comparator)
self.write(') && !array_key_exists(')
self.dispatch(t.left)
self.write(', ')
self.dispatch(comparator)
self.write(')')
else:
self.dispatch(t.left)
for o, e in zip(t.ops, t.comparators):
self.write(" " + self.cmpops[o.__class__.__name__] + " ")
self.dispatch(e)
self.write(")")
boolops = {ast.And: '&&', ast.Or: '||'}
def _BoolOp(self, t):
self.write("(")
s = " %s " % self.boolops[t.op.__class__]
interleave(lambda: self.write(s), self.dispatch, t.values)
self.write(")")
def _Attribute(self,t):
self.dispatch(t.value)
self.write("->")
self.write(t.attr)
def _func_name(self, t):
self.write('%s' % t.id)
def _Call(self, t):
self._func_name(t.func)
self.write("(")
comma = False
for e in t.args:
if comma: self.write(", ")
else: comma = True
self.dispatch(e)
for e in t.keywords:
if comma: self.write(", ")
else: comma = True
self.dispatch(e)
if t.starargs:
self.error('function vararg not supported')
if t.kwargs:
self.error('function kwarg not supported')
self.write(")")
def _Subscript(self, t):
if isinstance(t.slice, ast.Index):
#self.dispatch(t.value)
#self.write("[")
#self.dispatch(t.slice)
#self.write("]")
self.write('pyphp_subscript(')
self.dispatch(t.value)
self.write(', ')
self.dispatch(t.slice)
self.write(')')
elif isinstance(t.slice, ast.Slice):
self.write('array_slice(')
self.dispatch(t.value)
self.write(', ')
self.dispatch(t.slice)
self.write(')')
def _Ellipsis(self, t):
self.error('ellipsis not supported')
def _Index(self, t):
self.dispatch(t.value)
def _Slice(self, t):
if t.lower:
self.dispatch(t.lower)
else:
self.write('0')
if t.upper:
self.write(", ")
self.write('(')
self.dispatch(t.upper)
self.write(' - ')
if t.lower:
self.dispatch(t.lower)
else:
self.write('0')
self.write(')')
if t.step:
self.error('slice step not supported')
def _ExtSlice(self, t):
self.error('extslice not supported')
#interleave(lambda: self.write(', '), self.dispatch, t.dims)
### Others ###
def _arguments(self, t):
first = True
defaults = [None] * (len(t.args) - len(t.defaults)) + t.defaults
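# e.g. for "def f(a, b, c=3)": defaults == [None, None, <3>], so only c is
# written out with a " = 3" default below.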
for a,d in zip(t.args, defaults):
if first: first = False
else: self.write(", ")
self.dispatch(a),
if d:
self.write(" = ")
self.dispatch(d)
if t.vararg:
self.error('function vararg not supported')
if t.kwarg:
self.error('function kwarg not supported')
def _keyword(self, t):
self.write('$%s' % t.arg)
self.write(" = ")
self.dispatch(t.value)
def _Lambda(self, t):
self.write("(")
self.write("function(")
self.dispatch(t.args)
self.write(") {")
self.dispatch(t.body)
self.write("})")
def _alias(self, t):
self.error('alias not supported')
| [
"[email protected]"
] | |
ec88adb74b40ae3b44f04b1e117c8c881872eb99 | ba2d24fd6c5ce7d490ee57f224fd5435a1132093 | /setup.py | 7b0ac69b67ea99435a867d57e8b00a0787e5f3aa | [
"MIT"
] | permissive | FlowerOda/pytest-auto-parametrize | cb2aff37308bff571b980da88f222f8b88e4e36b | 9db33bb06de13c26f753bfd18e254ce10ae1256c | refs/heads/master | 2022-01-09T16:54:33.796383 | 2018-10-09T08:56:09 | 2018-10-09T08:56:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,591 | py | from setuptools import setup
from setuptools.command.test import test as TestCommand
import sys
__version__ = 'unknown'
# "import" __version__
for line in open('pytest_auto_parametrize.py'):
if line.startswith('__version__'):
exec(line)
break
class PyTest(TestCommand):
"""Enable "python setup.py test".
Stripped down from:
http://doc.pytest.org/en/latest/goodpractices.html#manual-integration
"""
def run_tests(self):
import pytest
sys.exit(pytest.main([]))
setup(
name='pytest-auto-parametrize',
py_modules=['pytest_auto_parametrize'],
version=__version__,
author='Matthias Geier',
author_email='[email protected]',
description='pytest plugin: avoid repeating arguments in parametrize',
long_description=open('README.rst').read(),
license='MIT',
keywords='parametrized testing'.split(),
url='https://github.com/mgeier/pytest-auto-parametrize',
platforms='any',
zip_safe=True,
classifiers=[
'Framework :: Pytest',
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Intended Audience :: Developers',
'Topic :: Software Development :: Testing',
],
entry_points={
'pytest11': ['pytest_auto_parametrize = pytest_auto_parametrize'],
},
tests_require=['pytest'],
cmdclass={'test': PyTest},
)
| [
"[email protected]"
] | |
005852383cf1e3ae176206e5dd95e2754cd001ce | 006341ca12525aa0979d6101600e78c4bd9532ab | /CMS/Zope-3.2.1/Dependencies/zope.app-Zope-3.2.1/zope.app/container/browser/find.py | ee744f8239d12401177ed371c83a4a3a56c523fe | [
"ZPL-2.1",
"Python-2.0",
"ICU",
"LicenseRef-scancode-public-domain",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0"
] | permissive | germanfriday/code-examples-sandbox | d0f29e20a3eed1f8430d06441ac2d33bac5e4253 | 4c538584703754c956ca66392fdcecf0a0ca2314 | refs/heads/main | 2023-05-30T22:21:57.918503 | 2021-06-15T15:06:47 | 2021-06-15T15:06:47 | 377,200,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,567 | py | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Find View Class
$Id: find.py 29143 2005-02-14 22:43:16Z srichter $
"""
__docformat__ = 'restructuredtext'
from zope.app import zapi
from zope.app.container.find import SimpleIdFindFilter
from zope.app.container.interfaces import IFind
from zope.app.traversing.api import getName
from zope.app.publisher.browser import BrowserView
# Very simple implementation right now
class Find(BrowserView):
def findByIds(self, ids):
"""Do a find for the `ids` listed in `ids`, which is a string."""
finder = IFind(self.context)
ids = ids.split()
# if we don't have any ids listed, don't search at all
if not ids:
return []
request = self.request
result = []
for object in finder.find([SimpleIdFindFilter(ids)]):
url = zapi.absoluteURL(object, request)
result.append({ 'id': getName(object), 'url': url})
return result
| [
"[email protected]"
] | |
064b469872ad95e7487c3cf649ca3cfa62170bdd | 6f05f7d5a67b6bb87956a22b988067ec772ba966 | /data/test/python/068d64a694460d83bc9a67db9e2e5f1e4e03d3c3urls.py | 068d64a694460d83bc9a67db9e2e5f1e4e03d3c3 | [
"MIT"
] | permissive | harshp8l/deep-learning-lang-detection | 93b6d24a38081597c610ecf9b1f3b92c7d669be5 | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | refs/heads/master | 2020-04-07T18:07:00.697994 | 2018-11-29T23:21:23 | 2018-11-29T23:21:23 | 158,597,498 | 0 | 0 | MIT | 2018-11-21T19:36:42 | 2018-11-21T19:36:41 | null | UTF-8 | Python | false | false | 717 | py | from django.conf.urls import url
from . import views
SITE_SLUG = "(?P<site_slug>[-_\w]+)"
IMAGE_SLUG = "(?P<image_slug>[-_\w]+)"
urlpatterns = [
# Manage
url(r'^$', views.manage_redirect, name='manage_redirect'),
url(r'^manage/$', views.manage, name='manage'),
url(r'^manage/archives$', views.archives, name='archives'),
url(r'^manage/create/$', views.create, name='create'),
url(r'^manage/create_js/$', views.create_js, name='create_js'),
url(r'^manage/' + IMAGE_SLUG + '/trash$', views.trash, name='trash'),
# View
url(r'^' + IMAGE_SLUG + '$', views.view),
url(r'^' + IMAGE_SLUG + '.thumbnail', views.thumbnail),
url(r'^' + IMAGE_SLUG + '.original', views.original),
]
| [
"[email protected]"
] | |
94e1bcfdf5adabec1171a6844867b600be9ef5e8 | c93b0f008d0977e0b9327ad8b930489f5cccae97 | /platfrom/testdata/RawQosBuffering.py | 3dbfb80b4f23f72c376766ece3d0dc34e83de492 | [] | no_license | ParkPan/ATCasePackage | 15caa664bd94c014ccbd1780353bfc5fcc0caa87 | edad6c1d5a343c740e251821fee0c29336f3d435 | refs/heads/master | 2020-06-16T02:44:06.323352 | 2016-12-01T03:46:44 | 2016-12-01T03:46:44 | 75,251,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,336 | py | import random
import sys
import os
import datavars
import dataprovider
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '..'))
from commonfunc import get_timestamp_by_time
class RawQosBuffering(dataprovider.Dataprovider):
tablename = 'raw_input_qos_buffering'
@classmethod
def gettablename(cls):
return cls.tablename
def makedata(self):
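# Writes 24 hours x 4 samples per hour of comma-separated rows following
# data_format below, with ids/peer ids/urls/types drawn at random from datavars.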
data_format = '%s,%d,%s,%s,itsavvidstring,%s,1111,222,%d,%d\n'
with open(os.path.abspath(os.path.dirname(__file__)) + '/RawQosBuffering.txt', 'w') as filedemanddata:
for i in range(24):
for j in [2, 6, 15, 26]:
id = datavars.id_range[random.randint(0,14)]
timestamp = get_timestamp_by_time(datavars.time_format% (i, j))
peerid = datavars.peeid_range[random.randint(0,9)]
url = datavars.url_range[random.randint(0,4)]
type = datavars.type_range[random.randint(0, 3)]
line = data_format % (
id, int(timestamp), peerid, url, type, int(timestamp)+random.randint(1,100),
int(timestamp) + random.randint(100,10000))
filedemanddata.write(line)
return os.path.abspath(os.path.dirname(__file__)) + '/RawQosBuffering.txt'
| [
"[email protected]"
] | |
ceab03c4764ad7cac99e7e1fcadaca2cdc5da95a | 159d4ae61f4ca91d94e29e769697ff46d11ae4a4 | /venv/lib/python3.9/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_custom_frames.py | 94cabd744e1d3785ac2a728ff2ac0c584fccdf39 | [
"MIT"
] | permissive | davidycliao/bisCrawler | 729db002afe10ae405306b9eed45b782e68eace8 | f42281f35b866b52e5860b6a062790ae8147a4a4 | refs/heads/main | 2023-05-24T00:41:50.224279 | 2023-01-22T23:17:51 | 2023-01-22T23:17:51 | 411,470,732 | 8 | 0 | MIT | 2023-02-09T16:28:24 | 2021-09-28T23:48:13 | Python | UTF-8 | Python | false | false | 4,397 | py | from _pydevd_bundle.pydevd_constants import get_current_thread_id, Null, ForkSafeLock
from pydevd_file_utils import get_abs_path_real_path_and_base_from_frame
from _pydev_imps._pydev_saved_modules import thread, threading
import sys
from _pydev_bundle import pydev_log
DEBUG = False
class CustomFramesContainer:
# Actual Values initialized later on.
custom_frames_lock = None # : :type custom_frames_lock: threading.Lock
custom_frames = None
_next_frame_id = None
_py_db_command_thread_event = None
def custom_frames_container_init(): # Note: no staticmethod on jython 2.1 (so, use free-function)
CustomFramesContainer.custom_frames_lock = ForkSafeLock()
# custom_frames can only be accessed if properly locked with custom_frames_lock!
# Key is a string identifying the frame (as well as the thread it belongs to).
# Value is a CustomFrame.
#
CustomFramesContainer.custom_frames = {}
# Only to be used in this module
CustomFramesContainer._next_frame_id = 0
# This is the event we must set to release an internal process events. It's later set by the actual debugger
# when we do create the debugger.
CustomFramesContainer._py_db_command_thread_event = Null()
# Initialize it the first time (it may be reinitialized later on when dealing with a fork).
custom_frames_container_init()
class CustomFrame:
def __init__(self, name, frame, thread_id):
# 0 = string with the representation of that frame
self.name = name
# 1 = the frame to show
self.frame = frame
# 2 = an integer identifying the last time the frame was changed.
self.mod_time = 0
# 3 = the thread id of the given frame
self.thread_id = thread_id
def add_custom_frame(frame, name, thread_id):
'''
It's possible to show paused frames by adding a custom frame through this API (it's
intended to be used for coroutines, but could potentially be used for generators too).
:param frame:
The topmost frame to be shown paused when a thread with thread.ident == thread_id is paused.
:param name:
The name to be shown for the custom thread in the UI.
:param thread_id:
The thread id to which this frame is related (must match thread.ident).
:return: str
Returns the custom thread id which will be used to show the given frame paused.
'''
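# Illustrative call (names assumed): add_custom_frame(coro_frame, 'my_coroutine', thread_id)
# returns an id of the form '__frame__:<n>|<id of the calling thread>' (built below).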
with CustomFramesContainer.custom_frames_lock:
curr_thread_id = get_current_thread_id(threading.current_thread())
next_id = CustomFramesContainer._next_frame_id = CustomFramesContainer._next_frame_id + 1
# Note: the frame id kept contains an id and thread information on the thread where the frame was added
# so that later on we can check if the frame is from the current thread by doing frame_id.endswith('|'+thread_id).
frame_custom_thread_id = '__frame__:%s|%s' % (next_id, curr_thread_id)
if DEBUG:
sys.stderr.write('add_custom_frame: %s (%s) %s %s\n' % (
frame_custom_thread_id, get_abs_path_real_path_and_base_from_frame(frame)[-1], frame.f_lineno, frame.f_code.co_name))
CustomFramesContainer.custom_frames[frame_custom_thread_id] = CustomFrame(name, frame, thread_id)
CustomFramesContainer._py_db_command_thread_event.set()
return frame_custom_thread_id
def update_custom_frame(frame_custom_thread_id, frame, thread_id, name=None):
with CustomFramesContainer.custom_frames_lock:
if DEBUG:
sys.stderr.write('update_custom_frame: %s\n' % frame_custom_thread_id)
try:
old = CustomFramesContainer.custom_frames[frame_custom_thread_id]
if name is not None:
old.name = name
old.mod_time += 1
old.thread_id = thread_id
except:
sys.stderr.write('Unable to get frame to replace: %s\n' % (frame_custom_thread_id,))
pydev_log.exception()
CustomFramesContainer._py_db_command_thread_event.set()
def remove_custom_frame(frame_custom_thread_id):
with CustomFramesContainer.custom_frames_lock:
if DEBUG:
sys.stderr.write('remove_custom_frame: %s\n' % frame_custom_thread_id)
CustomFramesContainer.custom_frames.pop(frame_custom_thread_id, None)
CustomFramesContainer._py_db_command_thread_event.set()
| [
"[email protected]"
] | |
ea30277fdda4769bc035c83cf910f8660e83b049 | 421f6ce9490876be113e5ed1ac173b1f6d70cb66 | /newYork/new_york_analysis/recursive_top_level/u_craigslist4237915975/craigslist4237915975scraper/craigslist4237915975scraper/items.py | 2ed8d4fb8cf4de54768e328577d307baa7ea0dfc | [] | no_license | EricSchles/humanTraffickingTalk | a1f4770c4380ea0424663baac79686be5b74733a | f399e6e6188601f34eab3fd8e7fc4a3ca30d9b14 | refs/heads/master | 2021-01-01T06:11:24.424134 | 2014-08-14T18:51:23 | 2014-08-14T18:51:23 | 14,879,906 | 17 | 5 | null | 2019-10-15T11:10:13 | 2013-12-03T01:15:11 | Python | UTF-8 | Python | false | false | 134 | py |
from scrapy.item import Item, Field
class craigslist4237915975Item(Item):
title = Field()
link = Field()
desc = Field()
| [
"[email protected]"
] | |
970feb65038f3cbb7891e048c6ec4edf3da55f5c | 27cd4886e5d08cca23bf36e24339ff1155b7db10 | /generators/adc_sar/BagModules/adc_sar_templates/TISARADC.py | 3397c883b0f4ee6cc3c04f5671d08c84f7ffb9ab | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | ucb-art/laygo | 8539accac6e9888122e8e0afd160d294ffb56bfc | 8f62ec1971480cb27cb592421fd97f590379cff9 | refs/heads/master | 2021-01-11T08:49:24.306674 | 2020-06-18T15:01:50 | 2020-06-18T15:01:50 | 194,750,788 | 24 | 9 | null | null | null | null | UTF-8 | Python | false | false | 19,542 | py | # -*- coding: utf-8 -*-
########################################################################################################################
#
# Copyright (c) 2014, Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
########################################################################################################################
from __future__ import (absolute_import, division,
print_function, unicode_literals)
# noinspection PyUnresolvedReferences,PyCompatibility
from builtins import *
import os
import pkg_resources
from bag.design import Module
yaml_file = pkg_resources.resource_filename(__name__, os.path.join('netlist_info', 'TISARADC.yaml'))
# noinspection PyPep8Naming
class adc_sar_templates__TISARADC(Module):
"""Module for library adc_sar_templates cell TISARADC.
Fill in high level description here.
"""
def __init__(self, bag_config, parent=None, prj=None, **kwargs):
Module.__init__(self, bag_config, yaml_file, parent=parent, prj=prj, **kwargs)
def design(self,
sar_lch,
sar_pw,
sar_nw,
sar_sa_m, sar_sa_m_d,
sar_sa_m_rst, sar_sa_m_rst_d,
sar_sa_m_rgnn, sar_sa_m_rgnp_d,
sar_sa_m_buf, doubleSA, sar_sa_m_smallrgnp,
vref_sf_m_mirror, vref_sf_m_bias, vref_sf_m_off, vref_sf_m_in, vref_sf_m_bias_dum, vref_sf_m_in_dum,
vref_sf_m_byp, vref_sf_m_byp_bias, vref_sf_bias_current, vref_sf,
sar_drv_m_list, sar_ckgen_m, sar_ckgen_fo,
sar_ckgen_ndelay, sar_ckgen_fast, sar_ckgen_muxfast,
sar_logic_m,
sar_fsm_m,
sar_ret_m,
sar_ret_fo,
sar_device_intent,
sar_c_m,
sar_rdx_array, sar_num_inv_bb,
samp_lch,
samp_wp,
samp_wn,
samp_fgn,
samp_fg_inbuf_list,
samp_fg_outbuf_list,
samp_nduml,
samp_ndumr,
samp_nsep,
samp_intent,
num_bits,
samp_use_laygo, samp_tgate,
sf_lch, sf_nw, sf_m_mirror, sf_m_bias, sf_m_off, sf_m_in, sf_m_bias_dum, sf_m_in_dum, sf_m_byp,
sf_m_byp_bias, sf_intent, bias_current, use_sf,
use_offset, num_slices,
clk_lch,
clk_pw,
clk_nw,
clk_cdac_bits,
clk_m_dff,
clk_m_inv1,
clk_m_inv2,
clk_m_tgate,
clk_n_pd,
clk_m_capsw,
clk_unit_cell,
clk_device_intent,
clk_pulse,
ret_lch,
ret_pw,
ret_nw,
ret_m_ibuf,
ret_m_obuf,
ret_m_latch,
ret_m_srbuf,
ret_m_sr,
ret_device_intent,
input_htree,
generate_dac,
space_msamp,
space_msar,
space_mdecap,
rdac_lch, rdac_pw, rdac_nw, rdac_m, rdac_m_bcap, rdac_num_series, rdac_num_bits, rdac_num_dacs, rdac_device_intent
):
"""To be overridden by subclasses to design this module.
This method should fill in values for all parameters in
self.parameters. To design instances of this module, you can
call their design() method or any other ways you coded.
To modify schematic structure, call:
rename_pin()
delete_instance()
replace_instance_master()
reconnect_instance_terminal()
restore_instance()
array_instance()
"""
self.parameters['sar_lch'] = sar_lch
self.parameters['sar_pw'] = sar_pw
self.parameters['sar_nw'] = sar_nw
self.parameters['sar_sa_m'] = sar_sa_m
self.parameters['sar_sa_m_d'] = sar_sa_m_d
self.parameters['sar_sa_m_rst'] = sar_sa_m_rst
self.parameters['sar_sa_m_rst_d'] = sar_sa_m_rst_d
self.parameters['sar_sa_m_rgnn'] = sar_sa_m_rgnn
self.parameters['sar_sa_m_rgnp_d'] = sar_sa_m_rgnp_d
self.parameters['sar_sa_m_buf'] = sar_sa_m_buf
self.parameters['doubleSA'] = doubleSA
self.parameters['sar_sa_m_smallrgnp'] = sar_sa_m_smallrgnp
self.parameters['vref_sf_m_mirror'] = vref_sf_m_mirror
self.parameters['vref_sf_m_bias'] = vref_sf_m_bias
self.parameters['vref_sf_m_in'] = vref_sf_m_in
self.parameters['vref_sf_m_off'] = vref_sf_m_off
self.parameters['vref_sf_m_bias_dum'] = vref_sf_m_bias_dum
self.parameters['vref_sf_m_in_dum'] = vref_sf_m_in_dum
self.parameters['vref_sf_m_byp'] = vref_sf_m_byp
self.parameters['vref_sf_m_byp_bias'] = vref_sf_m_byp_bias
self.parameters['vref_sf_bias_current'] = vref_sf_bias_current
self.parameters['vref_sf'] = vref_sf
self.parameters['sar_drv_m_list'] = sar_drv_m_list
self.parameters['sar_ckgen_m'] = sar_ckgen_m
self.parameters['sar_ckgen_fo'] = sar_ckgen_fo
self.parameters['sar_ckgen_ndelay'] = sar_ckgen_ndelay
self.parameters['sar_ckgen_fast'] = sar_ckgen_fast
self.parameters['sar_ckgen_muxfast'] = sar_ckgen_muxfast
self.parameters['sar_logic_m'] = sar_logic_m
self.parameters['sar_fsm_m'] = sar_fsm_m
self.parameters['sar_ret_m'] = sar_ret_m
self.parameters['sar_ret_fo'] = sar_ret_fo
self.parameters['sar_num_inv_bb'] = sar_num_inv_bb
self.parameters['sar_device_intent'] = sar_device_intent
self.parameters['sar_c_m'] = sar_c_m
self.parameters['sar_rdx_array'] = sar_rdx_array
self.parameters['samp_lch'] = samp_lch
self.parameters['samp_wp'] = samp_wp
self.parameters['samp_wn'] = samp_wn
self.parameters['samp_fgn'] = samp_fgn
self.parameters['samp_fg_inbuf_list'] = samp_fg_inbuf_list
self.parameters['samp_fg_outbuf_list'] = samp_fg_outbuf_list
self.parameters['samp_nduml'] = samp_nduml
self.parameters['samp_ndumr'] = samp_ndumr
self.parameters['samp_nsep'] = samp_nsep
self.parameters['samp_intent'] = samp_intent
self.parameters['num_bits'] = num_bits
self.parameters['samp_tgate'] = samp_tgate
self.parameters['samp_use_laygo'] = samp_use_laygo # if true, use laygo for sampler generation
self.parameters['sf_lch'] = sf_lch
self.parameters['sf_nw'] = sf_nw
self.parameters['sf_m_mirror'] = sf_m_mirror
self.parameters['sf_m_bias'] = sf_m_bias
self.parameters['sf_m_in'] = sf_m_in
self.parameters['sf_m_off'] = sf_m_off
self.parameters['sf_m_bias_dum'] = sf_m_bias_dum
self.parameters['sf_m_in_dum'] = sf_m_in_dum
self.parameters['sf_m_byp'] = sf_m_byp
self.parameters['sf_m_byp_bias'] = sf_m_byp_bias
self.parameters['sf_intent'] = sf_intent
self.parameters['use_offset'] = use_offset
self.parameters['num_slices'] = num_slices
self.parameters['clk_lch'] = clk_lch
self.parameters['clk_pw'] = clk_pw
self.parameters['clk_nw'] = clk_nw
self.parameters['clk_cdac_bits'] = clk_cdac_bits
self.parameters['clk_m_dff'] = clk_m_dff
self.parameters['clk_m_inv1'] = clk_m_inv1
self.parameters['clk_m_inv2'] = clk_m_inv2
self.parameters['clk_m_tgate'] = clk_m_tgate
self.parameters['clk_n_pd'] = clk_n_pd
self.parameters['clk_m_capsw'] = clk_m_capsw
self.parameters['clk_unit_cell'] = clk_unit_cell
self.parameters['clk_device_intent'] = clk_device_intent
self.parameters['ret_lch'] = ret_lch
self.parameters['ret_pw'] = ret_pw
self.parameters['ret_nw'] = ret_nw
self.parameters['ret_m_ibuf'] = ret_m_ibuf
self.parameters['ret_m_obuf'] = ret_m_obuf
self.parameters['ret_m_latch'] = ret_m_latch
self.parameters['ret_device_intent'] = ret_device_intent
self.parameters['input_htree'] = input_htree
self.parameters['generate_dac'] = generate_dac
self.parameters['space_msamp'] = space_msamp
self.parameters['space_msar'] = space_msar
self.parameters['space_mdecap'] = space_mdecap
self.parameters['rdac_lch'] = rdac_lch
self.parameters['rdac_pw'] = rdac_pw
self.parameters['rdac_nw'] = rdac_nw
self.parameters['rdac_m'] = rdac_m
self.parameters['rdac_m_bcap'] = rdac_m_bcap
self.parameters['rdac_num_series'] = rdac_num_series
self.parameters['rdac_num_bits'] = rdac_num_bits
self.parameters['rdac_num_dacs'] = rdac_num_dacs
self.parameters['rdac_device_intent'] = rdac_device_intent
# tisaradc_body
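# Terminal map for the ISAR array instance below: depending on use_sf/vref_sf/use_offset,
# the per-slice offset pins (OSP/OSM) and the source-follower bias pins are either kept
# as plain pins or tied to slices of RDAC_OUT (roughly summarized; see each branch).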
if use_sf == True and vref_sf == True:
term_list = [{
','.join(['INP%d' % (i) for i in range(num_slices)]): 'INP',
','.join(['INM%d' % (i) for i in range(num_slices)]): 'INM',
','.join(['OSP%d' % (i) for i in range(num_slices)]):
','.join(['OSP%d' % (i) for i in range(num_slices)]),
','.join(['OSM%d' % (i) for i in range(num_slices)]):
','.join(['OSM%d' % (i) for i in range(num_slices)]),
','.join(['ASCLKD%d<3:0>' % (i) for i in range(num_slices)]):
','.join(['ASCLKD%d<3:0>' % (i) for i in range(num_slices)]),
','.join(['EXTSEL_CLK%d' % (i) for i in range(num_slices)]):
','.join(['EXTSEL_CLK%d' % (i) for i in range(num_slices)]),
','.join(['MODESEL%d' % (i) for i in range(num_slices)]):
','.join(['MODESEL%d' % (i) for i in range(num_slices)]),
','.join(['CLKCAL%d<4:0>' % i for i in range(num_slices)]):
','.join(['CLKCAL%d<4:0>' % i for i in range(num_slices)]),
','.join(['ADCOUT%d<%d:0>' % (i, num_bits - 1) for i in range(num_slices)]):
','.join(['ADCOUT%d<%d:0>' % (i, num_bits - 1) for i in range(num_slices)]),
','.join(['SF_Voffp%d' % (i) for i in range(num_slices)]):
'RDAC_OUT<0:%d>'%(num_slices-1),
','.join(['SF_Voffn%d' % (i) for i in range(num_slices)]):
'RDAC_OUT<%d:%d>' % ((num_slices), (num_slices*2 - 1)),
','.join(['SF_BIAS%d' % (i) for i in range(num_slices)]):
'RDAC_OUT<%d>' % (num_slices * 2),
','.join(['VREF_SF_BIAS%d' % (i) for i in range(num_slices)]):
'RDAC_OUT<%d>' % (rdac_num_dacs-1),
}]
elif use_offset == True:
term_list = [{
','.join(['INP%d' % (i) for i in range(num_slices)]): 'INP',
','.join(['INM%d' % (i) for i in range(num_slices)]): 'INM',
','.join(['OSP%d' % (i) for i in range(num_slices)]):
'RDAC_OUT<0:%d>'%(num_slices-1),
','.join(['OSM%d' % (i) for i in range(num_slices)]):
'RDAC_OUT<%d:%d>' % ((num_slices), (num_slices*2 - 1)),
','.join(['ASCLKD%d<3:0>' % (i) for i in range(num_slices)]):
','.join(['ASCLKD%d<3:0>' % (i) for i in range(num_slices)]),
','.join(['EXTSEL_CLK%d' % (i) for i in range(num_slices)]):
','.join(['EXTSEL_CLK%d' % (i) for i in range(num_slices)]),
','.join(['MODESEL%d' % (i) for i in range(num_slices)]):
','.join(['MODESEL%d' % (i) for i in range(num_slices)]),
','.join(['CLKCAL%d<4:0>' % i for i in range(num_slices)]):
','.join(['CLKCAL%d<4:0>' % i for i in range(num_slices)]),
','.join(['ADCOUT%d<%d:0>' % (i, num_bits - 1) for i in range(num_slices)]):
','.join(['ADCOUT%d<%d:0>' % (i, num_bits - 1) for i in range(num_slices)]),
}]
elif use_offset == False:
term_list = [{
','.join(['INP%d' % (i) for i in range(num_slices)]): 'INP',
','.join(['INM%d' % (i) for i in range(num_slices)]): 'INM',
','.join(['ASCLKD%d<3:0>' % (i) for i in range(num_slices)]):
','.join(['ASCLKD%d<3:0>' % (i) for i in range(num_slices)]),
','.join(['EXTSEL_CLK%d' % (i) for i in range(num_slices)]):
','.join(['EXTSEL_CLK%d' % (i) for i in range(num_slices)]),
','.join(['MODESEL%d' % (i) for i in range(num_slices)]):
','.join(['MODESEL%d' % (i) for i in range(num_slices)]),
','.join(['CLKCAL%d<4:0>' % i for i in range(num_slices)]):
','.join(['CLKCAL%d<4:0>' % i for i in range(num_slices)]),
','.join(['ADCOUT%d<%d:0>' % (i, num_bits - 1) for i in range(num_slices)]):
','.join(['ADCOUT%d<%d:0>' % (i, num_bits - 1) for i in range(num_slices)]),
}]
name_list=(['ISAR'])
self.array_instance('ISAR', name_list, term_list=term_list)
self.instances['ISAR'][0].design(
sar_lch,
sar_pw,
sar_nw,
sar_sa_m, sar_sa_m_d,
sar_sa_m_rst, sar_sa_m_rst_d,
sar_sa_m_rgnn, sar_sa_m_rgnp_d,
sar_sa_m_buf, doubleSA, sar_sa_m_smallrgnp,
vref_sf_m_mirror, vref_sf_m_bias, vref_sf_m_off, vref_sf_m_in, vref_sf_m_bias_dum, vref_sf_m_in_dum,
vref_sf_m_byp, vref_sf_m_byp_bias, vref_sf_bias_current, vref_sf,
sar_drv_m_list, sar_ckgen_m, sar_ckgen_fo,
sar_ckgen_ndelay, sar_ckgen_fast, sar_ckgen_muxfast,
sar_logic_m,
sar_fsm_m,
sar_ret_m,
sar_ret_fo,
sar_device_intent,
sar_c_m,
sar_rdx_array, sar_num_inv_bb,
samp_lch,
samp_wp,
samp_wn,
samp_fgn,
samp_fg_inbuf_list,
samp_fg_outbuf_list,
samp_nduml,
samp_ndumr,
samp_nsep,
samp_intent,
num_bits,
samp_use_laygo, samp_tgate,
sf_lch, sf_nw, sf_m_mirror, sf_m_bias, sf_m_off, sf_m_in, sf_m_bias_dum, sf_m_in_dum, sf_m_byp,
sf_m_byp_bias, sf_intent, bias_current, use_sf,
use_offset, num_slices,
clk_lch,
clk_pw,
clk_nw,
clk_cdac_bits,
clk_m_dff,
clk_m_inv1,
clk_m_inv2,
clk_m_tgate,
clk_n_pd,
clk_m_capsw,
clk_unit_cell,
clk_device_intent,
clk_pulse,
ret_lch,
ret_pw,
ret_nw,
ret_m_ibuf,
ret_m_obuf,
ret_m_latch,
ret_m_srbuf,
ret_m_sr,
ret_device_intent,
input_htree,
space_msamp,
space_msar,
space_mdecap,
)
# RDAC generation
if generate_dac:
self.instances['IRDAC'].design(rdac_lch, rdac_pw, rdac_nw, rdac_m, rdac_m_bcap, rdac_num_series,
rdac_num_bits, rdac_num_dacs, rdac_device_intent)
self.reconnect_instance_terminal(inst_name='IRDAC', term_name='out<%d:0>'%(rdac_num_dacs-1),
net_name='RDAC_OUT<%d:0>'%(rdac_num_dacs-1))
self.reconnect_instance_terminal(inst_name='IRDAC', term_name='SEL<%d:0>'%(rdac_num_dacs*rdac_num_bits-1),
net_name='RDAC_SEL<%d:0>'%(rdac_num_dacs*rdac_num_bits-1))
self.rename_pin('RDAC_SEL', 'RDAC_SEL<%d:0>'%(rdac_num_dacs*rdac_num_bits-1))
else:
self.delete_instance('IRDAC')
self.rename_pin('RDAC_SEL', 'RDAC_OUT<%d:0>'%(rdac_num_dacs-1))
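        # Expand the top-level control/output pins into per-slice buses so each ADC slice
        # gets its own calibration, clock-delay, clock-select and output pins.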
self.rename_pin('CLKCAL', ','.join(['CLKCAL%d<4:0>'%i for i in range(num_slices)]))
self.rename_pin('ASCLKD<3:0>', ','.join(['ASCLKD%d<3:0>'%(i) for i in range(num_slices)]))
self.rename_pin('EXTSEL_CLK', ','.join(['EXTSEL_CLK%d'%(i) for i in range(num_slices)]))
self.rename_pin('ADCOUT', ','.join(['ADCOUT%d<%d:0>'%(i, num_bits-1) for i in range(num_slices)]))
if sar_ckgen_muxfast == True:
self.rename_pin('MODESEL', ','.join(['MODESEL%d'%(i) for i in range(num_slices)]))
else:
self.remove_pin('MODESEL')
if vref_sf == False:
self.remove_pin('VREF_SF_bypass')
#self.remove_pin(','.join(['VREF_SF_BIAS%d' % (i) for i in range(num_slices)]))
if use_sf == False:
self.remove_pin('SF_bypass')
#self.remove_pin(','.join(['SF_BIAS%d' % (i) for i in range(num_slices)]))
#self.remove_pin(','.join(['SF_Voffp%d' % (i) for i in range(num_slices)]))
#self.remove_pin(','.join(['SF_Voffn%d' % (i) for i in range(num_slices)]))
def get_layout_params(self, **kwargs):
"""Returns a dictionary with layout parameters.
        This method computes the layout parameters used to generate the implementation's
        layout. Subclasses should override this method if you need to run post-extraction
        layout.
Parameters
----------
kwargs :
any extra parameters you need to generate the layout parameters dictionary.
Usually you specify layout-specific parameters here, like metal layers of
input/output, customizable wire sizes, and so on.
Returns
-------
params : dict[str, any]
the layout parameters dictionary.
"""
return {}
def get_layout_pin_mapping(self):
"""Returns the layout pin mapping dictionary.
        This method returns a dictionary used to rename the layout pins, in case they are
        different from the schematic pins.
Returns
-------
pin_mapping : dict[str, str]
a dictionary from layout pin names to schematic pin names.
"""
return {}
| [
"[email protected]"
] | |
6e907d99daab017e865c8e55609d42b30531e01b | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/222/users/4069/codes/1578_1331.py | f6e61de10d8ff19b5aa7a45b3dc1f2599615276b | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30 | py | x = 50//3
y = 50%3
print(x, y) | [
"[email protected]"
] | |
ce9503d82749c331998242ffc827e8f6aa81c3bb | 1a8583a07b710a2b8d3344bddb3aa5dd2abc9547 | /fpn/symbols/resnet_v1_101_fpn_rcnn_sod_l0_focal_v4.py | d103e0f2366990e847198ebfde760930a3092875 | [
"MIT"
] | permissive | qilei123/sod_v1_demo | 9fec1377609acaa2c04ced0008208ecabce3f53e | a38f76e5a3af13f8f16d32aa40369f1a4f4fd839 | refs/heads/master | 2020-05-04T18:27:50.655652 | 2019-07-02T01:39:58 | 2019-07-02T01:39:58 | 179,354,251 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92,739 | py | # --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2017 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Haozhi Qi
# --------------------------------------------------------
import cPickle
import mxnet as mx
from utils.symbol import Symbol
from operator_py.pyramid_proposal import *
from operator_py.proposal_target import *
from operator_py.fpn_roi_pooling import *
from operator_py.box_annotator_ohem import *
from operator_py.focal_loss_OptimizedVersion import *
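# ResNet-101 FPN Faster R-CNN symbol with an extra finest pyramid level (l0/p0) and a
# focal-loss classification head ("sod" presumably stands for small-object detection).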
class resnet_v1_101_fpn_rcnn_sod_l0_focal_v4(Symbol):
def __init__(self):
"""
        Use __init__ to define the parameters the network needs
"""
self.shared_param_list = ['rpn_conv', 'rpn_cls_score', 'rpn_bbox_pred']
self.shared_param_dict = {}
for name in self.shared_param_list:
self.shared_param_dict[name + '_weight'] = mx.sym.Variable(name + '_weight')
self.shared_param_dict[name + '_bias'] = mx.sym.Variable(name + '_bias')
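    # ResNet-101 backbone. Returns the raw input plus conv1/res2/res3/res4/res5 features
    # (strides 1, 2, 4, 8, 16, 32 for the default settings); with_dilated keeps res5 at stride 16,
    # with_dconv adds deformable convs in res5, with_dpyramid adds them at the top of res3/res4.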
def get_resnet_backbone(self, data, with_dilated=False, with_dconv=False, with_dpyramid=False, eps=1e-5):
conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=64, pad=(3, 3), kernel=(7, 7), stride=(2, 2), no_bias=True)
bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1, use_global_stats=True, fix_gamma=False, eps=eps)
scale_conv1 = bn_conv1
conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1, act_type='relu')
pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu, pooling_convention='full', pad=(0, 0), kernel=(3, 3), stride=(2, 2), pool_type='max')
res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch1 = bn2a_branch1
res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2a = bn2a_branch2a
res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a, act_type='relu')
res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2b = bn2a_branch2b
res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b, act_type='relu')
res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu, num_filter=256,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2a_branch2c = bn2a_branch2c
res2a = mx.symbol.broadcast_add(name='res2a', *[scale2a_branch1, scale2a_branch2c])
res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a, act_type='relu')
res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu, num_filter=64, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2b_branch2a = bn2b_branch2a
res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a, act_type='relu')
res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu, num_filter=64,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2b_branch2b = bn2b_branch2b
res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b, act_type='relu')
res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu, num_filter=256,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2b_branch2c = bn2b_branch2c
res2b = mx.symbol.broadcast_add(name='res2b', *[res2a_relu, scale2b_branch2c])
res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b, act_type='relu')
res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu, num_filter=64, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2c_branch2a = bn2c_branch2a
res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a, act_type='relu')
res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu, num_filter=64,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2c_branch2b = bn2c_branch2b
res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b, act_type='relu')
res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu, num_filter=256,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2c_branch2c = bn2c_branch2c
res2c = mx.symbol.broadcast_add(name='res2c', *[res2b_relu, scale2c_branch2c])
res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c, act_type='relu')
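        # res3 stage: 4 bottleneck blocks, stride 8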
res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu, num_filter=512, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch1 = bn3a_branch1
res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch2a = bn3a_branch2a
res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a, act_type='relu')
res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu, num_filter=128,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch2b = bn3a_branch2b
res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=scale3a_branch2b, act_type='relu')
res3a_branch2c = mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu, num_filter=512,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', data=res3a_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch2c = bn3a_branch2c
res3a = mx.symbol.broadcast_add(name='res3a', *[scale3a_branch1, scale3a_branch2c])
res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a, act_type='relu')
res3b1_branch2a = mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b1_branch2a = bn3b1_branch2a
res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=scale3b1_branch2a,
act_type='relu')
res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b1_branch2b = bn3b1_branch2b
res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=scale3b1_branch2b,
act_type='relu')
res3b1_branch2c = mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b1_branch2c = bn3b1_branch2c
res3b1 = mx.symbol.broadcast_add(name='res3b1', *[res3a_relu, scale3b1_branch2c])
res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1, act_type='relu')
res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b2_branch2a = bn3b2_branch2a
res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=scale3b2_branch2a,
act_type='relu')
res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b2_branch2b = bn3b2_branch2b
res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b,
act_type='relu')
res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b2_branch2c = bn3b2_branch2c
res3b2 = mx.symbol.broadcast_add(name='res3b2', *[res3b1_relu, scale3b2_branch2c])
res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2, act_type='relu')
res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b3_branch2a = bn3b3_branch2a
res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a,
act_type='relu')
if with_dpyramid:
res3b3_branch2b_offset = mx.symbol.Convolution(name='res3b3_branch2b_offset', data=res3b3_branch2a_relu,
num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
res3b3_branch2b = mx.contrib.symbol.DeformableConvolution(name='res3b3_branch2b', data=res3b3_branch2a_relu,
offset=res3b3_branch2b_offset,
num_filter=128, pad=(1, 1), kernel=(3, 3),
num_deformable_group=4,
stride=(1, 1), no_bias=True)
else:
res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b3_branch2b = bn3b3_branch2b
res3b3_branch2b_relu = mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b,
act_type='relu')
res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b3_branch2c = bn3b3_branch2c
res3b3 = mx.symbol.broadcast_add(name='res3b3', *[res3b2_relu, scale3b3_branch2c])
res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3, act_type='relu')
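        # res4 stage: 23 bottleneck blocks, stride 16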
res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu, num_filter=1024, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch1 = bn4a_branch1
res4a_branch2a = mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch2a = bn4a_branch2a
res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a, act_type='relu')
res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu, num_filter=256,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch2b = bn4a_branch2b
res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b, act_type='relu')
res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu, num_filter=1024,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch2c = bn4a_branch2c
res4a = mx.symbol.broadcast_add(name='res4a', *[scale4a_branch1, scale4a_branch2c])
res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a, act_type='relu')
res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b1_branch2a = bn4b1_branch2a
res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a,
act_type='relu')
res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', data=res4b1_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b1_branch2b = bn4b1_branch2b
res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b,
act_type='relu')
res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b1_branch2c = bn4b1_branch2c
res4b1 = mx.symbol.broadcast_add(name='res4b1', *[res4a_relu, scale4b1_branch2c])
res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1, act_type='relu')
res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b2_branch2a = bn4b2_branch2a
res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a,
act_type='relu')
res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b2_branch2b = bn4b2_branch2b
res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b,
act_type='relu')
res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b2_branch2c = bn4b2_branch2c
res4b2 = mx.symbol.broadcast_add(name='res4b2', *[res4b1_relu, scale4b2_branch2c])
res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2, act_type='relu')
res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b3_branch2a = bn4b3_branch2a
res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a,
act_type='relu')
res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b3_branch2b = bn4b3_branch2b
res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b,
act_type='relu')
res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b3_branch2c = bn4b3_branch2c
res4b3 = mx.symbol.broadcast_add(name='res4b3', *[res4b2_relu, scale4b3_branch2c])
res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3, act_type='relu')
res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b4_branch2a = bn4b4_branch2a
res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a,
act_type='relu')
res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', data=res4b4_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b4_branch2b = bn4b4_branch2b
res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b,
act_type='relu')
res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b4_branch2c = bn4b4_branch2c
res4b4 = mx.symbol.broadcast_add(name='res4b4', *[res4b3_relu, scale4b4_branch2c])
res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4, act_type='relu')
res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b5_branch2a = bn4b5_branch2a
res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a,
act_type='relu')
res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b5_branch2b = bn4b5_branch2b
res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b,
act_type='relu')
res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b5_branch2c = bn4b5_branch2c
res4b5 = mx.symbol.broadcast_add(name='res4b5', *[res4b4_relu, scale4b5_branch2c])
res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5, act_type='relu')
res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b6_branch2a = bn4b6_branch2a
res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a,
act_type='relu')
res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b6_branch2b = bn4b6_branch2b
res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b,
act_type='relu')
res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b6_branch2c = bn4b6_branch2c
res4b6 = mx.symbol.broadcast_add(name='res4b6', *[res4b5_relu, scale4b6_branch2c])
res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6, act_type='relu')
res4b7_branch2a = mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b7_branch2a = bn4b7_branch2a
res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', data=scale4b7_branch2a,
act_type='relu')
res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b7_branch2b = bn4b7_branch2b
res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b,
act_type='relu')
res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b7_branch2c = bn4b7_branch2c
res4b7 = mx.symbol.broadcast_add(name='res4b7', *[res4b6_relu, scale4b7_branch2c])
res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7, act_type='relu')
res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b8_branch2a = bn4b8_branch2a
res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a,
act_type='relu')
res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b8_branch2b = bn4b8_branch2b
res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b,
act_type='relu')
res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b8_branch2c = bn4b8_branch2c
res4b8 = mx.symbol.broadcast_add(name='res4b8', *[res4b7_relu, scale4b8_branch2c])
res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8, act_type='relu')
res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b9_branch2a = bn4b9_branch2a
res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a,
act_type='relu')
res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b9_branch2b = bn4b9_branch2b
res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b,
act_type='relu')
res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b9_branch2c = bn4b9_branch2c
res4b9 = mx.symbol.broadcast_add(name='res4b9', *[res4b8_relu, scale4b9_branch2c])
res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9, act_type='relu')
res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b10_branch2a = bn4b10_branch2a
res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a,
act_type='relu')
res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b10_branch2b = bn4b10_branch2b
res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b,
act_type='relu')
res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b10_branch2c = bn4b10_branch2c
res4b10 = mx.symbol.broadcast_add(name='res4b10', *[res4b9_relu, scale4b10_branch2c])
res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10, act_type='relu')
res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b11_branch2a = bn4b11_branch2a
res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a,
act_type='relu')
res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b11_branch2b = bn4b11_branch2b
res4b11_branch2b_relu = mx.symbol.Activation(name='res4b11_branch2b_relu', data=scale4b11_branch2b,
act_type='relu')
res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b11_branch2c = bn4b11_branch2c
res4b11 = mx.symbol.broadcast_add(name='res4b11', *[res4b10_relu, scale4b11_branch2c])
res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11, act_type='relu')
res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b12_branch2a = bn4b12_branch2a
res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a,
act_type='relu')
res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b12_branch2b = bn4b12_branch2b
res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b,
act_type='relu')
res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b12_branch2c = bn4b12_branch2c
res4b12 = mx.symbol.broadcast_add(name='res4b12', *[res4b11_relu, scale4b12_branch2c])
res4b12_relu = mx.symbol.Activation(name='res4b12_relu', data=res4b12, act_type='relu')
res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b13_branch2a = bn4b13_branch2a
res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=scale4b13_branch2a,
act_type='relu')
res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b13_branch2b = bn4b13_branch2b
res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=scale4b13_branch2b,
act_type='relu')
res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b13_branch2c = bn4b13_branch2c
res4b13 = mx.symbol.broadcast_add(name='res4b13', *[res4b12_relu, scale4b13_branch2c])
res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13, act_type='relu')
res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b14_branch2a = bn4b14_branch2a
res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=scale4b14_branch2a,
act_type='relu')
res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b14_branch2b = bn4b14_branch2b
res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=scale4b14_branch2b,
act_type='relu')
res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b14_branch2c = bn4b14_branch2c
res4b14 = mx.symbol.broadcast_add(name='res4b14', *[res4b13_relu, scale4b14_branch2c])
res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14, act_type='relu')
res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b15_branch2a = bn4b15_branch2a
res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=scale4b15_branch2a,
act_type='relu')
res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b15_branch2b = bn4b15_branch2b
res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=scale4b15_branch2b,
act_type='relu')
res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', data=res4b15_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b15_branch2c = bn4b15_branch2c
res4b15 = mx.symbol.broadcast_add(name='res4b15', *[res4b14_relu, scale4b15_branch2c])
res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15, act_type='relu')
res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b16_branch2a = bn4b16_branch2a
res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=scale4b16_branch2a,
act_type='relu')
res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b16_branch2b = bn4b16_branch2b
res4b16_branch2b_relu = mx.symbol.Activation(name='res4b16_branch2b_relu', data=scale4b16_branch2b,
act_type='relu')
res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', data=res4b16_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b16_branch2c = bn4b16_branch2c
res4b16 = mx.symbol.broadcast_add(name='res4b16', *[res4b15_relu, scale4b16_branch2c])
res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16, act_type='relu')
res4b17_branch2a = mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b17_branch2a = bn4b17_branch2a
res4b17_branch2a_relu = mx.symbol.Activation(name='res4b17_branch2a_relu', data=scale4b17_branch2a,
act_type='relu')
res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b17_branch2b = bn4b17_branch2b
res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=scale4b17_branch2b,
act_type='relu')
res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b17_branch2c = bn4b17_branch2c
res4b17 = mx.symbol.broadcast_add(name='res4b17', *[res4b16_relu, scale4b17_branch2c])
res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17, act_type='relu')
res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b18_branch2a = bn4b18_branch2a
res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=scale4b18_branch2a,
act_type='relu')
res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b18_branch2b = bn4b18_branch2b
res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', data=scale4b18_branch2b,
act_type='relu')
res4b18_branch2c = mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b18_branch2c = bn4b18_branch2c
res4b18 = mx.symbol.broadcast_add(name='res4b18', *[res4b17_relu, scale4b18_branch2c])
res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18, act_type='relu')
res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2a = mx.symbol.BatchNorm(name='bn4b19_branch2a', data=res4b19_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b19_branch2a = bn4b19_branch2a
res4b19_branch2a_relu = mx.symbol.Activation(name='res4b19_branch2a_relu', data=scale4b19_branch2a,
act_type='relu')
res4b19_branch2b = mx.symbol.Convolution(name='res4b19_branch2b', data=res4b19_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b19_branch2b = mx.symbol.BatchNorm(name='bn4b19_branch2b', data=res4b19_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b19_branch2b = bn4b19_branch2b
res4b19_branch2b_relu = mx.symbol.Activation(name='res4b19_branch2b_relu', data=scale4b19_branch2b,
act_type='relu')
res4b19_branch2c = mx.symbol.Convolution(name='res4b19_branch2c', data=res4b19_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2c = mx.symbol.BatchNorm(name='bn4b19_branch2c', data=res4b19_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b19_branch2c = bn4b19_branch2c
res4b19 = mx.symbol.broadcast_add(name='res4b19', *[res4b18_relu, scale4b19_branch2c])
res4b19_relu = mx.symbol.Activation(name='res4b19_relu', data=res4b19, act_type='relu')
res4b20_branch2a = mx.symbol.Convolution(name='res4b20_branch2a', data=res4b19_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2a = mx.symbol.BatchNorm(name='bn4b20_branch2a', data=res4b20_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b20_branch2a = bn4b20_branch2a
res4b20_branch2a_relu = mx.symbol.Activation(name='res4b20_branch2a_relu', data=scale4b20_branch2a,
act_type='relu')
res4b20_branch2b = mx.symbol.Convolution(name='res4b20_branch2b', data=res4b20_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b20_branch2b = mx.symbol.BatchNorm(name='bn4b20_branch2b', data=res4b20_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b20_branch2b = bn4b20_branch2b
res4b20_branch2b_relu = mx.symbol.Activation(name='res4b20_branch2b_relu', data=scale4b20_branch2b,
act_type='relu')
res4b20_branch2c = mx.symbol.Convolution(name='res4b20_branch2c', data=res4b20_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2c = mx.symbol.BatchNorm(name='bn4b20_branch2c', data=res4b20_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b20_branch2c = bn4b20_branch2c
res4b20 = mx.symbol.broadcast_add(name='res4b20', *[res4b19_relu, scale4b20_branch2c])
res4b20_relu = mx.symbol.Activation(name='res4b20_relu', data=res4b20, act_type='relu')
res4b21_branch2a = mx.symbol.Convolution(name='res4b21_branch2a', data=res4b20_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2a = mx.symbol.BatchNorm(name='bn4b21_branch2a', data=res4b21_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b21_branch2a = bn4b21_branch2a
res4b21_branch2a_relu = mx.symbol.Activation(name='res4b21_branch2a_relu', data=scale4b21_branch2a,
act_type='relu')
res4b21_branch2b = mx.symbol.Convolution(name='res4b21_branch2b', data=res4b21_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b21_branch2b = mx.symbol.BatchNorm(name='bn4b21_branch2b', data=res4b21_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b21_branch2b = bn4b21_branch2b
res4b21_branch2b_relu = mx.symbol.Activation(name='res4b21_branch2b_relu', data=scale4b21_branch2b,
act_type='relu')
res4b21_branch2c = mx.symbol.Convolution(name='res4b21_branch2c', data=res4b21_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2c = mx.symbol.BatchNorm(name='bn4b21_branch2c', data=res4b21_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b21_branch2c = bn4b21_branch2c
res4b21 = mx.symbol.broadcast_add(name='res4b21', *[res4b20_relu, scale4b21_branch2c])
res4b21_relu = mx.symbol.Activation(name='res4b21_relu', data=res4b21, act_type='relu')
res4b22_branch2a = mx.symbol.Convolution(name='res4b22_branch2a', data=res4b21_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2a = mx.symbol.BatchNorm(name='bn4b22_branch2a', data=res4b22_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b22_branch2a = bn4b22_branch2a
res4b22_branch2a_relu = mx.symbol.Activation(name='res4b22_branch2a_relu', data=scale4b22_branch2a,
act_type='relu')
if with_dpyramid:
res4b22_branch2b_offset = mx.symbol.Convolution(name='res4b22_branch2b_offset', data=res4b22_branch2a_relu,
num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
res4b22_branch2b = mx.contrib.symbol.DeformableConvolution(name='res4b22_branch2b', data=res4b22_branch2a_relu,
offset=res4b22_branch2b_offset,
num_filter=256, pad=(1, 1), kernel=(3, 3),
num_deformable_group=4,
stride=(1, 1), no_bias=True)
else:
res4b22_branch2b = mx.symbol.Convolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b22_branch2b = mx.symbol.BatchNorm(name='bn4b22_branch2b', data=res4b22_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b22_branch2b = bn4b22_branch2b
res4b22_branch2b_relu = mx.symbol.Activation(name='res4b22_branch2b_relu', data=scale4b22_branch2b,
act_type='relu')
res4b22_branch2c = mx.symbol.Convolution(name='res4b22_branch2c', data=res4b22_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2c = mx.symbol.BatchNorm(name='bn4b22_branch2c', data=res4b22_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b22_branch2c = bn4b22_branch2c
res4b22 = mx.symbol.broadcast_add(name='res4b22', *[res4b21_relu, scale4b22_branch2c])
res4b22_relu = mx.symbol.Activation(name='res4b22_relu', data=res4b22, act_type='relu')
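        # res5 stage: with_dilated keeps stride 16 and uses dilation 2; otherwise downsample to stride 32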
if with_dilated:
res5_stride = (1, 1)
res5_dilate = (2, 2)
else:
res5_stride = (2, 2)
res5_dilate = (1, 1)
# res5a-bottleneck
res5a_branch2a = mx.symbol.Convolution(name='res5a_branch2a', data=res4b22_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
bn5a_branch2a = mx.symbol.BatchNorm(name='bn5a_branch2a', data=res5a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2a = bn5a_branch2a
res5a_branch2a_relu = mx.symbol.Activation(name='res5a_branch2a_relu', data=scale5a_branch2a, act_type='relu')
if with_dconv:
res5a_branch2b_offset = mx.symbol.Convolution(name='res5a_branch2b_offset', data=res5a_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5a_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5a_branch2b', data=res5a_branch2a_relu, offset=res5a_branch2b_offset, num_filter=512,
pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, stride=(1, 1), dilate=res5_dilate, no_bias=True)
else:
res5a_branch2b = mx.symbol.Convolution(name='res5a_branch2b', data=res5a_branch2a_relu, num_filter=512, pad=res5_dilate,
kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5a_branch2b = mx.symbol.BatchNorm(name='bn5a_branch2b', data=res5a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2b = bn5a_branch2b
res5a_branch2b_relu = mx.symbol.Activation(name='res5a_branch2b_relu', data=scale5a_branch2b, act_type='relu')
res5a_branch2c = mx.symbol.Convolution(name='res5a_branch2c', data=res5a_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5a_branch2c = mx.symbol.BatchNorm(name='bn5a_branch2c', data=res5a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2c = bn5a_branch2c
# res5a-shortcut
res5a_branch1 = mx.symbol.Convolution(name='res5a_branch1', data=res4b22_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
bn5a_branch1 = mx.symbol.BatchNorm(name='bn5a_branch1', data=res5a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch1 = bn5a_branch1
res5a = mx.symbol.broadcast_add(name='res5a', *[scale5a_branch1, scale5a_branch2c])
res5a_relu = mx.symbol.Activation(name='res5a_relu', data=res5a, act_type='relu')
# res5b-bottleneck
res5b_branch2a = mx.symbol.Convolution(name='res5b_branch2a', data=res5a_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2a = mx.symbol.BatchNorm(name='bn5b_branch2a', data=res5b_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2a = bn5b_branch2a
res5b_branch2a_relu = mx.symbol.Activation(name='res5b_branch2a_relu', data=scale5b_branch2a, act_type='relu')
if with_dconv:
res5b_branch2b_offset = mx.symbol.Convolution(name='res5b_branch2b_offset', data=res5b_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5b_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5b_branch2b', data=res5b_branch2a_relu, offset=res5b_branch2b_offset, num_filter=512,
pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
else:
res5b_branch2b = mx.symbol.Convolution(name='res5b_branch2b', data=res5b_branch2a_relu, num_filter=512, pad=res5_dilate,
kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5b_branch2b = mx.symbol.BatchNorm(name='bn5b_branch2b', data=res5b_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2b = bn5b_branch2b
res5b_branch2b_relu = mx.symbol.Activation(name='res5b_branch2b_relu', data=scale5b_branch2b, act_type='relu')
res5b_branch2c = mx.symbol.Convolution(name='res5b_branch2c', data=res5b_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2c = mx.symbol.BatchNorm(name='bn5b_branch2c', data=res5b_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2c = bn5b_branch2c
# res5b-shortcut
res5b = mx.symbol.broadcast_add(name='res5b', *[res5a_relu, scale5b_branch2c])
res5b_relu = mx.symbol.Activation(name='res5b_relu', data=res5b, act_type='relu')
# res5c-bottleneck
res5c_branch2a = mx.symbol.Convolution(name='res5c_branch2a', data=res5b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2a = mx.symbol.BatchNorm(name='bn5c_branch2a', data=res5c_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale5c_branch2a = bn5c_branch2a
res5c_branch2a_relu = mx.symbol.Activation(name='res5c_branch2a_relu', data=scale5c_branch2a, act_type='relu')
if with_dconv:
res5c_branch2b_offset = mx.symbol.Convolution(name='res5c_branch2b_offset', data=res5c_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5c_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5c_branch2b', data=res5c_branch2a_relu, offset=res5c_branch2b_offset, num_filter=512,
pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
else:
res5c_branch2b = mx.symbol.Convolution(name='res5c_branch2b', data=res5c_branch2a_relu, num_filter=512, pad=res5_dilate,
kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5c_branch2b = mx.symbol.BatchNorm(name='bn5c_branch2b', data=res5c_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2b = bn5c_branch2b
res5c_branch2b_relu = mx.symbol.Activation(name='res5c_branch2b_relu', data=scale5c_branch2b, act_type='relu')
res5c_branch2c = mx.symbol.Convolution(name='res5c_branch2c', data=res5c_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2c = mx.symbol.BatchNorm(name='bn5c_branch2c', data=res5c_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2c = bn5c_branch2c
# res5c-shortcut
res5c = mx.symbol.broadcast_add(name='res5c', *[res5b_relu, scale5c_branch2c])
res5c_relu = mx.symbol.Activation(name='res5c_relu', data=res5c, act_type='relu')
return data, conv1_relu, res2c_relu, res3b3_relu, res4b22_relu, res5c_relu
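    # FPN top-down pathway over the six backbone outputs; this variant keeps only the
    # finest map p0 (built from the stride-1 input) and the extra coarse map p6.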
def get_fpn_feature(self, c0, c1, c2, c3, c4, c5, feature_dim=256):
# lateral connection
fpn_p5_1x1 = mx.symbol.Convolution(data=c5, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p5_1x1')
fpn_p4_1x1 = mx.symbol.Convolution(data=c4, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p4_1x1')
fpn_p3_1x1 = mx.symbol.Convolution(data=c3, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p3_1x1')
fpn_p2_1x1 = mx.symbol.Convolution(data=c2, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p2_1x1')
fpn_p1_1x1 = mx.symbol.Convolution(data=c1, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p1_1x1')
fpn_p0_1x1 = mx.symbol.Convolution(data=c0, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p0_1x1')
# top-down connection
fpn_p5_upsample = mx.symbol.UpSampling(fpn_p5_1x1, scale=2, sample_type='nearest', name='fpn_p5_upsample')
fpn_p4_plus = mx.sym.ElementWiseSum(*[fpn_p5_upsample, fpn_p4_1x1], name='fpn_p4_sum')
fpn_p4_upsample = mx.symbol.UpSampling(fpn_p4_plus, scale=2, sample_type='nearest', name='fpn_p4_upsample')
fpn_p3_plus = mx.sym.ElementWiseSum(*[fpn_p4_upsample, fpn_p3_1x1], name='fpn_p3_sum')
fpn_p3_upsample = mx.symbol.UpSampling(fpn_p3_plus, scale=2, sample_type='nearest', name='fpn_p3_upsample')
fpn_p2_plus = mx.sym.ElementWiseSum(*[fpn_p3_upsample, fpn_p2_1x1], name='fpn_p2_sum')
fpn_p2_upsample = mx.symbol.UpSampling(fpn_p2_plus, scale=2, sample_type='nearest', name='fpn_p2_upsample')
fpn_p1_plus = mx.sym.ElementWiseSum(*[fpn_p2_upsample, fpn_p1_1x1], name='fpn_p1_sum')
fpn_p1_upsample = mx.symbol.UpSampling(fpn_p1_plus, scale=2, sample_type='nearest', name='fpn_p1_upsample')
fpn_p0_plus = mx.sym.ElementWiseSum(*[fpn_p1_upsample, fpn_p0_1x1], name='fpn_p0_sum')
# FPN feature
fpn_p6 = mx.sym.Convolution(data=c5, kernel=(3, 3), pad=(1, 1), stride=(2, 2), num_filter=feature_dim, name='fpn_p6')
'''
fpn_p5 = mx.symbol.Convolution(data=fpn_p5_1x1, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p5')
fpn_p4 = mx.symbol.Convolution(data=fpn_p4_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p4')
fpn_p3 = mx.symbol.Convolution(data=fpn_p3_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p3')
fpn_p2 = mx.symbol.Convolution(data=fpn_p2_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p2')
fpn_p1 = mx.symbol.Convolution(data=fpn_p1_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p1')
'''
fpn_p0 = mx.symbol.Convolution(data=fpn_p0_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p0')
#return fpn_p0, fpn_p1, fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6
return fpn_p0,fpn_p6
def get_fpn_feature1(self, c0, c1, c2, c3, c4, c5, feature_dim=128):
eps = 1e-5
# lateral connection
fpn_p5_1x1 = mx.symbol.Convolution(data=c5, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p5_1x1')
bn_fpn_p5_1x1 = mx.symbol.BatchNorm(name='bn_fpn_p5_1x1', data=fpn_p5_1x1, use_global_stats=True, fix_gamma=False, eps=eps)
bn_fpn_p5_1x1_relu = mx.symbol.Activation(name='bn_fpn_p5_1x1_relu', data=bn_fpn_p5_1x1, act_type='relu')
fpn_p4_1x1 = mx.symbol.Convolution(data=c4, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p4_1x1')
bn_fpn_p4_1x1 = mx.symbol.BatchNorm(name='bn_fpn_p4_1x1', data=fpn_p4_1x1, use_global_stats=True, fix_gamma=False, eps=eps)
bn_fpn_p4_1x1_relu = mx.symbol.Activation(name='bn_fpn_p4_1x1_relu', data=bn_fpn_p4_1x1, act_type='relu')
fpn_p3_1x1 = mx.symbol.Convolution(data=c3, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p3_1x1')
bn_fpn_p3_1x1 = mx.symbol.BatchNorm(name='bn_fpn_p3_1x1', data=fpn_p3_1x1, use_global_stats=True, fix_gamma=False, eps=eps)
bn_fpn_p3_1x1_relu = mx.symbol.Activation(name='bn_fpn_p3_1x1_relu', data=bn_fpn_p3_1x1, act_type='relu')
fpn_p2_1x1 = mx.symbol.Convolution(data=c2, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p2_1x1')
bn_fpn_p2_1x1 = mx.symbol.BatchNorm(name='bn_fpn_p2_1x1', data=fpn_p2_1x1, use_global_stats=True, fix_gamma=False, eps=eps)
bn_fpn_p2_1x1_relu = mx.symbol.Activation(name='bn_fpn_p2_1x1_relu', data=bn_fpn_p2_1x1, act_type='relu')
fpn_p1_1x1 = mx.symbol.Convolution(data=c1, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p1_1x1')
bn_fpn_p1_1x1 = mx.symbol.BatchNorm(name='bn_fpn_p1_1x1', data=fpn_p1_1x1, use_global_stats=True, fix_gamma=False, eps=eps)
bn_fpn_p1_1x1_relu = mx.symbol.Activation(name='bn_fpn_p1_1x1_relu', data=bn_fpn_p1_1x1, act_type='relu')
fpn_p0_1x1 = mx.symbol.Convolution(data=c0, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p0_1x1')
bn_fpn_p0_1x1 = mx.symbol.BatchNorm(name='bn_fpn_p0_1x1', data=fpn_p0_1x1, use_global_stats=True, fix_gamma=False, eps=eps)
bn_fpn_p0_1x1_relu = mx.symbol.Activation(name='bn_fpn_p0_1x1_relu', data=bn_fpn_p0_1x1, act_type='relu')
# top-down connection
_kernel=(4,4)
_stride=(2,2)
_pad=(1,1)
fpn_p5_deconv = mx.symbol.Deconvolution(bn_fpn_p5_1x1_relu,kernel=_kernel, stride=_stride,pad=_pad,num_filter=feature_dim,name='fpn_p5_deconv')
fpn_p4_plus = mx.sym.ElementWiseSum(*[fpn_p5_deconv, bn_fpn_p4_1x1_relu], name='fpn_p4_sum')
fpn_p4_deconv = mx.symbol.Deconvolution(fpn_p4_plus,kernel=_kernel, stride=_stride,pad=_pad,num_filter=feature_dim,name='fpn_p4_deconv')
fpn_p3_plus = mx.sym.ElementWiseSum(*[fpn_p4_deconv, bn_fpn_p3_1x1_relu], name='fpn_p3_sum')
fpn_p3_deconv = mx.symbol.Deconvolution(fpn_p3_plus,kernel=_kernel, stride=_stride,pad=_pad,num_filter=feature_dim,name='fpn_p3_deconv')
fpn_p2_plus = mx.sym.ElementWiseSum(*[fpn_p3_deconv, bn_fpn_p2_1x1_relu], name='fpn_p2_sum')
fpn_p2_deconv = mx.symbol.Deconvolution(fpn_p2_plus,kernel=_kernel, stride=_stride,pad=_pad,num_filter=feature_dim,name='fpn_p2_deconv')
fpn_p1_plus = mx.sym.ElementWiseSum(*[fpn_p2_deconv, bn_fpn_p1_1x1_relu], name='fpn_p1_sum')
fpn_p1_deconv = mx.symbol.Deconvolution(fpn_p1_plus,kernel=_kernel, stride=_stride,pad=_pad,num_filter=feature_dim,name='fpn_p1_deconv')
fpn_p0_plus = mx.sym.ElementWiseSum(*[fpn_p1_deconv, bn_fpn_p0_1x1_relu], name='fpn_p0_sum')
# FPN feature
fpn_p6 = mx.sym.Convolution(data=c5, kernel=(3, 3), pad=(1, 1), stride=(2, 2), num_filter=feature_dim, name='fpn_p6')
'''
fpn_p5 = mx.symbol.Convolution(data=fpn_p5_1x1, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p5')
fpn_p4 = mx.symbol.Convolution(data=fpn_p4_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p4')
fpn_p3 = mx.symbol.Convolution(data=fpn_p3_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p3')
fpn_p2 = mx.symbol.Convolution(data=fpn_p2_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p2')
fpn_p1 = mx.symbol.Convolution(data=fpn_p1_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p1')
'''
fpn_p0 = mx.symbol.Convolution(data=fpn_p0_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p0')
return fpn_p0,fpn_p6#,fpn_p5#, fpn_p1, fpn_p2, fpn_p3, fpn_p4, fpn_p5,
def get_rpn_subnet(self, data, num_anchors, suffix):
rpn_conv = mx.sym.Convolution(data=data, kernel=(3, 3), pad=(1, 1), num_filter=128, name='rpn_conv_' + suffix,
weight=self.shared_param_dict['rpn_conv_weight'], bias=self.shared_param_dict['rpn_conv_bias'])
rpn_relu = mx.sym.Activation(data=rpn_conv, act_type='relu', name='rpn_relu_' + suffix)
rpn_cls_score = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name='rpn_cls_score_' + suffix,
weight=self.shared_param_dict['rpn_cls_score_weight'], bias=self.shared_param_dict['rpn_cls_score_bias'])
rpn_bbox_pred = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name='rpn_bbox_pred_' + suffix,
weight=self.shared_param_dict['rpn_bbox_pred_weight'], bias=self.shared_param_dict['rpn_bbox_pred_bias'])
# n x (2*A) x H x W => n x 2 x (A*H*W)
rpn_cls_score_t1 = mx.sym.Reshape(data=rpn_cls_score, shape=(0, 2, -1, 0), name='rpn_cls_score_t1_' + suffix)
rpn_cls_score_t2 = mx.sym.Reshape(data=rpn_cls_score_t1, shape=(0, 2, -1), name='rpn_cls_score_t2_' + suffix)
rpn_cls_prob = mx.sym.SoftmaxActivation(data=rpn_cls_score_t1, mode='channel', name='rpn_cls_prob_' + suffix)
rpn_cls_prob_t = mx.sym.Reshape(data=rpn_cls_prob, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_prob_t_' + suffix)
rpn_bbox_pred_t = mx.sym.Reshape(data=rpn_bbox_pred, shape=(0, 0, -1), name='rpn_bbox_pred_t_' + suffix)
return rpn_cls_score_t2, rpn_cls_prob_t, rpn_bbox_pred_t, rpn_bbox_pred
def get_symbol(self, cfg, is_train=True):
# config alias for convenient
num_classes = cfg.dataset.NUM_CLASSES
num_reg_classes = (2 if cfg.CLASS_AGNOSTIC else num_classes)
data = mx.sym.Variable(name="data")
im_info = mx.sym.Variable(name="im_info")
# shared convolutional layers
res0, res1, res2, res3, res4, res5 = self.get_resnet_backbone(data)
#fpn_p0, fpn_p1, fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6 = self.get_fpn_feature(res0, res1, res2, res3, res4, res5)
fpn_p0,fpn_p6 = self.get_fpn_feature(res0, res1, res2, res3, res4, res5)
#fpn_p0, fpn_p1, fpn_p2, fpn_p3,fpn_p4 = self.get_fpn_feature(res0, res1, res2, res3, res4, res5)
#rpn_cls_score_p0, rpn_prob_p0, rpn_bbox_loss_p0, rpn_bbox_pred_p0 = self.get_rpn_subnet(fpn_p0, cfg.network.NUM_ANCHORS, 'p0')
#rpn_cls_score_p1, rpn_prob_p1, rpn_bbox_loss_p1, rpn_bbox_pred_p1 = self.get_rpn_subnet(fpn_p1, cfg.network.NUM_ANCHORS, 'p1')
#rpn_cls_score_p2, rpn_prob_p2, rpn_bbox_loss_p2, rpn_bbox_pred_p2 = self.get_rpn_subnet(fpn_p2, cfg.network.NUM_ANCHORS, 'p2')
#rpn_cls_score_p3, rpn_prob_p3, rpn_bbox_loss_p3, rpn_bbox_pred_p3 = self.get_rpn_subnet(fpn_p3, cfg.network.NUM_ANCHORS, 'p3')
#rpn_cls_score_p4, rpn_prob_p4, rpn_bbox_loss_p4, rpn_bbox_pred_p4 = self.get_rpn_subnet(fpn_p4, cfg.network.NUM_ANCHORS, 'p4')
#rpn_cls_score_p5, rpn_prob_p5, rpn_bbox_loss_p5, rpn_bbox_pred_p5 = self.get_rpn_subnet(fpn_p5, cfg.network.NUM_ANCHORS, 'p5')
rpn_cls_score_p6, rpn_prob_p6, rpn_bbox_loss_p6, rpn_bbox_pred_p6 = self.get_rpn_subnet(fpn_p6, cfg.network.NUM_ANCHORS, 'p6')
rpn_cls_prob_dict = {
'rpn_cls_prob_stride64': rpn_prob_p6,
#'rpn_cls_prob_stride32': rpn_prob_p5,
#'rpn_cls_prob_stride16': rpn_prob_p4,
#'rpn_cls_prob_stride8': rpn_prob_p3,
#'rpn_cls_prob_stride4': rpn_prob_p2,
#'rpn_cls_prob_stride2': rpn_prob_p1,
#'rpn_cls_prob_stride1': rpn_prob_p0,
}
rpn_bbox_pred_dict = {
'rpn_bbox_pred_stride64': rpn_bbox_pred_p6,
#'rpn_bbox_pred_stride32': rpn_bbox_pred_p5,
#'rpn_bbox_pred_stride16': rpn_bbox_pred_p4,
#'rpn_bbox_pred_stride8': rpn_bbox_pred_p3,
#'rpn_bbox_pred_stride4': rpn_bbox_pred_p2,
#'rpn_bbox_pred_stride2': rpn_bbox_pred_p1,
#'rpn_bbox_pred_stride1': rpn_bbox_pred_p0,
}
arg_dict = dict(rpn_cls_prob_dict.items() + rpn_bbox_pred_dict.items())
if is_train:
rpn_label = mx.sym.Variable(name='label')
rpn_bbox_target = mx.sym.Variable(name='bbox_target')
rpn_bbox_weight = mx.sym.Variable(name='bbox_weight')
gt_boxes = mx.sym.Variable(name="gt_boxes")
#rpn_cls_score = mx.sym.Concat(rpn_cls_score_p0,rpn_cls_score_p1,rpn_cls_score_p2, rpn_cls_score_p3, rpn_cls_score_p4, rpn_cls_score_p5, rpn_cls_score_p6, dim=2)
#rpn_bbox_loss = mx.sym.Concat(rpn_bbox_loss_p0,rpn_bbox_loss_p1,rpn_bbox_loss_p2, rpn_bbox_loss_p3, rpn_bbox_loss_p4, rpn_bbox_loss_p5, rpn_bbox_loss_p6, dim=2)
rpn_cls_score = mx.sym.Concat(rpn_cls_score_p6,dim=2)
rpn_bbox_loss = mx.sym.Concat(rpn_bbox_loss_p6,dim=2)
# RPN classification loss
rpn_cls_output = mx.sym.SoftmaxOutput(data=rpn_cls_score, label=rpn_label, multi_output=True, normalization='valid',
use_ignore=True, ignore_label=-1, name='rpn_cls_prob')
# bounding box regression
rpn_bbox_loss = rpn_bbox_weight * mx.sym.smooth_l1(name='rpn_bbox_loss_l1', scalar=3.0, data=(rpn_bbox_loss - rpn_bbox_target))
rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss, grad_scale=1.0 / cfg.TRAIN.RPN_BATCH_SIZE)
aux_dict = {
'op_type': 'pyramid_proposal', 'name': 'rois',
'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE),
'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS),
'rpn_pre_nms_top_n': cfg.TRAIN.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TRAIN.RPN_POST_NMS_TOP_N,
'threshold': cfg.TRAIN.RPN_NMS_THRESH, 'rpn_min_size': cfg.TRAIN.RPN_MIN_SIZE
}
# ROI proposal
rois = mx.sym.Custom(**dict(arg_dict.items() + aux_dict.items()))
# ROI proposal target
gt_boxes_reshape = mx.sym.Reshape(data=gt_boxes, shape=(-1, 5), name='gt_boxes_reshape')
rois, label, bbox_target, bbox_weight \
= mx.sym.Custom(rois=rois, gt_boxes=gt_boxes_reshape, op_type='proposal_target', num_classes=num_reg_classes, batch_images=cfg.TRAIN.BATCH_IMAGES,
batch_rois=cfg.TRAIN.BATCH_ROIS, cfg=cPickle.dumps(cfg), fg_fraction=cfg.TRAIN.FG_FRACTION)
else:
aux_dict = {
'op_type': 'pyramid_proposal', 'name': 'rois',
'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE),
'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS),
'rpn_pre_nms_top_n': cfg.TEST.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TEST.RPN_POST_NMS_TOP_N,
'threshold': cfg.TEST.RPN_NMS_THRESH, 'rpn_min_size': cfg.TEST.RPN_MIN_SIZE
}
# ROI proposal
rois = mx.sym.Custom(**dict(arg_dict.items() + aux_dict.items()))
roi_pool = mx.symbol.Custom(data_p0=fpn_p0,
rois=rois, op_type='fpn_roi_pooling', name='fpn_roi_pooling', feat_strides = '(1)')
# 2 fc
fc_new_1 = mx.symbol.FullyConnected(name='fc_new_1', data=roi_pool, num_hidden=1024)
fc_new_1_relu = mx.sym.Activation(data=fc_new_1, act_type='relu', name='fc_new_1_relu')
fc_new_2 = mx.symbol.FullyConnected(name='fc_new_2', data=fc_new_1_relu, num_hidden=1024)
fc_new_2_relu = mx.sym.Activation(data=fc_new_2, act_type='relu', name='fc_new_2_relu')
# cls_score/bbox_pred
cls_score = mx.symbol.FullyConnected(name='cls_score', data=fc_new_2_relu, num_hidden=num_classes)
bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=fc_new_2_relu, num_hidden=num_reg_classes * 4)
if is_train:
if cfg.TRAIN.ENABLE_OHEM:
labels_ohem, bbox_weights_ohem = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes,
num_reg_classes=num_reg_classes, roi_per_img=cfg.TRAIN.BATCH_ROIS_OHEM,
cls_score=cls_score, bbox_pred=bbox_pred, labels=label,
bbox_targets=bbox_target, bbox_weights=bbox_weight)
cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=-1)
bbox_loss_ = bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS_OHEM)
rcnn_label = labels_ohem
elif cfg.TRAIN.ENABLE_FOCAL_LOSS:
cls_prob = mx.sym.Custom(op_type='FocalLoss', name='cls_prob', data=cls_score, labels=label, gamma= 2,alpha = 0.25)
# cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid')
bbox_loss_ = bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0,
data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS)
#bbox_loss = mx.sym.Custom(op_type='FocalLoss', name='cls_prob', data=bbox_pred, labels=bbox_target, gamma= 2,alpha = 0.25)
rcnn_label = label
else:
cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid')
bbox_loss_ = bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS)
rcnn_label = label
# reshape output
rcnn_label = mx.sym.Reshape(data=rcnn_label, shape=(cfg.TRAIN.BATCH_IMAGES, -1), name='label_reshape')
cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TRAIN.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_loss = mx.sym.Reshape(data=bbox_loss, shape=(cfg.TRAIN.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_loss_reshape')
# group = mx.sym.Group([rpn_cls_output, rpn_bbox_loss, mx.sym.BlockGrad(cls_prob), mx.sym.BlockGrad(bbox_loss), mx.sym.BlockGrad(rcnn_label)])
group = mx.sym.Group([rpn_cls_output, rpn_bbox_loss, cls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label)])
else:
cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score)
cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_pred = mx.sym.Reshape(data=bbox_pred, shape=(cfg.TEST.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_pred_reshape')
group = mx.sym.Group([rois, cls_prob, bbox_pred])
self.sym = group
return group
def init_weight_rcnn(self, cfg, arg_params, aux_params):
arg_params['fc_new_1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_1_weight'])
arg_params['fc_new_1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_1_bias'])
arg_params['fc_new_2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_2_weight'])
arg_params['fc_new_2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_2_bias'])
arg_params['cls_score_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['cls_score_weight'])
arg_params['cls_score_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['cls_score_bias'])
arg_params['bbox_pred_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['bbox_pred_weight'])
arg_params['bbox_pred_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['bbox_pred_bias'])
def init_weight_fpn(self, cfg, arg_params, aux_params):
arg_params['fpn_p6_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p6_weight'])
arg_params['fpn_p6_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p6_bias'])
'''
arg_params['fpn_p5_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_weight'])
arg_params['fpn_p5_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_bias'])
arg_params['fpn_p4_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_weight'])
arg_params['fpn_p4_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_bias'])
arg_params['fpn_p3_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_weight'])
arg_params['fpn_p3_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_bias'])
arg_params['fpn_p2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_weight'])
arg_params['fpn_p2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_bias'])
arg_params['fpn_p1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p1_weight'])
arg_params['fpn_p1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p1_bias'])
'''
arg_params['fpn_p0_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p0_weight'])
arg_params['fpn_p0_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p0_bias'])
arg_params['fpn_p5_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_1x1_weight'])
arg_params['fpn_p5_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_1x1_bias'])
arg_params['fpn_p4_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_1x1_weight'])
arg_params['fpn_p4_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_1x1_bias'])
arg_params['fpn_p3_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_1x1_weight'])
arg_params['fpn_p3_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_1x1_bias'])
arg_params['fpn_p2_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_1x1_weight'])
arg_params['fpn_p2_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_1x1_bias'])
arg_params['fpn_p1_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p1_1x1_weight'])
arg_params['fpn_p1_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p1_1x1_bias'])
arg_params['fpn_p0_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p0_1x1_weight'])
arg_params['fpn_p0_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p0_1x1_bias'])
'''
for i in range(6):
arg_params['bn_fpn_p'+str(i)+'_1x1_gamma'] = mx.nd.ones(shape=self.arg_shape_dict['bn_fpn_p'+str(i)+'_1x1_gamma'])
arg_params['bn_fpn_p'+str(i)+'_1x1_beta'] = mx.nd.zeros(shape=self.arg_shape_dict['bn_fpn_p'+str(i)+'_1x1_beta'])
aux_params['bn_fpn_p'+str(i)+'_1x1_moving_mean'] = mx.nd.zeros(shape=self.aux_shape_dict['bn_fpn_p'+str(i)+'_1x1_moving_mean'])
aux_params['bn_fpn_p'+str(i)+'_1x1_moving_var'] = mx.nd.ones(shape=self.aux_shape_dict['bn_fpn_p'+str(i)+'_1x1_moving_var'])
arg_params['fpn_p5_deconv_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_deconv_weight'])
#arg_params['fpn_p5_deconv_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_deconv_bias'])
arg_params['fpn_p4_deconv_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_deconv_weight'])
#arg_params['fpn_p4_deconv_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_deconv_bias'])
arg_params['fpn_p3_deconv_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_deconv_weight'])
#arg_params['fpn_p3_deconv_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_deconv_bias'])
arg_params['fpn_p2_deconv_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_deconv_weight'])
#arg_params['fpn_p2_deconv_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_deconv_bias'])
arg_params['fpn_p1_deconv_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p1_deconv_weight'])
#arg_params['fpn_p1_deconv_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p1_deconv_bias'])
'''
def init_weight(self, cfg, arg_params, aux_params):
for name in self.shared_param_list:
arg_params[name + '_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict[name + '_weight'])
arg_params[name + '_bias'] = mx.nd.zeros(shape=self.arg_shape_dict[name + '_bias'])
self.init_weight_rcnn(cfg, arg_params, aux_params)
self.init_weight_fpn(cfg, arg_params, aux_params)
| [
"[email protected]"
] | |
a4e44762a7511ec359dd8e19c070b721d03e6d4c | ce6fc44470dcb5fca78cdd3349a7be70d75f2e3a | /AtCoder/Panasonic 2020/C.py | df4a723d90f0c2af78b234c8e09df7cc7078f4ca | [] | no_license | cormackikkert/competitive-programming | f3fa287fcb74248ba218ecd763f8f6df31d57424 | 3a1200b8ff9b6941c422371961a127d7be8f2e00 | refs/heads/master | 2022-12-17T02:02:40.892608 | 2020-09-20T11:47:15 | 2020-09-20T11:47:15 | 266,775,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | a, b, c = map(int, input().split())
if (c - a - b) >= 0 and 4 * a * b < (c - a - b) * (c - a - b):
print("Yes")
else:
print("No") | [
"[email protected]"
] | |
566bdadc52d20472b63a9220e98e6d64c70af204 | 12fb02e7d946002beee4e095ea23f4d98c968afa | /tscripts/yunwei/operate/compress.py | 2322013f32616938001a146dfb17314ba7e2ad9c | [] | no_license | cash2one/yunwei-1 | 0ab4ec0783c061739dc9a6c3db2f9379605746fd | b929fe23fd95ea1f18bd809b82523101eb414309 | refs/heads/master | 2020-07-02T14:31:00.776030 | 2016-09-09T05:31:52 | 2016-09-09T05:31:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,511 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
'''
date: 2016/08/20
role: compress / decompress helper
usage: cmb = compressBase(log_path)  # instantiate
    cmb.zipp(source_dir,zipfile_path)
    cmb.tar(source_dir,tarfile_path)
    cmb.unzip(zipfile_path,target_dir)
    cmb.untar(tarfile_path,target_dir)
(an illustrative usage sketch is appended at the end of this file)
'''
from __future__ import absolute_import
from yunwei.operate.prefix import log
logIns = log('117')
import os,zipfile,tarfile
### compression / decompression helper class
class compressBase:
    def __init__(self,log_path):
        ### log_path is the file that log messages are written to
        global logIns
        logIns = log('117',log_path)
        self.zf = ''
    ### destructor
def __del__(self):
try:
self.zf.close()
except:
pass
    ### zip compression
    def zipp(self,source_dir,zipfile_path):
        ### check that the source file or directory exists
        if not os.path.exists(source_dir):
            logIns.writeLog('error','%s not exists' %source_dir)
            raise ValueError('117,%s not exists' %source_dir)
        ### walk the source and collect every file into a list
        file_list = []
        if os.path.isfile(source_dir):
            file_list.append(source_dir)
        else:
            for root, dirs, files in os.walk(source_dir):
                for name in files:
                    file_list.append(os.path.join(root, name))
        ### build the archive with the zipfile module
        self.zf = zipfile.ZipFile(zipfile_path, "w", zipfile.ZIP_DEFLATED)
for file_one in file_list:
arc_name = file_one[len(source_dir):]
self.zf.write(file_one,arc_name)
    ### extract a zip archive
def unzip(self,zipfile_path, unzip_dir):
if not os.path.exists(unzip_dir):
os.makedirs(unzip_dir, 0777)
self.zf = zipfile.ZipFile(zipfile_path)
for name in self.zf.namelist():
name = name.replace('\\','/')
if name.endswith('/'):
os.makedirs(os.path.join(unzip_dir, name))
else:
ext_file = os.path.join(unzip_dir, name)
ext_dir = os.path.dirname(ext_file)
if not os.path.exists(ext_dir) :
os.makedirs(ext_dir,0777)
with open(ext_file, 'wb') as ef:
ef.write(self.zf.read(name))
    ### tar (gzip) compression
    def tar(self,source_dir,tarfile_path):
        ### check that the source file or directory exists
        if not os.path.exists(source_dir):
            logIns.writeLog('error','%s not exists' %source_dir)
            raise ValueError('117,%s not exists' %source_dir)
        ### open the archive with the tarfile module
        self.zf = tarfile.open(tarfile_path, "w:gz")
        ### length of the source directory prefix
        len_source = len(source_dir)
        ### walk the source and add every file to the archive
for root, dirs, files in os.walk(source_dir):
for name in files:
full_path = os.path.join(root,name)
self.zf.add(full_path,arcname=os.path.join(root[len_source:],name))
    ### extract a tar archive
def untar(self,tarfile_path, untar_dir):
if not os.path.exists(untar_dir):
os.makedirs(untar_dir, 0777)
try:
self.zf = tarfile.open(tarfile_path, "r:gz")
file_names = self.zf.getnames()
for file_name in file_names:
self.zf.extract(file_name, untar_dir)
except Exception, e:
logIns.writeLog('error','%s untar error' %tarfile_path)
raise ValueError('error','%s untar error' %tarfile_path)
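
### --- illustrative usage sketch (added; not part of the original module) ---
### A minimal, hypothetical driver: the paths and the log file below are made up,
### and the yunwei log module must be importable for compressBase to work.
if __name__ == '__main__':
    cmb = compressBase('/tmp/compress.log')
    cmb.zipp('/tmp/source_dir', '/tmp/source.zip')
    cmb.unzip('/tmp/source.zip', '/tmp/unzipped')
    cmb.tar('/tmp/source_dir', '/tmp/source.tar.gz')
    cmb.untar('/tmp/source.tar.gz', '/tmp/untarred')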
| [
"[email protected]"
] | |
308f47876d956e476994e9c9fe6924bde8b25f3c | 22e9d7c194cf22513d68b61b97c49405a47e8708 | /Number_Theory/sieves_primality_test.py | ef64fdf8d48dbf9a21543d0f6f5e2a11e959499b | [] | no_license | SandeepPadhi/Algorithmic_Database | 44c26f9300a99539781c5beb5587997b3ecadfe1 | ab8040a7dad94c84ec88f40e44b8520edcbe2443 | refs/heads/main | 2023-06-22T02:04:29.362315 | 2021-07-19T17:48:40 | 2021-07-19T17:48:40 | 338,329,340 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | import math
maxn=1000000
spf=[i for i in range(maxn+1)]
def sieve(spf):
for i in range(2,int(math.sqrt(maxn))+1,1):
if spf[i]==i:
            for j in range(i*i,maxn+1,i):
                if spf[j]==j:
                    spf[j]=i
def isPrime(x):
    return x > 1 and spf[x] == x
sieve(spf)
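# --- illustrative addition (not part of the original file) ---
# The smallest-prime-factor table built by sieve() also gives fast factorization,
# which is the usual reason to prefer an SPF sieve over a plain boolean sieve.
def factorize(x):
    factors = []
    while x > 1:
        factors.append(spf[x])
        x //= spf[x]
    return factors
print(factorize(360))  # expected: [2, 2, 2, 3, 3, 5]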
print(isPrime(31)) | [
"[email protected]"
] | |
3fc066210b8493bb0d40a8279d61d93f4157055a | 602fa0e4ce194d3073d78230c61f7053281f9f9b | /code/python/src/util/data_util.py | b08a962dd0200b383f1b66164fc7b1e43403c0a4 | [] | no_license | ziqizhang/wop | 111cfdda1686a874ff1fc11a453a23fb52d43af1 | ea0c37f444de9f2d5303f74b989f6d1a09feb61d | refs/heads/master | 2022-09-14T20:14:11.575021 | 2021-12-10T21:23:24 | 2021-12-10T21:23:24 | 166,239,995 | 2 | 1 | null | 2022-09-01T23:11:13 | 2019-01-17T14:33:51 | Python | UTF-8 | Python | false | false | 13,037 | py | #read csv data as dataframe, perform stratisfied sampling and output the required sample
import collections
import csv
import json
import pickle
import numpy
import pandas as pd
from sklearn import model_selection
from sklearn.model_selection import train_test_split
# convert data to fastText format
def to_fasttext(inCSV, textCol, classCol, outfile):
df = pd.read_csv(inCSV, delimiter="\t", quoting=0, encoding="utf-8"
).as_matrix()
df.astype(str)
X = df[:, textCol]
y = df[:, classCol]
counter = collections.Counter(y)
single_instance = 0
with open(outfile, mode='w') as file:
csvwriter = csv.writer(file, delimiter='\t', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for i in range(len(X)):
label = y[i]
if counter[label] == 1:
single_instance += 1
continue
text=X[i]
csvwriter.writerow(["__label__"+label, text])
print(str(single_instance) + " has only one instance and are deleted")
def subset(inCSV, textCol, classCol, outfolder, percentage):
df = pd.read_csv(inCSV, delimiter="\t", quoting=0, encoding="utf-8"
).as_matrix()
df.astype(str)
X=df[:, textCol]
y = df[:, classCol]
counter=collections.Counter(y)
X_new=[]
y_new=[]
single_instance=0
for i in range(len(X)):
label=y[i]
if counter[label]==1:
single_instance+=1
else:
X_new.append(X[i])
y_new.append(y[i])
print(str(single_instance)+" has only one instance and are deleted")
X_train, X_test, y_train, y_test, \
indices_train, indices_test= model_selection.train_test_split(X_new, y_new, range(len(X_new)), test_size=percentage, random_state=0,
stratify=y_new)
filename=inCSV[inCSV.rfind("/")+1: inCSV.rfind(".tsv")]
with open(outfolder+"/"+filename+"_"+str(percentage)+".index", 'w') as f:
for i in indices_test:
f.write(str(i)+"\n")
with open(outfolder+"/"+filename+"_"+str(percentage)+".tsv", mode='w') as employee_file:
csvwriter = csv.writer(employee_file, delimiter='\t', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for i in range(len(X_test)):
label = y_test[i]
text=X_test[i]
csvwriter.writerow([text, label])
''''
This method reads the json data file (train/val/test) in the SWC2020 mwpd format and saves it as a matrix where each
row is an instance with the following columns:
- 0: id
- 1: name
- 2: description
- 3: categorytext
- 4: url
- 5: lvl1
- 6: lvl2
- 7: lvl3
'''
def read_mwpdformat_to_matrix(in_file):
matrix=[]
with open(in_file) as file:
line = file.readline()
while line is not None and len(line)>0:
js=json.loads(line)
row=[js['ID'],js['Name'],js['Description'],js['CategoryText'],js['URL'],js['lvl1'],js['lvl2'],js['lvl3']]
matrix.append(row)
line=file.readline()
return matrix
'''
given the input WDC prod cat GS product offers, and also the train/test set of 2155 clusters
containing product offers derived from this GS, split the GS into train/test containing product offers
based on their cluster membership and the cluster's presence in train/test
'''
def split_wdcGS_by_traintestsplit(train_file, test_file, gs_file,
outfolder):
train_ids=set()
test_ids=set()
with open(train_file) as file:
line = file.readline()
js=json.loads(line)
for ent in js:
train_ids.add(ent['cluster_id'])
with open(test_file) as file:
line = file.readline()
js=json.loads(line)
for ent in js:
test_ids.add(ent['cluster_id'])
writer_train=open(outfolder+"/wdc_gs_train.json",'w')
writer_test=open(outfolder+"/wds_gs_test.json",'w')
with open(gs_file) as file:
line = file.readline()
js=json.loads(line)
for ent in js:
out_line = json.dumps(ent)
if str(ent['cluster_id']) in train_ids:
writer_train.write(out_line+"\n")
else:
writer_test.write(out_line+"\n")
writer_train.close()
writer_test.close()
def read_wdcgsformat_to_matrix(in_file):
matrix=[]
with open(in_file) as file:
line = file.readline()
while line is not None and len(line) > 0:
ent = json.loads(line)
#id, name, desc, brand, manufacturer, url, label
# if ent['cluster_id']==12261043:
# print("")
try:
row=[ent['cluster_id'],"","","","",ent['url'],ent['categoryLabel']]
schema_prop=ent['schema.org_properties']
for d in schema_prop:
if '/name' in d.keys():
row[1]=d['/name'][1:-2].strip()
elif '/description' in d.keys():
row[2]= d['/description'][1:-2].strip()
elif '/brand' in d.keys():
row[3]=d['/brand'][1:-2].strip()
elif '/manufacturer' in d.keys():
row[4]=d['/manufacturer'][1:-2].strip()
schema_prop = ent['parent_schema.org_properties']
for d in schema_prop:
if row[1]=='' and '/name' in d.keys():
row[1]=d['/name'][1:-2].strip()
elif row[1]=='' and '/title' in d.keys():
row[1]=d['/title'][1:-2].strip()
elif row[2]=='' and'/description' in d.keys():
row[2]= d['/description'][1:-2].strip()
elif row[3]=='' and'/brand' in d.keys():
row[3]=d['/brand'][1:-2].strip()
elif row[4]=='' and'/manufacturer' in d.keys():
row[4]=d['/manufacturer'][1:-2].strip()
matrix.append(row)
except:
print("Error encountered")
line=file.readline()
# row=[js['ID'],js['Name'],js['Description'],js['CategoryText'],js['URL'],js['lvl1'],js['lvl2'],js['lvl3']]
# matrix.append(row)
# line=file.readline()
return matrix
''''
This method reads the json data file (train/val/test) in the Icecat format and saves it as a matrix where each
row is an instance with the following columns:
- 0: ID
- 1: Description.URL
- 2: Brand
- 3: SummaryDescription.LongSummaryDescription
- 4: Title
- 5: Category.CategoryID
- 6: Category.Name.Value
'''
def read_icecatformat_to_matrix(in_file):
matrix=[]
with open(in_file) as file:
line = file.readline()
while line is not None and len(line)>0:
js=json.loads(line)
row=[js['ID'],js['Description.URL'],
js['Brand'],js['SummaryDescription.LongSummaryDescription'],
js['Title'],js['Category.CategoryID'],js['Category.Name.Value']]
# row = [js['ID'], "",
# "", "",
# js['Title'], "",""]
matrix.append(row)
line=file.readline()
return matrix
''''
This method reads the original training data file for the fakeproductreview dataset
fake_reviews_dataset.csv (download from https://osf.io/3vds7/)
and randomly splits it into an 80:20 train/test split
'''
def split_fakeproductrev_to_holdout(in_file, out_folder):
df = pd.read_csv(in_file, header=0, delimiter=",", quoting=0, encoding="utf-8",
)
headers=["category","rating","text","label"]
X=[]
for index, row in df.iterrows():
r = [row[0], row[1], row[3]]
X.append(r)
y = df.iloc[:,2]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 42)
y_train = list(y_train)
y_test=list(y_test)
outfile=out_folder+"/fakeproductrev_train.csv"
with open(outfile, 'w', newline='') as f:
writer = csv.writer(f)
writer.writerow(headers)
for i in range(0, len(X_train)):
r = X_train[i]
r.append(y_train[i])
writer.writerow(r)
outfile=out_folder+"/fakeproductrev_test.csv"
with open(outfile, 'w', newline='') as f:
writer = csv.writer(f)
writer.writerow(headers)
for i in range(0, len(X_test)):
r = X_test[i]
r.append(y_test[i])
writer.writerow(r)
def convert_icecatformat_to_json(in_file, out_file):
outwriter = open(out_file,"w")
with open(in_file, 'rb') as f:
data = pickle.load(f)
# for h in list(data.columns.values):
# print(h)
count=0
for id, row_data in data.iterrows():
entry={}
count+=1
entry['ID'] = str(id)
entry['Description.URL']=replace_nan(row_data['Description.URL'])
entry['Brand']=replace_nan(row_data['Brand'])
entry['SummaryDescription.LongSummaryDescription']= replace_nan(row_data['SummaryDescription.LongSummaryDescription'])
entry['Title']=replace_nan(row_data['Title'])
entry['Category.CategoryID'] = replace_nan(row_data['Category.CategoryID'])
entry['Category.Name.Value'] = replace_nan(row_data['Category.Name.Value'])
line=json.dumps(entry)
outwriter.write(line+"\n")
if count%5000 ==0:
print(count)
print(count)
outwriter.close()
def replace_nan(v):
if type(v) is float and numpy.isnan(v):
return ""
else:
return v
#Brand
#Category.Name.Value
#SummaryDescription.ShortSummaryDescription
#Title
#Description.URL
if __name__ == "__main__":
split_fakeproductrev_to_holdout("/home/zz/Work/data/wop_productfakerev/fake_reviews_dataset.csv",
"/home/zz/Work/data/wop_productfakerev")
exit(0)
# train=read_wdcgsformat_to_matrix("/home/zz/Cloud/GDrive/ziqizhang/project/mwpd/prodcls/data/WDC_CatGS/wdc_gs_train.json")
# test=read_wdcgsformat_to_matrix("/home/zz/Cloud/GDrive/ziqizhang/project/mwpd/prodcls/data/WDC_CatGS/wdc_gs_test.json")
# cls = set()
# for l in train:
# cls.add(l[6])
# print(len(cls))
# exit(0)
# train = read_icecatformat_to_matrix(
# "/home/zz/Work/data/IceCAT/icecat_data_train.json")
# val = read_icecatformat_to_matrix(
# "/home/zz/Work/data/IceCAT/icecat_data_validate.json")
# test = read_icecatformat_to_matrix(
# "/home/zz/Work/data/IceCAT/icecat_data_test.json")
#
# cls = set()
# for l in train:
# cls.add(l[6])
# print(len(cls))
# exit(0)
train = read_mwpdformat_to_matrix(
"/home/zz/Cloud/GDrive/ziqizhang/project/mwpd/prodcls/data/swc2020/train.json")
val = read_mwpdformat_to_matrix(
"/home/zz/Cloud/GDrive/ziqizhang/project/mwpd/prodcls/data/swc2020/validation.json")
test = read_mwpdformat_to_matrix(
"/home/zz/Cloud/GDrive/ziqizhang/project/mwpd/prodcls/data/swc2020/test.json")
cls1 = set()
cls2 = set()
cls3 = set()
for l in train:
cls1.add(l[5])
cls2.add(l[6])
cls3.add(l[7])
print(len(cls1))
exit(0)
#inCSV="/home/zz/Work/data/Rakuten/rdc-catalog-train.tsv"
# outfolder="/home/zz/Work/data/Rakuten/"
# subset(inCSV, 0, 1, outfolder, 0.2)
#
# inCSV = "/home/zz/Work/data/Rakuten/rdc-catalog-gold.tsv"
# outfolder = "/home/zz/Work/data/Rakuten/"
# subset(inCSV, 0, 1, outfolder, 0.2)
# inCSV = "/home/zz/Work/data/Rakuten/rdc-catalog-train.tsv"
# outCSV="/home/zz/Work/data/Rakuten/rdc-catalog-train_fasttext.tsv"
# to_fasttext(inCSV,0,1,outCSV)
#
# inCSV = "/home/zz/Work/data/Rakuten/rdc-catalog-gold.tsv"
# outCSV = "/home/zz/Work/data/Rakuten/rdc-catalog-gold_fasttext.tsv"
# to_fasttext(inCSV, 0, 1, outCSV)
#categories_clusters_testing.json
# read_wdcformat_to_matrix("/home/zz/Cloud/GDrive/ziqizhang/project/mwpd/prodcls/data/WDC_CatGS/categories_clusters_training.json")
# print("end")
# split_wdcGS_by_traintestsplit('/home/zz/Cloud/GDrive/ziqizhang/project/mwpd/prodcls/data/WDC_CatGS/categories_clusters_training.json',
# '/home/zz/Cloud/GDrive/ziqizhang/project/mwpd/prodcls/data/WDC_CatGS/categories_clusters_testing.json',
# '/home/zz/Cloud/GDrive/ziqizhang/project/mwpd/prodcls/data/WDC_CatGS/categories_gold_standard_offers.json',
# '/home/zz/Cloud/GDrive/ziqizhang/project/mwpd/prodcls/data/WDC_CatGS')
convert_icecatformat_to_json("/home/zz/Work/data/IceCAT/icecat_data_validate.pkl",
"/home/zz/Work/data/IceCAT/icecat_data_validate.json")
#read_icecatformat_to_matrx("/home/zz/Work/data/IceCAT/icecat_data_test_target.pkl") | [
"[email protected]"
] | |
b74c7a408b72582b81de14ddae925d60aa364fdf | 86cf79436659ff8d69d6d7a8d9cb358f0d1b4f1c | /AOJ/0383/0383.py | 366208a7d42f41637177a43b9108f38835ec689a | [] | no_license | pombredanne/problem-solving | d96a367851a34fb4f947b3b7a95ad364cf94ea8f | fefdbfb89ba04dbcd7df93c02968759ea970db06 | refs/heads/master | 2020-05-20T12:34:23.654253 | 2019-03-31T09:57:55 | 2019-03-31T09:57:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | A,B,X = map(int, input().split())
ans = a = b = 0
if X % 500 != 0: X += 500 - X%500
if A < B:
a = X//1000 + (1 if (X%1000>0) else 0)
elif A > 2*B:
b = X//500
else:
a = X//1000; X %= 1000
b = X//500
print(A*a + B*b)
| [
"[email protected]"
] | |
54a92741481e50fdde73c533ad52c1b313d363a4 | cb3bce599e657188c30366adb0af3007ff9b8f96 | /src/note/test_proxy.py | bd9bcba2da944244a78ca5f41ac1a3c0cc431346 | [] | no_license | skk4/python_study | 534339e6c378d686c29af6d81429c472fca19d6d | 4bdd2a50f4bdfd28fdb89a881cb2ebb9eac26987 | refs/heads/master | 2021-01-01T04:36:52.037184 | 2017-12-08T01:04:27 | 2017-12-08T01:04:27 | 97,207,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 574 | py | # -*- coding:utf-8 -*-
#import socket
import random
import urllib2
iplist = ['111.13.7.42:81']
url = 'http://www.whatismyip.com.tw/'
proxy = {'http': random.choice(iplist)}
proxy_support = urllib2.ProxyHandler(proxy)
opener = urllib2.build_opener(proxy_support)
opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36')]
urllib2.install_opener(opener)
rq = urllib2.Request(url)
print rq.get_full_url()
fd = urllib2.urlopen(rq)
print fd.read()
fd.close()
| [
"[email protected]"
] | |
6d594e11da8a7b220ea7286f7fb5b4a2a98c0b15 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/16/usersdata/78/6015/submittedfiles/triangulo.py | 8f53086208c10d248c43bc38a441462edf00389a | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
a=input('digite o valor de a:')
b=input('digite o valor de b:')
c=input('digite o valor de c:')
if a>=b>=c>0:
print('s')
if a>b+c:
print('n')
if a**2==(b**2)+(c**2):
print('Re')
if a**2>(b**2)+(c**2):
print('Ob')
if a**2<(b**2)+(c**2):
print('Ac')
if a==b==c:
print('Eq')
if b==c!=a:
print('Is')
if a!=b!=c:
print('Es')
| [
"[email protected]"
] | |
e8f6627e5ca6c6c236f176ab86c0fa1405ddd68d | 691d3f3e04d354e11772335064f33245e1ed8c28 | /lib/galaxy/tools/test.py | ec7c7c7d1a8913c9ba7ecbcc555ce0d7d27eba56 | [
"CC-BY-2.5",
"MIT"
] | permissive | dbcls/dbcls-galaxy | 934a27cc13663549d5208158fc0b2821609399a8 | 6142165ef27f6a02aee42f26e0b94fed67ecc896 | refs/heads/master | 2016-09-05T22:53:27.553419 | 2009-09-09T06:35:28 | 2009-09-09T06:35:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,997 | py | import new, sys
import galaxy.util
import parameters
from parameters import basic
from parameters import grouping
from elementtree.ElementTree import XML
class ToolTestBuilder( object ):
"""
Encapsulates information about a tool test, and allows creation of a
dynamic TestCase class (the unittest framework is very class oriented,
    doing dynamic tests in this way allows better integration)
"""
def __init__( self, tool, name ):
self.tool = tool
self.name = name
self.required_files = []
self.inputs = []
self.outputs = []
self.error = False
self.exception = None
def add_param( self, name, value, extra ):
try:
if name not in self.tool.inputs:
for input_name, input_value in self.tool.inputs.items():
if isinstance( input_value, grouping.Conditional ) or isinstance( input_value, grouping.Repeat ):
self.__expand_grouping_for_data_input(name, value, extra, input_name, input_value)
elif isinstance( self.tool.inputs[name], parameters.DataToolParameter ):
self.required_files.append( ( value, extra ) )
except: pass
self.inputs.append( ( name, value, extra ) )
def add_output( self, name, file ):
self.outputs.append( ( name, file ) )
def __expand_grouping_for_data_input( self, name, value, extra, grouping_name, grouping_value ):
# Currently handles grouping.Conditional and grouping.Repeat
if isinstance( grouping_value, grouping.Conditional ):
if name != grouping_value.test_param.name:
for case in grouping_value.cases:
for case_input_name, case_input_value in case.inputs.items():
if case_input_name == name and isinstance( case_input_value, basic.DataToolParameter ):
self.required_files.append( ( value, extra ) )
return True
elif isinstance( case_input_value, grouping.Conditional ):
self.__expand_grouping_for_data_input(name, value, extra, case_input_name, case_input_value)
elif isinstance( grouping_value, grouping.Repeat ):
# FIXME: grouping.Repeat can only handle 1 repeat param element since the param name
# is something like "input2" and the expanded page display is something like "queries_0|input2".
# The problem is that the only param name on the page is "input2", and adding more test input params
# with the same name ( "input2" ) is not yet supported in our test code ( the lat one added is the only
# one used ).
for input_name, input_value in grouping_value.inputs.items():
if input_name == name and isinstance( input_value, basic.DataToolParameter ):
self.required_files.append( ( value, extra ) )
return True
| [
"[email protected]"
] | |
852f5bf1d22e53bc8195742775a96253742e89ed | 428b0c174d532f362af755164c01d517c5d28eff | /backend/manage.py | 8b76f37a158a57a733ccc8d742f26e9451df73f1 | [] | no_license | crowdbotics-apps/envy-budget-4817 | e861af5ce1631efffd15ee56fcedc7554427976f | 6e4eab96e9786db16e403844be0e22c92f085a62 | refs/heads/master | 2023-01-05T01:03:04.949154 | 2019-06-18T03:33:41 | 2019-06-18T03:33:41 | 192,458,782 | 0 | 0 | null | 2022-12-30T10:31:26 | 2019-06-18T03:30:29 | Python | UTF-8 | Python | false | false | 636 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'envy_budget_4817.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
835ae6671986312e9febcc5c4269d9c60e34366d | 32cba9d6b0cb420e13a2a26c9e8c3d07e2c127b6 | /附录A 梯度下降法/最速下降法(原生Python+scipy导数计算实现).py | cee687d26b7245cfa1b086e591ae59819bbce477 | [] | no_license | wanglg007/Lihang-Statistical-learning-methods-Code | bed22551a2883b40e93340d3f96cf2fcf9e19ef2 | 190d16310be154282550e1f55eaadd8c4dd83263 | refs/heads/main | 2023-07-03T17:00:35.809206 | 2021-08-02T08:37:33 | 2021-08-02T08:37:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,826 | py | from scipy.misc import derivative
def partial_derivative(func, arr, dx=1e-6):
"""计算n元函数在某点各个自变量的梯度向量(偏导数列表)
:param func: [function] n元函数
:param arr: [list/tuple] 目标点的自变量坐标
:param dx: [int/float] 计算时x的增量
:return: [list] 偏导数
"""
n_features = len(arr)
ans = []
for i in range(n_features):
def f(x):
arr2 = list(arr)
arr2[i] = x
return func(arr2)
ans.append(derivative(f, arr[i], dx=dx))
return ans
def golden_section_for_line_search(func, a0, b0, epsilon):
"""一维搜索极小值点(黄金分割法)
:param func: [function] 一元函数
:param a0: [int/float] 目标区域左侧边界
:param b0: [int/float] 目标区域右侧边界
:param epsilon: [int/float] 精度
"""
a1, b1 = a0 + 0.382 * (b0 - a0), b0 - 0.382 * (b0 - a0)
fa, fb = func(a1), func(b1)
while b1 - a1 > epsilon:
if fa <= fb:
b0, b1, fb = b1, a1, fa
a1 = a0 + 0.382 * (b0 - a0)
fa = func(a1)
else:
a0, a1, fa = a1, b1, fb
b1 = b0 - 0.382 * (b0 - a0)
fb = func(b1)
return (a1 + b1) / 2
def steepest_descent(func, n_features, epsilon, distance=3, maximum=1000):
"""梯度下降法
:param func: [function] n元目标函数
:param n_features: [int] 目标函数元数
:param epsilon: [int/float] 学习精度
:param distance: [int/float] 每次一维搜索的长度范围(distance倍梯度的模)
:param maximum: [int] 最大学习次数
:return: [list] 结果点坐标
"""
    x0 = [0] * n_features  # initial values of the variables
    y0 = func(x0)  # initial value of the function
for _ in range(maximum):
        nabla = partial_derivative(func, x0)  # compute the gradient
        # stop iterating once the gradient norm is below the required precision
if pow(sum([nabla[i] ** 2 for i in range(n_features)]), 0.5) < epsilon:
return x0
def f(x):
"""梯度方向的一维函数"""
x2 = [x0[i] - x * nabla[i] for i in range(n_features)]
return func(x2)
        lk = golden_section_for_line_search(f, 0, distance, epsilon=1e-6)  # line search for a stationary point
        x1 = [x0[i] - lk * nabla[i] for i in range(n_features)]  # update the variables
        y1 = func(x1)  # new function value
        if abs(y1 - y0) < epsilon:  # stop when the change is below the learning precision
return x1
x0, y0 = x1, y1
if __name__ == "__main__":
# [0]
print(steepest_descent(lambda x: x[0] ** 2, 1, epsilon=1e-6))
# [-2.9999999999635865, -3.999999999951452]
print(steepest_descent(lambda x: ((x[0] + 3) ** 2 + (x[1] + 4) ** 2) / 2, 2, epsilon=1e-6))
| [
"[email protected]"
] | |
e894a478cf49f5d808333ba19573bf3ba9434e8e | 13f5984be7be77852e4de29ab98d5494a7fc6767 | /Exam/商汤/环形赛道小游戏.py | cfeda51c8d56cb04a31ed5a4d36ff1e03e2acc17 | [] | no_license | YuanXianguo/Python-Interview-Master | 4252514763fc3f563d9b94e751aa873de1719f91 | 2f73786e8c51dbd248341559de171e18f67f9bf2 | refs/heads/master | 2020-11-26T18:14:50.190812 | 2019-12-20T02:18:03 | 2019-12-20T02:18:03 | 229,169,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | def get_sum(nums):
    n = len(nums)
    nums.extend(nums)
    dp = [0] * len(nums)
    dp[0] = nums[0]
    for i in range(1, 2 * n):
        dp[i] = max(dp[i - 1] + nums[i], nums[i])
    return max(dp)
n = int(input())
nums = list(map(int, input().split()))
print(get_sum(nums))
| [
"[email protected]"
] | |
be490d67d8abd9e56665d7b6ef9536c0352d1325 | fd62d8096dc95923341cfac29f0209bfbea887b4 | /models_evaluation/xgboost/grid_search/jobs_test/5.0_0.03_0.0_200.0_10.0.job.py | 9abbc493eabf624713f7efad4e08eff3f17a4fed | [] | no_license | Eulerianial/premise-selection-deepmath-style | 06c8f2f540bc7e3840c6db0a66c5b30b5f4257f9 | 8684a59b5d8beab1d02a3a7c568a16c790ea4b45 | refs/heads/master | 2021-07-17T17:04:13.472687 | 2017-10-25T13:54:44 | 2017-10-25T13:54:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,637 | py | import xgboost as xgb
import argparse
import sys
import os
from saving_loading import *
#####################################
p = {
"max_depth":int(5.0),
"eta":0.03,
"gamma":0.0,
"num_boost_round":int(200.0),
"early_stopping_rounds":int(10.0)
}
#####################################
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run CV for xgboost with particular combination of parameters')
parser.add_argument("X",
help = "path to CSR matrix with features of pairs (theorem, premise)")
parser.add_argument("y",
help = "path to CSV file with labels reflecting relevances of pairs (theorem, premise)")
parser.add_argument("output_directory",
help = "path to directory where performance of tested model should be saved")
args = parser.parse_args()
y = read_csv(os.path.abspath(args.y), type_of_records = "int")
X = load_obj(os.path.abspath(args.X))
output_directory = os.path.abspath(args.output_directory)
dtrain = xgb.DMatrix(X, label = y)
params = {
"max_depth":p["max_depth"],
"eta":p["eta"],
"gamma":p["gamma"],
"objective":"binary:logistic"
}
x = xgb.cv(
params = params,
dtrain = dtrain,
num_boost_round = p["num_boost_round"],
early_stopping_rounds = p["early_stopping_rounds"],
nfold = 4,
metrics = {"error","auc","logloss"}
)
output_name = os.path.join(output_directory, "_".join(map(str, list(p.values())))+".pkl")
save_obj({"params":p, "stats":x}, output_name)
| [
"[email protected]"
] | |
5a37f7eb85b6bd929fabe005a19a2a43d41f15d5 | da5bc6efaebc9ff015938d207b25c7804bc03b33 | /11_class/quiz03/quiz03.py | 58c5273addfe4332aba0c15c597067916327331e | [] | no_license | codud0954/megait_python_20201116 | b0f68f50a1e0d41c3c35535e718d5a236a7b1a98 | a71f57d4332027406953599612cd014de2d26713 | refs/heads/master | 2023-01-31T11:14:27.611468 | 2020-12-18T09:03:11 | 2020-12-18T09:03:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,127 | py | # 제품관리 설계도
class Product:
    # constructor
def __init__(self, name, price, expired_date):
self.name = name
self.price = price
self.expired_date = expired_date
    # product information
def product_info(self):
print("이름:", self.name)
print("가격:", self.price)
print("유통기한", self.expired_date)
    # price of n units of the product
def price_of_product(self, count):
return count * self.price
    # whether the product can still be sold
def sale_status(self):
        # today <= expiration date : product can be sold
        # today >  expiration date : product cannot be sold
today = "2020-12-14"
if today <= self.expired_date:
return "판매 가능 상품"
else:
return "판매 불가 상품"
# create an object
shrimp = Product("새우깡", 1300, "2021-03-01")
shrimp.product_info()
print()
print("제품 5개의 가격 : %d" % shrimp.price_of_product(5))
print("제품 13개의 가격 : %d" % shrimp.price_of_product(13))
print(shrimp.sale_status())
| [
"[email protected]"
] | |
ef9173cfa8a6c3ee550b53d9ab4739412550077e | 567b880347a4ace3a64060753bf9bfadb42fb242 | /demo/app.py | e158c660ac3904f01488022ac78189149d5840be | [] | no_license | land-pack/intuition | 7b8335a8c0a07975c862d8e0daaa1f814bd9f63b | bc0a4e847ebe2b4c80c18d6a7e6e16a828c2a712 | refs/heads/master | 2020-03-23T07:03:36.530012 | 2018-07-18T06:26:09 | 2018-07-18T06:26:09 | 141,245,462 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | import requests
from flask import Flask, render_template
app = Flask(__name__)
@app.route("/")
def index():
r = requests.get('http://127.0.0.1:5001/api/preview')
data = r.json()
images = data.get('images')
return render_template('index.html', images=images)
@app.route("/upload")
def upload():
return render_template('upload.html')
if __name__ == '__main__':
app.run(debug=True)
| [
"[email protected]"
] | |
fc07856387a10a3a8dbed500fe7a51d73eaeb050 | e59273ecf45ddc40af8f51607e3ca1fb46632bb1 | /Payload_Types/apfell/mythic/agent_functions/download.py | fcfff9bf3b21b6ee4794053ec13673c5fa3ac9f6 | [
"BSD-3-Clause",
"MIT"
] | permissive | thiagomayllart/Mythic | 62ae01a42027ac1a71564775c8cc7ac8d0e88aa4 | bb1a90fb3c3e37c284fc812548b8f7ae5ffc1fb1 | refs/heads/master | 2023-06-02T08:12:09.099400 | 2021-06-19T23:30:26 | 2021-06-19T23:30:26 | 326,127,766 | 0 | 1 | NOASSERTION | 2021-06-20T03:20:21 | 2021-01-02T06:59:04 | Python | UTF-8 | Python | false | false | 1,829 | py | from CommandBase import *
import json
from MythicResponseRPC import *
class DownloadArguments(TaskArguments):
def __init__(self, command_line):
super().__init__(command_line)
self.args = {}
async def parse_arguments(self):
if len(self.command_line) > 0:
if self.command_line[0] == "{":
temp_json = json.loads(self.command_line)
if "host" in temp_json:
# this means we have tasking from the file browser rather than the popup UI
# the apfell agent doesn't currently have the ability to do _remote_ listings, so we ignore it
self.command_line = temp_json["path"] + "/" + temp_json["file"]
else:
raise Exception("Unsupported JSON")
class DownloadCommand(CommandBase):
cmd = "download"
needs_admin = False
help_cmd = "download {path to remote file}"
description = "Download a file from the victim machine to the Mythic server in chunks (no need for quotes in the path)."
version = 1
is_exit = False
is_file_browse = False
is_process_list = False
is_download_file = True
is_remove_file = False
is_upload_file = False
author = "@its_a_feature_"
parameters = []
attackmapping = ["T1020", "T1030", "T1041"]
argument_class = DownloadArguments
browser_script = BrowserScript(script_name="download", author="@its_a_feature_")
async def create_tasking(self, task: MythicTask) -> MythicTask:
resp = await MythicResponseRPC(task).register_artifact(
artifact_instance="$.NSFileHandle.fileHandleForReadingAtPath, readDataOfLength",
artifact_type="API Called",
)
return task
async def process_response(self, response: AgentResponse):
pass
| [
"[email protected]"
] | |
2e22a7b99d1f8c4f1f5ce4eb4dafcbd83332bbf1 | d5fe9d0c7c93c3250b9e212435b02d8373dec091 | /code/65.py | 1dd66f853d153787cc61be6ee33a280ffb264627 | [] | no_license | HarshaaArunachalam/GUV | 6937adb84f0928f08c9fbc519310abc06ef3541a | c047887bf6c19a4950c5f634111e1c02966367e5 | refs/heads/master | 2020-05-31T10:52:23.280052 | 2019-08-10T20:23:11 | 2019-08-10T20:23:11 | 190,249,464 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 94 | py | N=int(input())
Na=input().split()
Na=list(Na)
for i in Na:
if(int(i)<N):
print(i)
| [
"[email protected]"
] | |
ec6fcf9d5ab20c814125e6ac6e0b78fc36051033 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p4VQE/R1/benchmark/startPyquil196.py | 6360a951c9bd056e2dd8006aa958ef69a2c7c95e | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,184 | py | # qubit number=4
# total number=12
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=1
prog += H(1) # number=2
prog += H(2) # number=3
prog += H(3) # number=4
prog += CNOT(2,0) # number=5
prog += H(0) # number=9
prog += CZ(2,0) # number=10
prog += H(0) # number=11
prog += X(3) # number=7
prog += X(3) # number=8
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil196.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| [
"[email protected]"
] | |
a2a54db18153e09c2bdd4306052b808031bbdae2 | eba5e5ff22bcba73001fba729218c02cd257759f | /assets/utils/webssh.py | 2cb35bc81b8a867099b45e74389da231bd5cb930 | [] | no_license | duoyichen/Ops-1 | d04ea66aa37c0732ddeff08889819d8ca830985e | 56d3838a40dc0644a0fe8e58f40be421eaddc693 | refs/heads/master | 2020-04-29T11:48:34.329401 | 2019-03-14T10:29:38 | 2019-03-14T10:29:38 | 166,679,958 | 0 | 1 | null | 2019-01-20T15:59:18 | 2019-01-20T15:59:18 | null | UTF-8 | Python | false | false | 5,155 | py | # -*- coding: utf-8 -*-
import paramiko
import threading
import time
import os
import logging
from socket import timeout
from assets.tasks import admin_file
from channels.generic.websocket import WebsocketConsumer
from assets.models import ServerAssets, AdminRecord
from django.conf import settings
from utils.crypt_pwd import CryptPwd
class MyThread(threading.Thread):
def __init__(self, chan):
super(MyThread, self).__init__()
self.chan = chan
self._stop_event = threading.Event()
self.start_time = time.time()
self.current_time = time.strftime(settings.TIME_FORMAT)
self.stdout = []
self.read_lock = threading.RLock()
def stop(self):
self._stop_event.set()
def run(self):
with self.read_lock:
while not self._stop_event.is_set():
time.sleep(0.1)
try:
data = self.chan.chan.recv(1024)
if data:
str_data = bytes.decode(data)
self.chan.send(str_data)
self.stdout.append([time.time() - self.start_time, 'o', str_data])
except timeout:
break
self.chan.send('\n由于长时间没有操作,连接已断开!')
self.stdout.append([time.time() - self.start_time, 'o', '\n由于长时间没有操作,连接已断开!'])
self.chan.close()
def record(self):
record_path = os.path.join(settings.MEDIA_ROOT, 'admin_ssh_records', self.chan.scope['user'].username,
time.strftime('%Y-%m-%d'))
if not os.path.exists(record_path):
os.makedirs(record_path, exist_ok=True)
record_file_name = '{}.{}.cast'.format(self.chan.host_ip, time.strftime('%Y%m%d%H%M%S'))
record_file_path = os.path.join(record_path, record_file_name)
header = {
"version": 2,
"width": self.chan.width,
"height": self.chan.height,
"timestamp": round(self.start_time),
"title": "Demo",
"env": {
"TERM": os.environ.get('TERM'),
"SHELL": os.environ.get('SHELL', '/bin/bash')
},
}
admin_file.delay(record_file_path, self.stdout, header)
login_status_time = time.time() - self.start_time
if login_status_time >= 60:
login_status_time = '{} m'.format(round(login_status_time / 60, 2))
elif login_status_time >= 3600:
login_status_time = '{} h'.format(round(login_status_time / 3660, 2))
else:
login_status_time = '{} s'.format(round(login_status_time))
try:
AdminRecord.objects.create(
admin_login_user=self.chan.scope['user'],
admin_server=self.chan.host_ip,
admin_remote_ip=self.chan.remote_ip,
admin_start_time=self.current_time,
admin_login_status_time=login_status_time,
admin_record_file=record_file_path.split('media/')[1]
)
except Exception as e:
logging.getLogger().error('数据库添加用户操作记录失败,原因:{}'.format(e))
class SSHConsumer(WebsocketConsumer):
def __init__(self, *args, **kwargs):
super(SSHConsumer, self).__init__(*args, **kwargs)
self.ssh = paramiko.SSHClient()
self.group_name = self.scope['url_route']['kwargs']['group_name']
self.server = ServerAssets.objects.select_related('assets').get(id=self.scope['path'].split('/')[3])
self.host_ip = self.server.assets.asset_management_ip
self.width = 150
self.height = 30
self.t1 = MyThread(self)
self.remote_ip = self.scope['query_string'].decode('utf8')
self.chan = None
def connect(self):
self.accept()
username = self.server.username
try:
self.ssh.load_system_host_keys()
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ssh.connect(self.host_ip, int(self.server.port), username,
CryptPwd().decrypt_pwd(self.server.password), timeout=5)
except Exception as e:
            logging.getLogger().error('User {} failed to connect to {} over webssh! Reason: {}'.format(username, self.host_ip, e))
            self.send('User {} failed to connect to {} over webssh! Reason: {}'.format(username, self.host_ip, e))
self.close()
self.chan = self.ssh.invoke_shell(term='xterm', width=self.width, height=self.height)
        # Disconnect the session if there is no input for 3 minutes
self.chan.settimeout(60 * 3)
self.t1.setDaemon(True)
self.t1.start()
def receive(self, text_data=None, bytes_data=None):
self.chan.send(text_data)
def disconnect(self, close_code):
try:
self.t1.record()
finally:
self.ssh.close()
self.t1.stop()
| [
"[email protected]"
] | |
55a9e15caa3390bc0770bedd2dfc2dc21ce45dea | 43204546c687d7ec6bba04dc925eb07fc3f938e7 | /angrdbg/server.py | 478c2e72b3c3c6f84679b0c78a5ca6077afea852 | [
"BSD-2-Clause"
] | permissive | jhscheer/angrdbg | 5ac4a278b02e4009442e1033a1cbd9bb5d024806 | 50f257fcfea1dde8e4e76625fe64e3ac4e5eca51 | refs/heads/master | 2020-03-29T05:38:19.115641 | 2018-09-17T10:15:26 | 2018-09-17T10:15:26 | 149,591,381 | 0 | 0 | BSD-2-Clause | 2018-09-20T10:20:11 | 2018-09-20T10:20:11 | null | UTF-8 | Python | false | false | 7,990 | py | #!/usr/bin/env python
"""
classic rpyc server running a SlaveService + angrdbg + IPython shell
usage:
angrdbg-srv.py # default settings
angrdbg-srv.py --host HOST --port PORT # custom settings
# ssl-authenticated server (keyfile and certfile are required)
angrdbg-srv.py --ssl-keyfile keyfile.pem --ssl-certfile certfile.pem --ssl-cafile cafile.pem
"""
import sys
import os
import rpyc
import threading
import signal
import Queue
from plumbum import cli
from rpyc.utils.server import Server
from rpyc.utils.classic import DEFAULT_SERVER_PORT, DEFAULT_SERVER_SSL_PORT
from rpyc.utils.registry import REGISTRY_PORT
from rpyc.utils.registry import UDPRegistryClient, TCPRegistryClient
from rpyc.utils.authenticators import SSLAuthenticator
from rpyc.lib import setup_logger
from rpyc.core import SlaveService
BANNER = "[angrdbg server v1.0]"
#######################
import angr
import claripy
import pyvex
import angrdbg
import IPython
#from angrdbg import *
#######################
class WeirdServer(Server): # n1 threaded n2 forked
def __init__(self, service, done_event, **kwargs):
self.num_conns = 2
self.thread = None
self.proc = None
self.done_event = done_event
Server.__init__(self, service, **kwargs)
@classmethod
def _handle_sigchld(cls, signum, unused):
try:
while True:
pid, dummy = os.waitpid(-1, os.WNOHANG)
if pid <= 0:
break
except OSError:
pass
# re-register signal handler (see man signal(2), under Portability)
signal.signal(signal.SIGCHLD, cls._handle_sigchld)
def _accept_method(self, sock):
self.num_conns -= 1
if self.num_conns == 1:
t = threading.Thread(
target=self._authenticate_and_serve_client,
args=[sock])
t.start()
self.thread = t
else:
pid = os.fork()
if pid == 0:
# child
try:
self.logger.debug("child process created")
# 76: call signal.siginterrupt(False) in forked child
signal.siginterrupt(signal.SIGCHLD, False)
self.listener.close()
self.clients.clear()
self._authenticate_and_serve_client(sock)
except BaseException:
self.logger.exception(
"child process terminated abnormally")
else:
self.logger.debug("child process terminated")
finally:
self.logger.debug("child terminated")
os._exit(0)
else:
# parent
self.proc = pid
sock.close()
if self.num_conns == 0:
self.done_event.set()
self.listener.close()
self.join()
def join(self):
self.thread.join()
try:
pid, dummy = os.waitpid(self.proc, 0) # os.WNOHANG)
except OSError as ee:
print ee
class AngrDbgServer(cli.Application):
port = cli.SwitchAttr(["-p", "--port"], cli.Range(0, 65535), default=None,
help="The TCP listener port (default = %s, default for SSL = %s)" %
(DEFAULT_SERVER_PORT, DEFAULT_SERVER_SSL_PORT), group="Socket Options")
host = cli.SwitchAttr(
["--host"],
str,
default="127.0.0.1",
help="The host to bind to. "
"The default is INADDR_ANY",
group="Socket Options")
ipv6 = cli.Flag(["--ipv6"], help="Enable IPv6", group="Socket Options")
logfile = cli.SwitchAttr(
"--logfile",
str,
default=None,
help="Specify the log file to use; "
"the default is stderr",
group="Logging")
quiet = cli.Flag(["-q",
"--quiet"],
help="Quiet mode (only errors will be logged)",
group="Logging")
ssl_keyfile = cli.SwitchAttr(
"--ssl-keyfile",
cli.ExistingFile,
help="The keyfile to use for SSL. Required for SSL",
group="SSL",
requires=["--ssl-certfile"])
ssl_certfile = cli.SwitchAttr(
"--ssl-certfile",
cli.ExistingFile,
help="The certificate file to use for SSL. Required for SSL",
group="SSL",
requires=["--ssl-keyfile"])
ssl_cafile = cli.SwitchAttr(
"--ssl-cafile",
cli.ExistingFile,
help="The certificate authority chain file to use for SSL. Optional; enables client-side "
"authentication",
group="SSL",
requires=["--ssl-keyfile"])
auto_register = cli.Flag(
"--register",
help="Asks the server to attempt registering with "
"a registry server. By default, the server will not attempt to register",
group="Registry")
registry_type = cli.SwitchAttr(
"--registry-type",
cli.Set(
"UDP",
"TCP"),
default="UDP",
help="Specify a UDP or TCP registry",
group="Registry")
registry_port = cli.SwitchAttr(
"--registry-port",
cli.Range(
0,
65535),
default=REGISTRY_PORT,
help="The registry's UDP/TCP port",
group="Registry")
registry_host = cli.SwitchAttr(
"--registry-host",
str,
default=None,
help="The registry host machine. For UDP, the default is 255.255.255.255; "
"for TCP, a value is required",
group="Registry")
def main(self):
if self.registry_type == "UDP":
if self.registry_host is None:
self.registry_host = "255.255.255.255"
self.registrar = UDPRegistryClient(
ip=self.registry_host, port=self.registry_port)
else:
if self.registry_host is None:
raise ValueError(
"With TCP registry, you must specify --registry-host")
self.registrar = TCPRegistryClient(
ip=self.registry_host, port=self.registry_port)
if self.ssl_keyfile:
self.authenticator = SSLAuthenticator(
self.ssl_keyfile, self.ssl_certfile, self.ssl_cafile)
default_port = DEFAULT_SERVER_SSL_PORT
else:
self.authenticator = None
default_port = DEFAULT_SERVER_PORT
if self.port is None:
self.port = default_port
setup_logger(self.quiet, self.logfile)
sys.stdout.write(
BANNER + " starting at %s %s\n" %
(self.host, self.port))
sys.stdout.flush()
done_event = threading.Event()
srv = WeirdServer(
SlaveService,
done_event,
hostname=self.host,
port=self.port,
reuse_addr=True,
ipv6=self.ipv6,
authenticator=self.authenticator,
registrar=self.registrar,
auto_register=self.auto_register)
t = threading.Thread(target=self._serve, args=[srv])
t.start()
# wait for 2 connections
done_event.wait()
IPython.embed(
banner1=BANNER + " client connected\n",
banner2="", # "tip: call serve_all() on the client to have a full working shell here.",
exit_msg=BANNER + " shell closed.\nexiting...\n"
)
os.kill(srv.proc, signal.SIGKILL)
os._exit(0)
def _serve(self, srv):
srv.start()
sys.stdout.write("\n" + BANNER + " client disconnected.\nexiting...\n")
os._exit(0)
def main():
AngrDbgServer.run()
'''simple client
import rpyc
import thread
conn1 = rpyc.classic.connect("localhost")
conn2 = rpyc.classic.connect("localhost")
thread.start_new_thread(conn2.serve_all, tuple())
'''
| [
"[email protected]"
] | |
9ae8d7ce445ae3cc95832b024c28c453579539ec | 2b7c7e9b00ed9b2dbbac943ee4b79865a96d10de | /Figure_script/Figure_Sobol_env_heatmap.py | c9d4513a3b6f3d83f0f93bf2429b86ad119e7dbf | [] | no_license | YaojieLu/Plant_traits_inversion | ad973e60bb32717d9d718f774c2ec77433c38ced | ec83642ae2a2e6ef96502e58f8074bffdadfefe8 | refs/heads/master | 2021-06-21T15:22:00.225498 | 2020-12-13T22:12:21 | 2020-12-13T22:12:21 | 140,017,309 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,163 | py |
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# import data
df = pd.read_csv('../Results/Sobol_env.txt', sep = ',', index_col = 0)
df = df[df['T'] == 30]
df['D'] = round(df['D'], 4)
# labels
paras = ['c', 'L', 'p50', 'ps']
latex = ['$\\mathit{c}$', '$\\mathit{L}$',
'$\\psi_{x50}$', '$\\psi_{s}$']
labels = dict(zip(paras, latex))
# figure
sns.set(font_scale = 1.3)
fig = plt.figure(figsize = (16, 16))
for i in range(len(paras)):
ax = fig.add_subplot(2, len(paras)/2, i+1)
df_para = df.pivot(index = 'I', columns = 'D', values = paras[i])
sns.heatmap(df_para, cmap = 'viridis', xticklabels = 3, yticklabels = 3)
#plt.xlim
#plt.ylim([0, 1])
if i > 1:
plt.xlabel('$\\mathit{D}$', fontsize = 20)
else:
ax.axes.get_xaxis().set_visible(False)
if i == 0 or i == 2:
plt.ylabel('$\\mathit{I}$', fontsize = 20)
else:
ax.axes.get_yaxis().set_visible(False)
plt.title(labels[paras[i]], fontsize = 20)
plt.tight_layout
plt.subplots_adjust(wspace = 0, hspace = 0.15)
plt.savefig('../Figures/Figure Sobol_env_heatmap.png', bbox_inches = 'tight')
| [
"="
] | = |
479c2117988d2ed2dca6b2805202adc6d5027b9d | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02397/s357415256.py | f7d7f986e5b38a58810ae61c71f351e5d8d9603c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | while True:
c = input().split()
x, y = int(c[0]), int(c[1])
if x == y == 0:
break
if y < x:
x, y = y, x
print("%d %d" % (x, y))
| [
"[email protected]"
] | |
081a8a4aa09d2eafd182ca6436c7c72218f6dcc5 | 3efee0cf2bd9e0c34bfdd94ab24a15cb88c04509 | /TMM_examples/TMM_fabry_perot.py | 13a671883453a7e29f38c3f94209049946a45615 | [
"MIT"
] | permissive | luwl85/Rigorous-Coupled-Wave-Analysis | bf5016ec70525f5e7bf59dfa93a03902afdfac12 | a28fdf90b5b5fc0fedacc8bb44a0a0c2f2a02143 | refs/heads/master | 2023-04-25T20:46:45.397976 | 2021-05-20T22:17:54 | 2021-05-20T22:17:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,761 | py | '''
TMM applied to a single uniform layer
should recover the analytic fabry perot solution
'''
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
import numpy as np
import matplotlib.pyplot as plt;
import cmath;
from TMM_functions import run_TMM_simulation as rTMM
## GOAL: simulate a single uniform dielectric slab (a Fabry-Perot etalon)
#%% DEFINE SIMULATION PARAMETERS
#% General Units
degrees = np.pi/180;
L0 = 1e-6; #units of microns;
eps0 = 8.854e-12;
mu0 = 4*np.pi*10**-7;
c0 = 1/(np.sqrt(mu0*eps0))
## normalized units
#z' = k0*z;
#k = k/k0;
## REFLECTION AND TRANSMSSION SPACE epsilon and mu PARAMETERS
m_r = 1; e_r = 1; incident_medium = [e_r, m_r];
m_t = 1; e_t = 1; transmission_medium = [e_t, m_t];
## set wavelength scanning range
wavelengths = np.linspace(0.5,1.6,500); #500 nm to 1000 nm
kmagnitude_scan = 2 * np.pi / wavelengths; #free-space wavenumbers k0 = 2*pi/lambda
omega = c0 * kmagnitude_scan; #using the dispersion wavelengths
#source parameters
theta = 0 * degrees; #%elevation angle; #off -normal incidence does not excite guided resonances...
phi = 0 * degrees; #%azimuthal angle
## incident wave properties, at this point, everything is in units of k_0
n_i = np.sqrt(e_r*m_r);
#k0 = np.sqrt(kx**2+ky**2+kz**2); we know k0, theta, and phi
#actually, in the definitions here, kx = k0*sin(theta)*cos(phi), so kx, ky here are normalized
kx = n_i*np.sin(theta)*np.cos(phi); #constant in ALL LAYERS; kx = 0 for normal incidence
ky = n_i*np.sin(theta)*np.sin(phi); #constant in ALL LAYERS; ky = 0 for normal incidence
print((n_i**2, kx**2+ky**2))
kz_inc = cmath.sqrt(e_r * m_r - kx ** 2 - ky ** 2);
normal_vector = np.array([0, 0, -1]) #positive z points down;
ate_vector = np.matrix([0, 1, 0]); #vector for the out of plane E-field
#ampltidue of the te vs tm modes (which are decoupled)
pte = 1; #1/np.sqrt(2);
ptm = 0; #cmath.sqrt(-1)/np.sqrt(2);
polarization_amplitudes = [pte, ptm]
k_inc = [kx, ky];
print('--------incident wave paramters----------------')
print('incident n_i: '+str(n_i))
print('kx_inc: '+str(kx)+' ky_inc: '+str(ky))
print('kz_inc: ' + str(kz_inc));
print('-----------------------------------------------')
#thickness 0 means L = 0, which only pops up in the xponential part of the expression
ER = [12]
UR = [1]
layer_thicknesses = [0.6]
## run simulation
Ref, Tran = rTMM.run_TMM_simulation(wavelengths, polarization_amplitudes, theta, phi, ER, UR, layer_thicknesses,\
transmission_medium, incident_medium)
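# Optional analytic cross-check (a sketch; the names n_slab, d_slab, R_analytic are
# introduced only for this check): for a single lossless slab between identical
# half-spaces at normal incidence, the Airy/Fabry-Perot reflectance is |r|^2 with
# r = r12*(1 - exp(2j*beta))/(1 - r12**2*exp(2j*beta)),
# where r12 = (1 - n_slab)/(1 + n_slab) and beta = 2*pi*n_slab*d_slab/lambda.
n_slab = np.sqrt(ER[0]); d_slab = layer_thicknesses[0];
r12 = (1 - n_slab) / (1 + n_slab);
beta = 2 * np.pi * n_slab * d_slab / wavelengths;
r_analytic = r12 * (1 - np.exp(2j * beta)) / (1 - r12 ** 2 * np.exp(2j * beta));
R_analytic = np.abs(r_analytic) ** 2;  # should overlay Ref for this normal-incidence case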
plt.figure();
plt.plot(wavelengths, Ref);
plt.plot(wavelengths, Tran);
plt.title('Spectrum of a Fabry-Perot Etalon')
plt.xlabel('wavelength ($\mu m$)')
plt.ylabel('R/T')
plt.legend(('Ref','Tran'))
plt.savefig('bragg_TMM.png');
plt.show(); | [
"[email protected]"
] | |
4943cfc0184c4c3df9fbab1578b3b2e97fc886c8 | c4e1afc488da5411dfcd27c16ed23efba14b0fd6 | /src/lliurex/variables/dhcp_deny_unknown_clients.py | 6f8929cf529faad2033749955a42967bedbc7691 | [] | no_license | lliurex/python-llxvars | 7eba023b489e5d466ff24ea0fd53a5417a65e214 | 51745a7725d6342918431807e3897b032e6c2f6e | refs/heads/master | 2021-06-21T00:10:27.845595 | 2021-01-28T09:08:32 | 2021-01-28T09:08:32 | 165,815,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | def init(args=None):
try:
name = args['DENY_UNKNOWN']
except:
name = 'no'
return name
#def init
| [
"[email protected]"
] | |
6a6762c469e81d373c201c6a168dd6ee3e4c665c | ed75b99e824b5724746d72f2d529781eccf8ef0d | /biostar/celeryconfig.py | 8b2c7b42b8db811498eb7a13032c3e9671c2e8aa | [
"MIT"
] | permissive | satra/biostar-central | 6799c4df4d12de1278f60fb2b29623acf8cc7640 | 794c67d2972a4fe700c79841f5f3c0c562352738 | refs/heads/master | 2021-01-12T20:32:14.356389 | 2014-03-20T15:37:27 | 2014-03-20T15:37:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,202 | py | from __future__ import absolute_import
from datetime import timedelta
from celery.schedules import crontab
CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend'
BROKER_URL = 'django://'
CELERY_TASK_SERIALIZER = 'pickle'
CELERY_ACCEPT_CONTENT = ['pickle']
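# Periodic jobs, all dispatched through the generic biostar.celery.call_command task:
# daily data pruning, a sitemap rebuild every 6 hours, a search-index update every
# 15 minutes, and hourly/daily Postgres dumps driven by crontab schedules.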
CELERYBEAT_SCHEDULE = {
'prune_data': {
'task': 'biostar.celery.call_command',
'schedule': timedelta(days=1),
'kwargs': dict(name="prune_data")
},
'sitemap': {
'task': 'biostar.celery.call_command',
'schedule': timedelta(hours=6),
'kwargs': dict(name="sitemap")
},
'update_index': {
'task': 'biostar.celery.call_command',
'schedule': timedelta(minutes=15),
'args': ["update_index"],
'kwargs': {"age": 1}
},
'hourly_dump': {
'task': 'biostar.celery.call_command',
'schedule': crontab(minute=10),
'args': ["biostar_pg_dump"],
'kwargs': {"hourly": True}
},
'daily_dump': {
'task': 'biostar.celery.call_command',
'schedule': crontab(hour=22),
'args': ["biostar_pg_dump"],
},
}
CELERY_TIMEZONE = 'UTC' | [
"[email protected]"
] | |
325a93e9027f90d97fe0431288393f2f293520c7 | 90b8d12660adc7dcf63bffce20ba1b7ede64386a | /official/vision/beta/serving/export_saved_model.py | 95027be136a8209c9e2a438072cf195c7d18771c | [
"Apache-2.0"
] | permissive | thalitadru/models | 7109797ed536ccb10e17bba6add0f571a1c1c96d | 7faaa572db44621f8e2998abd8dc6a22e86001f2 | refs/heads/master | 2022-05-05T15:04:01.683629 | 2022-03-23T16:20:46 | 2022-03-23T16:20:46 | 82,706,460 | 3 | 0 | null | 2017-02-21T17:14:13 | 2017-02-21T17:14:12 | null | UTF-8 | Python | false | false | 3,850 | py | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Vision models export binary for serving/inference.
To export a trained checkpoint in saved_model format (shell script):
EXPERIMENT_TYPE = XX
CHECKPOINT_PATH = XX
EXPORT_DIR_PATH = XX
export_saved_model --experiment=${EXPERIMENT_TYPE} \
--export_dir=${EXPORT_DIR_PATH}/ \
--checkpoint_path=${CHECKPOINT_PATH} \
--batch_size=2 \
--input_image_size=224,224
To serve (python):
export_dir_path = XX
input_type = XX
input_images = XX
imported = tf.saved_model.load(export_dir_path)
model_fn = imported.signatures['serving_default']
output = model_fn(input_images)
"""
from absl import app
from absl import flags
from official.common import registry_imports # pylint: disable=unused-import
from official.core import exp_factory
from official.modeling import hyperparams
from official.vision.beta.serving import export_saved_model_lib
FLAGS = flags.FLAGS
flags.DEFINE_string(
'experiment', None, 'experiment type, e.g. retinanet_resnetfpn_coco')
flags.DEFINE_string('export_dir', None, 'The export directory.')
flags.DEFINE_string('checkpoint_path', None, 'Checkpoint path.')
flags.DEFINE_multi_string(
'config_file',
default=None,
help='YAML/JSON files which specifies overrides. The override order '
'follows the order of args. Note that each file '
'can be used as an override template to override the default parameters '
'specified in Python. If the same parameter is specified in both '
'`--config_file` and `--params_override`, `config_file` will be used '
'first, followed by params_override.')
flags.DEFINE_string(
'params_override', '',
'The JSON/YAML file or string which specifies the parameter to be overriden'
' on top of `config_file` template.')
flags.DEFINE_integer(
'batch_size', None, 'The batch size.')
flags.DEFINE_string(
'input_type', 'image_tensor',
'One of `image_tensor`, `image_bytes`, `tf_example` and `tflite`.')
flags.DEFINE_string(
'input_image_size', '224,224',
'The comma-separated string of two integers representing the height,width '
'of the input to the model.')
flags.DEFINE_string('export_checkpoint_subdir', 'checkpoint',
'The subdirectory for checkpoints.')
flags.DEFINE_string('export_saved_model_subdir', 'saved_model',
'The subdirectory for saved model.')
def main(_):
params = exp_factory.get_exp_config(FLAGS.experiment)
for config_file in FLAGS.config_file or []:
params = hyperparams.override_params_dict(
params, config_file, is_strict=True)
if FLAGS.params_override:
params = hyperparams.override_params_dict(
params, FLAGS.params_override, is_strict=True)
params.validate()
params.lock()
export_saved_model_lib.export_inference_graph(
input_type=FLAGS.input_type,
batch_size=FLAGS.batch_size,
input_image_size=[int(x) for x in FLAGS.input_image_size.split(',')],
params=params,
checkpoint_path=FLAGS.checkpoint_path,
export_dir=FLAGS.export_dir,
export_checkpoint_subdir=FLAGS.export_checkpoint_subdir,
export_saved_model_subdir=FLAGS.export_saved_model_subdir)
if __name__ == '__main__':
app.run(main)
| [
"[email protected]"
] | |
a3bfa9e158ba5fe5b5a7697cfc74d1a729aefa2a | 65c616c59ae005debf91d82f4efc7f7cdcc2a7a4 | /news_recommendation/home/forms.py | a4274fbc39b814ece457b077eceefcf942431907 | [] | no_license | nghiatd16/most_cb | 28db8b0c52cc391f6890f2a56c8dee308a6dfc85 | 46d91016b20d57f3f43b63813f7fbccd5626a848 | refs/heads/master | 2022-12-25T17:33:04.896024 | 2020-09-19T08:34:15 | 2020-09-19T08:34:15 | 296,822,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | from django.forms import ModelForm
import django.forms as forms
from django.conf import settings
import os
import glob
import shutil
| [
"[email protected]"
] | |
c30e22ee2d9981b49022661bd8c8de23908ce27e | 78d7d7aeb78a8cea6d0e10b89fc4aa6c46c95227 | /448.py | d69d285b5d87d18b1e029474dd35d050e1364dcc | [] | no_license | GenryEden/kpolyakovName | 97db13ef93061a8c2afc6cc5acd91337f79063f1 | c5d7f631ae7ec8770e56170574b82ea2b7d8a4d9 | refs/heads/master | 2023-05-23T21:22:51.983756 | 2021-06-21T08:56:49 | 2021-06-21T08:56:49 | 350,466,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | def f(x):
if x < 1:
return 0
elif x == 1:
return 1
else:
ans = f(x-1)
if x - 1 != 7:
ans += f(x-2)
return ans
print(f(12)) | [
"[email protected]"
] | |
4105691310284155e93357df83d7741f403738fd | a6f4e2e2b2e25f7af509598327aaaa5c795433ac | /django_gocardless/views.py | 7208815d058b283889ea24034697e84804b85aa8 | [] | no_license | adamcharnock/django-gocardless | 4042e9dc6a179cf2030064855b82411adc960470 | ac126fcb12baf8a33472f0e22b29ede2b92e27ed | refs/heads/master | 2021-01-18T13:24:22.265030 | 2014-05-08T17:56:35 | 2014-05-08T17:56:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,746 | py | import json
import logging
from django.conf import settings
from django.http.response import HttpResponseBadRequest
from django.views.generic.base import View, logger
from gocardless.utils import generate_signature
class GoCardlessPayloadMixin(object):
def get_payload(self, request):
if not hasattr(self, '_payload'):
if request.method.lower() == 'get':
self._payload = request.GET.dict()
else:
self._payload = json.loads(request.body)['payload']
return self._payload
class GoCardlessSignatureMixin(GoCardlessPayloadMixin):
""" Will verify a GoCardless signature """
manual_signature_check = False
def verify_signature(self, request):
data = self.get_payload(request)
if not data:
logger.warning('No payload or request data found')
return False
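        # Recompute the signature over the payload (minus its 'signature' key) using the
        # app secret and compare it with the signature GoCardless sent.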
pms = data.copy()
pms.pop('signature')
signature = generate_signature(pms, settings.GOCARDLESS_APP_SECRET)
if signature == data['signature']:
return True
return False
def dispatch(self, request, *args, **kwargs):
if not self.manual_signature_check and not self.verify_signature(request):
return self.handle_invalid_signature(request, *args, **kwargs)
response = super(GoCardlessSignatureMixin, self).dispatch(request, *args, **kwargs)
response['Cache-Control'] = 'no-cache'
return response
def handle_invalid_signature(self, request, *args, **kwargs):
response = HttpResponseBadRequest('Signature did not validate')
response['Cache-Control'] = 'no-cache'
return response
class GoCardlessView(GoCardlessSignatureMixin, View):
pass | [
"[email protected]"
] | |
98239664e3b9f688468eb75fcbeb94b3f8f3ea24 | 72644f1098f4b6703cdabb66b4aa91d54a911cbe | /src/protocol/protobuf_client.py | 1b446a6bef96243d1614b55c3c19ed43565d226c | [] | no_license | fei-cow/galaxy-integration-steam | 8f762b5ccbfa2bc8b0f929d0d26fd9ab3802ac94 | b638dd39e95647236ed22c493437f300595eb90b | refs/heads/master | 2022-10-08T16:51:40.050633 | 2020-06-09T13:07:19 | 2020-06-09T13:07:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,453 | py | import asyncio
import struct
import gzip
import json
import logging
import socket
from itertools import count
from typing import Awaitable, Callable, Dict, Optional, Any
from galaxy.api.errors import UnknownBackendResponse
from typing import List, NamedTuple
from protocol.messages import steammessages_base_pb2, steammessages_clientserver_login_pb2, steammessages_player_pb2, \
steammessages_clientserver_friends_pb2, steammessages_clientserver_pb2, steamui_libraryroot_pb2, steammessages_clientserver_2_pb2
from protocol.consts import EMsg, EResult, EAccountType, EFriendRelationship, EPersonaState
from protocol.types import SteamId, ProtoUserInfo
import vdf
import hashlib
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class SteamLicense(NamedTuple):
license: str
shared: bool
class ProtobufClient:
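    # Wire/protocol constants (meaning inferred from their use in this file):
    # _PROTO_MASK      - high bit of the EMsg field; set when the payload is protobuf-encoded.
    # _ACCOUNT_ID_MASK - 64-bit SteamID base for an individual account; a 32-bit account id
    #                    is added to or subtracted from it when converting between the two forms.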
_PROTO_MASK = 0x80000000
_ACCOUNT_ID_MASK = 0x0110000100000000
def __init__(self, set_socket):
self._socket = set_socket
self.log_on_handler: Optional[Callable[[EResult], Awaitable[None]]] = None
self.log_off_handler: Optional[Callable[[EResult], Awaitable[None]]] = None
self.relationship_handler: Optional[Callable[[bool, Dict[int, EFriendRelationship]], Awaitable[None]]] = None
self.user_info_handler: Optional[Callable[[int, ProtoUserInfo], Awaitable[None]]] = None
self.user_nicknames_handler: Optional[Callable[[dict], Awaitable[None]]] = None
self.license_import_handler: Optional[Callable[[int], Awaitable[None]]] = None
self.app_info_handler: Optional[Callable[[int, str], Awaitable[None]]] = None
self.package_info_handler: Optional[Callable[[int], Awaitable[None]]] = None
self.translations_handler: Optional[Callable[[int, Any], Awaitable[None]]] = None
self.stats_handler: Optional[Callable[[int, Any, Any], Awaitable[None]]] = None
self.user_authentication_handler: Optional[Callable[[str, Any], Awaitable[None]]] = None
self.sentry: Optional[Callable[[], Awaitable[None]]] = None
self.steam_id: Optional[int] = None
self.times_handler: Optional[Callable[[int, int, int], Awaitable[None]]] = None
self.times_import_finished_handler: Optional[Callable[[bool], Awaitable[None]]] = None
self._heartbeat_task: Optional[asyncio.Task] = None
self._session_id: Optional[int] = None
self._job_id_iterator = count(1)
self.job_list = []
self.account_info_retrieved = asyncio.Event()
self.login_key_retrieved = asyncio.Event()
self.collections = {'event': asyncio.Event(),
'collections': dict()}
async def close(self, is_socket_connected):
if is_socket_connected:
await self.send_log_off_message()
if self._heartbeat_task is not None:
self._heartbeat_task.cancel()
async def wait_closed(self):
pass
async def _process_packets(self):
pass
async def run(self):
while True:
for job in self.job_list.copy():
logger.info(f"New job on list {job}")
if job['job_name'] == "import_game_stats":
await self._import_game_stats(job['game_id'])
self.job_list.remove(job)
if job['job_name'] == "import_collections":
await self._import_collections()
self.job_list.remove(job)
if job['job_name'] == "import_game_times":
await self._import_game_time()
self.job_list.remove(job)
try:
packet = await asyncio.wait_for(self._socket.recv(), 0.1)
await self._process_packet(packet)
except asyncio.TimeoutError:
pass
async def send_log_off_message(self):
message = steammessages_clientserver_login_pb2.CMsgClientLogOff()
logger.info("Sending log off message")
try:
await self._send(EMsg.ClientLogOff, message)
except Exception as e:
logger.info(f"Unable to send logoff message {repr(e)}")
async def log_on_web_auth(self, steam_id, miniprofile_id, account_name, token):
# magic numbers taken from JavaScript Steam client
message = steammessages_clientserver_login_pb2.CMsgClientLogon()
message.account_name = account_name
message.protocol_version = 65580
message.qos_level = 2
message.client_os_type = 4294966596
message.ui_mode = 4
message.chat_mode = 2
message.web_logon_nonce = token
message.client_instance_id = 0
try:
self.steam_id = steam_id
await self.user_authentication_handler('steam_id', self.steam_id)
await self.user_authentication_handler('account_id', miniprofile_id)
await self._send(EMsg.ClientLogon, message)
except Exception:
self.steam_id = None
raise
async def log_on_password(self, account_name, password, two_factor, two_factor_type):
def sanitize_password(password):
return ''.join([i if ord(i) < 128 else '' for i in password])
message = steammessages_clientserver_login_pb2.CMsgClientLogon()
message.account_name = account_name
message.protocol_version = 65580
message.password = sanitize_password(password)
message.should_remember_password = True
message.supports_rate_limit_response = True
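        # Local IPv4 address, packed big-endian and obfuscated by XOR with a fixed mask before sending.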
message.obfuscated_private_ip.v4 = struct.unpack(">L", socket.inet_aton(socket.gethostbyname(socket.gethostname())))[0] ^ 0xF00DBAAD
if two_factor:
if two_factor_type == 'email':
message.auth_code = two_factor
elif two_factor_type == 'mobile':
message.two_factor_code = two_factor
logger.info("Sending log on message using credentials")
await self._send(EMsg.ClientLogon, message)
async def log_on_token(self, steam_id, account_name, token, used_server_cell_id):
message = steammessages_clientserver_login_pb2.CMsgClientLogon()
message.account_name = account_name
message.cell_id = used_server_cell_id
message.protocol_version = 65580
message.should_remember_password = True
message.supports_rate_limit_response = True
message.login_key = token
message.obfuscated_private_ip.v4 = struct.unpack(">L", socket.inet_aton(socket.gethostbyname(socket.gethostname())))[0] ^ 0xF00DBAAD
sentry = await self.sentry()
if sentry:
logger.info("Sentry present")
message.eresult_sentryfile = EResult.OK
message.sha_sentryfile = sentry
self.steam_id = steam_id
logger.info("Sending log on message using token")
await self._send(EMsg.ClientLogon, message)
async def _import_game_stats(self, game_id):
logger.info(f"Importing game stats for {game_id}")
message = steammessages_clientserver_pb2.CMsgClientGetUserStats()
message.game_id = int(game_id)
await self._send(EMsg.ClientGetUserStats, message)
async def _import_game_time(self):
logger.info("Importing game times")
job_id = next(self._job_id_iterator)
message = steammessages_player_pb2.CPlayer_CustomGetLastPlayedTimes_Request()
message.min_last_played = 0
await self._send(EMsg.ServiceMethodCallFromClient, message, job_id, None, "Player.ClientGetLastPlayedTimes#1")
async def set_persona_state(self, state):
message = steammessages_clientserver_friends_pb2.CMsgClientChangeStatus()
message.persona_state = state
await self._send(EMsg.ClientChangeStatus, message)
async def get_friends_statuses(self):
job_id = next(self._job_id_iterator)
message = steamui_libraryroot_pb2.CChat_RequestFriendPersonaStates_Request()
await self._send(EMsg.ServiceMethodCallFromClient, message, job_id, None, "Chat.RequestFriendPersonaStates#1")
async def get_user_infos(self, users, flags):
message = steammessages_clientserver_friends_pb2.CMsgClientRequestFriendData()
message.friends.extend(users)
message.persona_state_requested = flags
await self._send(EMsg.ClientRequestFriendData, message)
async def _import_collections(self):
job_id = next(self._job_id_iterator)
message = steamui_libraryroot_pb2.CCloudConfigStore_Download_Request()
message_inside = steamui_libraryroot_pb2.CCloudConfigStore_NamespaceVersion()
message_inside.enamespace = 1
message.versions.append(message_inside)
await self._send(EMsg.ServiceMethodCallFromClient, message, job_id, None, "CloudConfigStore.Download#1")
async def get_packages_info(self, steam_licenses: List[SteamLicense]):
logger.info(f"Sending call {EMsg.PICSProductInfoRequest} with "
f"{[steam_license.license.package_id for steam_license in steam_licenses]}")
message = steammessages_clientserver_pb2.CMsgClientPICSProductInfoRequest()
for steam_license in steam_licenses:
info = message.packages.add()
info.packageid = steam_license.license.package_id
info.access_token = steam_license.license.access_token
await self._send(EMsg.PICSProductInfoRequest, message)
async def get_apps_info(self, app_ids):
logger.info(f"Sending call {EMsg.PICSProductInfoRequest} with {app_ids}")
message = steammessages_clientserver_pb2.CMsgClientPICSProductInfoRequest()
for app_id in app_ids:
info = message.apps.add()
info.appid = app_id
await self._send(EMsg.PICSProductInfoRequest, message)
async def get_presence_localization(self, appid, language='english'):
logger.info(f"Sending call for rich presence localization with {appid}, {language}")
message = steamui_libraryroot_pb2.CCommunity_GetAppRichPresenceLocalization_Request()
message.appid = appid
message.language = language
job_id = next(self._job_id_iterator)
await self._send(EMsg.ServiceMethodCallFromClient, message, job_id, None, target_job_name='Community.GetAppRichPresenceLocalization#1')
async def accept_update_machine_auth(self,jobid_target, sha_hash, offset, filename, cubtowrite):
logger.info("Accepting update machine auth")
message = steammessages_clientserver_2_pb2.CMsgClientUpdateMachineAuthResponse()
message.filename = filename
message.eresult = EResult.OK
message.sha_file = sha_hash
message.getlasterror = 0
message.offset = offset
message.cubwrote = cubtowrite
await self._send(EMsg.ClientUpdateMachineAuthResponse, message, None, jobid_target, None)
async def accept_new_login_token(self, unique_id, jobid_target):
logger.info("Accepting new login key")
message = steammessages_clientserver_login_pb2.CMsgClientNewLoginKeyAccepted()
message.unique_id = unique_id
await self._send(EMsg.ClientNewLoginKeyAccepted, message, None, jobid_target, None)
async def _send(
self,
emsg,
message,
source_job_id=None,
target_job_id=None,
target_job_name=None
):
proto_header = steammessages_base_pb2.CMsgProtoBufHeader()
if self.steam_id is not None:
proto_header.steamid = self.steam_id
else:
proto_header.steamid = 0 + self._ACCOUNT_ID_MASK
if self._session_id is not None:
proto_header.client_sessionid = self._session_id
if source_job_id is not None:
proto_header.jobid_source = source_job_id
if target_job_id is not None:
proto_header.jobid_target = target_job_id
if target_job_name is not None:
proto_header.target_job_name = target_job_name
header = proto_header.SerializeToString()
body = message.SerializeToString()
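        # Wire format: 4-byte little-endian EMsg (with the proto flag set), 4-byte header
        # length, then the serialized protobuf header followed by the message body.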
data = struct.pack("<2I", emsg | self._PROTO_MASK, len(header))
data = data + header + body
logger.info("Sending message %d (%d bytes)", emsg, len(data))
await self._socket.send(data)
logger.info("Send message success")
async def _heartbeat(self, interval):
message = steammessages_clientserver_login_pb2.CMsgClientHeartBeat()
while True:
await asyncio.sleep(interval)
await self._send(EMsg.ClientHeartBeat, message)
async def _process_packet(self, packet):
package_size = len(packet)
logger.info("Processing packet of %d bytes", package_size)
        if package_size < 8:
            logger.warning("Package too small, ignoring...")
            return
raw_emsg = struct.unpack("<I", packet[:4])[0]
emsg = raw_emsg & ~self._PROTO_MASK
if raw_emsg & self._PROTO_MASK != 0:
header_len = struct.unpack("<I", packet[4:8])[0]
header = steammessages_base_pb2.CMsgProtoBufHeader()
header.ParseFromString(packet[8:8 + header_len])
if self._session_id is None and header.client_sessionid != 0:
logger.info("Session id: %d", header.client_sessionid)
self._session_id = header.client_sessionid
await self._process_message(emsg, header, packet[8 + header_len:])
else:
logger.warning("Packet with extended header - ignoring")
async def _process_message(self, emsg, header, body):
logger.info("Processing message %d", emsg)
if emsg == EMsg.Multi:
await self._process_multi(body)
elif emsg == EMsg.ClientLogOnResponse:
await self._process_client_log_on_response(body)
elif emsg == EMsg.ClientLogOff:
await self._process_client_log_off(body)
elif emsg == EMsg.ClientFriendsList:
await self._process_client_friend_list(body)
elif emsg == EMsg.ClientPersonaState:
await self._process_client_persona_state(body)
elif emsg == EMsg.ClientLicenseList:
await self._process_license_list(body)
elif emsg == EMsg.PICSProductInfoResponse:
await self._process_package_info_response(body)
elif emsg == EMsg.ClientGetUserStatsResponse:
await self._process_user_stats_response(body)
elif emsg == EMsg.ClientAccountInfo:
await self._process_account_info(body)
elif emsg == EMsg.ClientNewLoginKey:
await self._process_client_new_login_key(body, header.jobid_source)
elif emsg == EMsg.ClientUpdateMachineAuth:
await self._process_client_update_machine_auth(body, header.jobid_source)
elif emsg == EMsg.ClientPlayerNicknameList:
await self._process_user_nicknames(body)
elif emsg == EMsg.ServiceMethod:
await self._process_service_method_response(header.target_job_name, header.jobid_target, body)
elif emsg == EMsg.ServiceMethodResponse:
await self._process_service_method_response(header.target_job_name, header.jobid_target, body)
else:
logger.warning("Ignored message %d", emsg)
async def _process_multi(self, body):
logger.info("Processing message Multi")
message = steammessages_base_pb2.CMsgMulti()
message.ParseFromString(body)
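        # A Multi message bundles several packets, optionally gzip-compressed as a whole;
        # each sub-packet is prefixed with its own 4-byte little-endian size.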
if message.size_unzipped > 0:
loop = asyncio.get_running_loop()
data = await loop.run_in_executor(None, gzip.decompress, message.message_body)
else:
data = message.message_body
data_size = len(data)
offset = 0
size_bytes = 4
while offset + size_bytes <= data_size:
size = struct.unpack("<I", data[offset:offset + size_bytes])[0]
await self._process_packet(data[offset + size_bytes:offset + size_bytes + size])
offset += size_bytes + size
logger.info("Finished processing message Multi")
async def _process_client_log_on_response(self, body):
logger.info("Processing message ClientLogOnResponse")
message = steammessages_clientserver_login_pb2.CMsgClientLogonResponse()
message.ParseFromString(body)
result = message.eresult
if result == EResult.AccountLogonDenied:
if message.email_domain:
await self.user_authentication_handler('two_step', 'email')
if result == EResult.AccountLoginDeniedNeedTwoFactor:
await self.user_authentication_handler('two_step', 'mobile')
if result == EResult.OK:
interval = message.out_of_game_heartbeat_seconds
self.steam_id = message.client_supplied_steamid
await self.user_authentication_handler('steam_id', self.steam_id)
await self.user_authentication_handler('account_id', message.client_supplied_steamid - self._ACCOUNT_ID_MASK)
self._heartbeat_task = asyncio.create_task(self._heartbeat(interval))
else:
logger.info(f"Failed to log on, reason : {message}")
if self.log_on_handler is not None:
await self.log_on_handler(result)
async def _process_client_update_machine_auth(self, body, jobid_source):
logger.info("Processing message ClientUpdateMachineAuth")
message = steammessages_clientserver_2_pb2.CMsgClientUpdateMachineAuth()
message.ParseFromString(body)
sentry_sha = hashlib.sha1(message.bytes).digest()
await self.user_authentication_handler('sentry', sentry_sha)
await self.accept_update_machine_auth(jobid_source, sentry_sha, message.offset, message.filename, message.cubtowrite)
async def _process_account_info(self, body):
logger.info("Processing message ClientAccountInfo")
message = steammessages_clientserver_login_pb2.CMsgClientAccountInfo()
message.ParseFromString(body)
await self.user_authentication_handler('persona_name', message.persona_name)
self.account_info_retrieved.set()
async def _process_client_new_login_key(self, body, jobid_source):
logger.info("Processing message ClientNewLoginKey")
message = steammessages_clientserver_login_pb2.CMsgClientNewLoginKey()
message.ParseFromString(body)
await self.user_authentication_handler('token', message.login_key)
await self.accept_new_login_token(message.unique_id, jobid_source)
self.login_key_retrieved.set()
async def _process_client_log_off(self, body):
logger.info("Processing message ClientLoggedOff")
message = steammessages_clientserver_login_pb2.CMsgClientLoggedOff()
message.ParseFromString(body)
result = message.eresult
assert self._heartbeat_task is not None
self._heartbeat_task.cancel()
if self.log_off_handler is not None:
await self.log_off_handler(result)
async def _process_user_nicknames(self, body):
logger.info("Processing message ClientPlayerNicknameList")
message = steammessages_clientserver_friends_pb2.CMsgClientPlayerNicknameList()
message.ParseFromString(body)
nicknames = {}
for player_nickname in message.nicknames:
nicknames[str(player_nickname.steamid)] = player_nickname.nickname
await self.user_nicknames_handler(nicknames)
async def _process_client_friend_list(self, body):
logger.info("Processing message ClientFriendsList")
if self.relationship_handler is None:
return
message = steammessages_clientserver_friends_pb2.CMsgClientFriendsList()
message.ParseFromString(body)
friends = {}
for relationship in message.friends:
steam_id = relationship.ulfriendid
details = SteamId.parse(steam_id)
if details.type_ == EAccountType.Individual:
friends[steam_id] = EFriendRelationship(relationship.efriendrelationship)
await self.relationship_handler(message.bincremental, friends)
async def _process_client_persona_state(self, body):
logger.info("Processing message ClientPersonaState")
if self.user_info_handler is None:
return
message = steammessages_clientserver_friends_pb2.CMsgClientPersonaState()
message.ParseFromString(body)
for user in message.friends:
user_id = user.friendid
if user_id == self.steam_id and int(user.game_played_app_id) != 0:
await self.get_apps_info([int(user.game_played_app_id)])
user_info = ProtoUserInfo()
if user.HasField("player_name"):
user_info.name = user.player_name
if user.HasField("avatar_hash"):
user_info.avatar_hash = user.avatar_hash
if user.HasField("persona_state"):
user_info.state = EPersonaState(user.persona_state)
if user.HasField("gameid"):
user_info.game_id = user.gameid
rich_presence: Dict[str, str] = {}
for element in user.rich_presence:
if type(element.value) == bytes:
logger.info(f"Unsuported presence type: {type(element.value)} {element.value}")
rich_presence = {}
break
rich_presence[element.key] = element.value
if element.key == 'status' and element.value:
if "#" in element.value:
await self.translations_handler(user.gameid)
if element.key == 'steam_display' and element.value:
if "#" in element.value:
await self.translations_handler(user.gameid)
user_info.rich_presence = rich_presence
if user.HasField("game_name"):
user_info.game_name = user.game_name
await self.user_info_handler(user_id, user_info)
async def _process_license_list(self, body):
logger.info("Processing message ClientLicenseList")
if self.license_import_handler is None:
return
message = steammessages_clientserver_pb2.CMsgClientLicenseList()
message.ParseFromString(body)
licenses_to_check = []
for license in message.licenses:
# license.type 1024 = free games
# license.flags 520 = unidentified trash entries (games which are not owned nor are free)
if int(license.flags) == 520:
continue
if license.package_id == 0:
# Packageid 0 contains trash entries for every user
logger.info("Skipping packageid 0 ")
continue
if int(license.owner_id) == int(self.steam_id - self._ACCOUNT_ID_MASK):
logger.info(f"Owned license {license.package_id}")
licenses_to_check.append(SteamLicense(license=license, shared=False))
else:
if license.package_id in licenses_to_check:
continue
logger.info(f"Shared license {license.package_id}")
licenses_to_check.append(SteamLicense(license=license, shared=True))
await self.license_import_handler(licenses_to_check)
async def _process_package_info_response(self, body):
logger.info("Processing message PICSProductInfoResponse")
message = steammessages_clientserver_pb2.CMsgClientPICSProductInfoResponse()
message.ParseFromString(body)
apps_to_parse = []
for info in message.packages:
await self.package_info_handler()
package_id = str(info.packageid)
package_content = vdf.binary_loads(info.buffer[4:])
package = package_content.get(package_id)
if package is None:
continue
for app in package['appids'].values():
appid = str(app)
await self.app_info_handler(package_id=package_id, appid=appid)
apps_to_parse.append(app)
for info in message.apps:
app_content = vdf.loads(info.buffer[:-1].decode('utf-8', 'replace'))
appid = str(app_content['appinfo']['appid'])
try:
type_ = app_content['appinfo']['common']['type'].lower()
title = app_content['appinfo']['common']['name']
                if type_ == 'game':
logger.info(f"Retrieved game {title}")
await self.app_info_handler(appid=appid, title=title, type=type_)
except KeyError:
logger.info(f"Unrecognized app structure {app_content}")
await self.app_info_handler(appid=appid, title='unknown', type='unknown')
if len(apps_to_parse) > 0:
logger.info(f"Apps to parse {apps_to_parse}, {len(apps_to_parse)} entries")
await self.get_apps_info(apps_to_parse)
async def _process_rich_presence_translations(self, body):
message = steamui_libraryroot_pb2.CCommunity_GetAppRichPresenceLocalization_Response()
message.ParseFromString(body)
logger.info(f"Received information about rich presence translations for {message.appid}")
await self.translations_handler(message.appid, message.token_lists)
async def _process_user_stats_response(self, body):
logger.info("Processing message ClientGetUserStatsResponse")
message = steammessages_clientserver_pb2.CMsgClientGetUserStatsResponse()
message.ParseFromString(body)
game_id = message.game_id
stats = message.stats
achievs = message.achievement_blocks
logger.info(f"Processing user stats response for {message.game_id}")
achievements_schema = vdf.binary_loads(message.schema,merge_duplicate_keys=False)
achievements_unlocked = []
for achievement_block in achievs:
achi_block_enum = 32 * (achievement_block.achievement_id - 1)
for index, unlock_time in enumerate(achievement_block.unlock_time):
if unlock_time > 0:
if str(achievement_block.achievement_id) not in achievements_schema[str(game_id)]['stats'] or \
str(index) not in achievements_schema[str(game_id)]['stats'][str(achievement_block.achievement_id)]['bits']:
logger.info("Non existent achievement unlocked")
continue
try:
if 'english' in achievements_schema[str(game_id)]['stats'][str(achievement_block.achievement_id)]['bits'][str(index)]['display']['name']:
name = achievements_schema[str(game_id)]['stats'][str(achievement_block.achievement_id)]['bits'][str(index)]['display']['name']['english']
else:
name = achievements_schema[str(game_id)]['stats'][str(achievement_block.achievement_id)]['bits'][str(index)]['display']['name']
achievements_unlocked.append({'id': achi_block_enum+index,
'unlock_time': unlock_time,
'name': name})
except:
logger.info(f"Unable to parse achievement {index} from block {achievement_block.achievement_id}")
logger.info(achievs)
logger.info(achievements_schema)
logger.info(message.schema)
raise UnknownBackendResponse()
await self.stats_handler(game_id, stats, achievements_unlocked)
async def _process_user_time_response(self, body):
logger.info("Received information about game times")
message = steammessages_player_pb2.CPlayer_CustomGetLastPlayedTimes_Response()
message.ParseFromString(body)
for game in message.games:
logger.info(f"Processing game times for game {game.appid}, playtime: {game.playtime_forever} last time played: {game.last_playtime}")
await self.times_handler(game.appid, game.playtime_forever, game.last_playtime)
await self.times_import_finished_handler(True)
async def _process_collections_response(self, body):
message = steamui_libraryroot_pb2.CCloudConfigStore_Download_Response()
message.ParseFromString(body)
for data in message.data:
for entry in data.entries:
try:
loaded_val = json.loads(entry.value)
self.collections['collections'][loaded_val['name']] = loaded_val['added']
except:
pass
self.collections['event'].set()
async def _process_service_method_response(self, target_job_name, target_job_id, body):
logger.info("Processing message ServiceMethodResponse %s", target_job_name)
if target_job_name == 'Community.GetAppRichPresenceLocalization#1':
await self._process_rich_presence_translations(body)
if target_job_name == 'Player.ClientGetLastPlayedTimes#1':
await self._process_user_time_response(body)
if target_job_name == 'CloudConfigStore.Download#1':
await self._process_collections_response(body)
| [
"[email protected]"
] | |
0e5dc5c575994fb9d51f5fd31c55ef92cd32e3f8 | ddadba7ebb64c2f341280728fd50e62369d6251e | /apps/notes_app/models.py | d5bba0cd359c70e4d2e127891182602fdd6e7910 | [] | no_license | LisCoding/Notes-App | 0f630b8229553d6cac278650f5649a9737ce1285 | 21bd8d0177ecf69335ec24e52c49df81f555f7a5 | refs/heads/master | 2021-06-22T10:44:25.755893 | 2017-08-31T19:33:49 | 2017-08-31T19:33:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Note(models.Model):
title = models.CharField(max_length=255)
description = models.TextField(default="")
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
| [
"[email protected]"
] | |
761eeaa6e8e18f8112e281af167a7ccbc3748013 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03837/s920938245.py | 3aeb662f91dd2f46bbfdb9f9b7edc7cc0fdcb132 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | def warshall_floyd():
for k in range(N):
for i in range(N):
for j in range(N):
d[i][j] = min(d[i][j], d[i][k]+d[k][j])
N, M = map(int, input().split())
d = [[10**18]*N for _ in range(N)]
for i in range(N):
d[i][i] = 0
edges = []
for _ in range(M):
a, b, c = map(int, input().split())
d[a-1][b-1] = c
d[b-1][a-1] = c
edges.append((a-1, b-1, c))
warshall_floyd()
ans = 0
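# Count edges that never lie on a shortest path between any pair of vertices.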
for a, b, c in edges:
flag = True
for i in range(N):
for j in range(N):
if d[i][a]+c+d[b][j]==d[i][j]:
flag = False
if flag:
ans += 1
print(ans) | [
"[email protected]"
] | |
74fc280f27c08e1336a10b2c6a6e61901d2387e1 | cd2ea0b9f0f8e01950ea4dd629a325ef26f914ad | /topics/Trees/BinaryTreeTraversal.py | b180ed30d3f5c5183418e0a9355c08ce008c8282 | [] | no_license | akhandsingh17/assignments | df5f1af44486ffefe1fefcccc643e6818ac1c55d | c89f40dcd7a8067fa78ed95d3fecc36cb1ca7b5d | refs/heads/master | 2023-08-24T18:00:32.938254 | 2021-10-06T06:01:32 | 2021-10-06T06:01:32 | 305,913,409 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,913 | py |
"""
1
/ \
2 3
/ \ / \
4 5 6 7
\
8
"""
class Node:
def __init__(self, val, left=None, right=None):
self.val = val
self.left = left
self.right = right
class BinaryTreeTraversal:
def __init__(self, root):
self.root = Node(root)
def preorder(self, start, traversal):
if start != None:
traversal = traversal + (str(start.val) + '-')
traversal = self.preorder(start.left, traversal )
traversal = self.preorder(start.right, traversal)
return traversal
    def inorder(self, start, traversal):
        if start != None:
            traversal = self.inorder(start.left, traversal)
            traversal = traversal + (str(start.val) + '-')
            traversal = self.inorder(start.right, traversal)
        return traversal
    def postorder(self, start, traversal):
        if start != None:
            traversal = self.postorder(start.left, traversal)
            traversal = self.postorder(start.right, traversal)
            traversal = traversal + (str(start.val) + '-')
        return traversal
def print_traversal(self, type):
if type == 'preorder':
return self.preorder(self.root, '')
if type == 'inorder':
return self.inorder(self.root, '')
if type == 'postorder':
return self.postorder(self.root, '')
def main():
tree = BinaryTreeTraversal(1)
tree.root.left = Node(2)
tree.root.right = Node(3)
tree.root.left.left = Node(4)
tree.root.left.right = Node(5)
tree.root.right.left = Node(6)
tree.root.right.right = Node(7)
tree.root.right.right.right = Node(8)
print(tree.print_traversal('preorder'))
print(tree.print_traversal('inorder'))
print(tree.print_traversal('postorder'))
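    # Expected output for the example tree:
    # preorder : 1-2-4-5-3-6-7-8-
    # inorder  : 4-2-5-1-6-3-7-8-
    # postorder: 4-5-2-6-8-7-3-1-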
if __name__=='__main__':
main()
| [
"[email protected]"
] | |
22dfaba28c59c06bab37c8db0df174e75f3bf706 | bd9278423bb215dcdbf9f56a948210db044bdba2 | /tests/test_01_main/test_env_vars_1.py | 501ad06fecc21417b497a0cafa4e66f9cbcc5426 | [
"MIT"
] | permissive | dungnv2602/uvicorn-gunicorn-docker | 77fd5e0d07a94c7acc0876a773e6b1262619fb6d | 37dbc188e555c22cf9b2dd0f3f6ab3e122e32c24 | refs/heads/master | 2020-04-26T16:40:32.749609 | 2019-02-08T10:44:24 | 2019-02-08T10:44:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,250 | py | import time
import pytest
import requests
import docker
from ..utils import CONTAINER_NAME, get_config, get_logs, remove_previous_container
client = docker.from_env()
def verify_container(container, response_text):
config_data = get_config(container)
assert config_data["workers_per_core"] == 1
assert config_data["host"] == "0.0.0.0"
assert config_data["port"] == "8000"
assert config_data["loglevel"] == "warning"
assert config_data["bind"] == "0.0.0.0:8000"
logs = get_logs(container)
assert "Checking for script in /app/prestart.sh" in logs
assert "Running script /app/prestart.sh" in logs
assert (
"Running inside /app/prestart.sh, you could add migrations to this file" in logs
)
response = requests.get("http://127.0.0.1:8000")
assert response.text == response_text
@pytest.mark.parametrize(
"image,response_text",
[
(
"tiangolo/uvicorn-gunicorn:python3.6",
"Hello world! From Uvicorn with Gunicorn. Using Python 3.6",
),
(
"tiangolo/uvicorn-gunicorn:python3.7",
"Hello world! From Uvicorn with Gunicorn. Using Python 3.7",
),
(
"tiangolo/uvicorn-gunicorn:latest",
"Hello world! From Uvicorn with Gunicorn. Using Python 3.7",
),
(
"tiangolo/uvicorn-gunicorn:python3.6-alpine3.8",
"Hello world! From Uvicorn with Gunicorn in Alpine. Using Python 3.6",
),
(
"tiangolo/uvicorn-gunicorn:python3.7-alpine3.8",
"Hello world! From Uvicorn with Gunicorn in Alpine. Using Python 3.7",
),
],
)
def test_env_vars_1(image, response_text):
remove_previous_container(client)
container = client.containers.run(
image,
name=CONTAINER_NAME,
environment={"WORKERS_PER_CORE": 1, "PORT": "8000", "LOG_LEVEL": "warning"},
ports={"8000": "8000"},
detach=True,
)
time.sleep(1)
verify_container(container, response_text)
container.stop()
# Test that everything works after restarting too
container.start()
time.sleep(1)
verify_container(container, response_text)
container.stop()
container.remove()
| [
"[email protected]"
] | |
0ad1426d1e43e8323cd4178c8c0dcebb7e9b9290 | fa38b704f9faf3889a4a35c6edce82be2e9b0274 | /Practice_Query/apps.py | be35856f927e72567c0382dbb335f702d2f40ebb | [] | no_license | Efrana/Django_Practice | 4a4bc0f5016eb6fda94f5184a1bfbf7cc6db757b | 864e4cb80b90b59deeeb7fd9a30f8af82d58658a | refs/heads/master | 2022-11-22T00:30:11.809382 | 2020-06-08T18:13:31 | 2020-06-08T18:13:31 | 270,774,361 | 0 | 1 | null | 2020-07-19T20:01:32 | 2020-06-08T18:09:45 | Python | UTF-8 | Python | false | false | 102 | py | from django.apps import AppConfig
class PracticeQueryConfig(AppConfig):
name = 'Practice_Query'
| [
"[email protected]"
] | |
781ee264796e64ff53334b63df8e2b3568dff462 | 7e0393251012e91213dddfd9c93f6b6b73ca2bfe | /cloudnetpy/products/drizzle_error.py | 6ac4ba7a3ba679b8c9adeb0aead54d7fe56fdbce | [
"MIT"
] | permissive | josephhardinee/cloudnetpy | ff4cc0303d7f2ae40f2d3466298257659ff3ccde | c37760db3cdfe62ae769f8090ba621803ec9a92c | refs/heads/master | 2021-03-06T15:37:51.529776 | 2020-02-13T09:05:29 | 2020-02-13T09:05:29 | 246,207,849 | 0 | 0 | MIT | 2020-03-10T04:29:48 | 2020-03-10T04:26:16 | null | UTF-8 | Python | false | false | 5,294 | py | import numpy as np
import numpy.ma as ma
import cloudnetpy.utils as utils
def _get_drizzle_indices(diameter):
return {'drizzle': diameter > 0,
'small': np.logical_and(diameter <= 1e-4, diameter > 1e-5),
'tiny': np.logical_and(diameter <= 1e-5, diameter > 0)}
def _read_input_uncertainty(categorize, uncertainty_type):
return tuple(db2lin(categorize.getvar(f'{key}_{uncertainty_type}'))
for key in ('Z', 'beta'))
MU_ERROR = 0.07
MU_ERROR_SMALL = 0.25
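# MU_ERROR / MU_ERROR_SMALL: fixed uncertainty contributions attributed to the drop size
# distribution shape parameter (mu); they are added in quadrature in _calc_error() below.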
def get_drizzle_error(categorize, drizzle_parameters):
""" Estimates error and bias for drizzle classification.
Args:
categorize (DrizzleSource): The :class:`DrizzleSource` instance.
drizzle_parameters (DrizzleSolving): The :class:`DrizzleSolving` instance.
Returns:
        errors (dict): Dictionary containing the estimated errors and biases for the drizzle parameters.
"""
parameters = drizzle_parameters.params
drizzle_indices = _get_drizzle_indices(parameters['Do'])
error_input = _read_input_uncertainty(categorize, 'error')
bias_input = _read_input_uncertainty(categorize, 'bias')
errors = _calc_errors(drizzle_indices, error_input, bias_input)
return errors
def _calc_errors(drizzle_indices, error_input, bias_input):
errors = _calc_parameter_errors(drizzle_indices, error_input)
biases = _calc_parameter_biases(bias_input)
results = {**errors, **biases}
_add_supplementary_errors(results, drizzle_indices, error_input)
_add_supplementary_biases(results, bias_input)
return _convert_to_db(results)
def _calc_parameter_errors(drizzle_indices, error_input):
def _calc_dia_error():
error = _calc_error(2 / 7, (1, 1), error_input, add_mu=True)
error_small = _calc_error(1 / 4, (1, 1), error_input, add_mu_small=True)
return _stack_errors(error, drizzle_indices, error_small)
def _calc_lwc_error():
error = _calc_error(1 / 7, (1, 6), error_input)
error_small = _calc_error(1 / 4, (1, 3), error_input)
return _stack_errors(error, drizzle_indices, error_small)
def _calc_lwf_error():
error = _calc_error(1 / 7, (3, 4), error_input, add_mu=True)
error_small = _calc_error(1 / 2, (1, 1), error_input, add_mu_small=True)
error_tiny = _calc_error(1 / 4, (3, 1), error_input, add_mu_small=True)
return _stack_errors(error, drizzle_indices, error_small, error_tiny)
def _calc_s_error():
error = _calc_error(1 / 2, (1, 1), error_input)
return _stack_errors(error, drizzle_indices)
return {'Do_error': _calc_dia_error(),
'drizzle_lwc_error': _calc_lwc_error(),
'drizzle_lwf_error': _calc_lwf_error(),
'S_error': _calc_s_error()}
def _calc_parameter_biases(bias_input):
def _calc_bias(scale, weights):
return utils.l2norm_weighted(bias_input, scale, weights)
dia_bias = _calc_bias(2/7, (1, 1))
lwc_bias = _calc_bias(1/7, (1, 6))
lwf_bias = _calc_bias(1/7, (3, 4))
return {'Do_bias': dia_bias,
'drizzle_lwc_bias': lwc_bias,
'drizzle_lwf_bias': lwf_bias}
def _add_supplementary_errors(results, drizzle_indices, error_input):
def _calc_n_error():
z_error = error_input[0]
dia_error = db2lin(results['Do_error'])
n_error = utils.l2norm(z_error, 6 * dia_error)
return _stack_errors(n_error, drizzle_indices)
def _calc_v_error():
error = results['Do_error']
error[drizzle_indices['tiny']] *= error[drizzle_indices['tiny']]
return error
results['drizzle_N_error'] = _calc_n_error()
results['v_drizzle_error'] = _calc_v_error()
results['mu_error'] = MU_ERROR
return results
def _add_supplementary_biases(results, bias_input):
def _calc_n_bias():
z_bias = bias_input[0]
dia_bias = db2lin(results['Do_bias'])
return utils.l2norm_weighted((z_bias, dia_bias), 1, (1, 6))
results['drizzle_N_bias'] = _calc_n_bias()
results['v_drizzle_bias'] = results['Do_bias']
return results
def _convert_to_db(results):
"""Converts linear error values to dB."""
return {name: lin2db(value) for name, value in results.items()}
def _calc_error(scale, weights, error_input, add_mu=False, add_mu_small=False):
error = utils.l2norm_weighted(error_input, scale, weights)
if add_mu:
error = utils.l2norm(error, MU_ERROR)
if add_mu_small:
error = utils.l2norm(error, MU_ERROR_SMALL)
return error
def _stack_errors(error_in, drizzle_indices, error_small=None, error_tiny=None):
def _add_error_component(source, ind):
error[ind] = source[ind]
error = ma.zeros(error_in.shape)
_add_error_component(error_in, drizzle_indices['drizzle'])
if error_small is not None:
_add_error_component(error_small, drizzle_indices['small'])
if error_tiny is not None:
_add_error_component(error_tiny, drizzle_indices['tiny'])
return error
COR = 10 / np.log(10)
def db2lin(x):
if ma.max(x) > 100:
raise ValueError('Too large values in drizzle.db2lin()')
return ma.exp(x / COR) - 1
def lin2db(x):
if ma.min(x) < -0.9:
raise ValueError('Too small values in drizzle.lin2db()')
return ma.log(x + 1) * COR
| [
"[email protected]"
] | |
6475c7715d2ace925da77b437721147f76ea65b2 | d1d79d0c3889316b298852834b346d4246825e66 | /blackbot/core/wss/ttp/art/art_T1218.011-1.py | ae3f24301b8519e5fd2ff913044f9b62c72f2ce2 | [] | no_license | ammasajan/Atomic-Red-Team-Intelligence-C2 | 78d1ed2de49af71d4c3c74db484e63c7e093809f | 5919804f0bdeb15ea724cd32a48f377bce208277 | refs/heads/master | 2023-07-17T12:48:15.249921 | 2021-08-21T20:10:30 | 2021-08-21T20:10:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,920 | py | from blackbot.core.utils import get_path_in_package
from blackbot.core.wss.atomic import Atomic
from terminaltables import SingleTable
import os
import json
class Atomic(Atomic):
def __init__(self):
self.name = 'DefenseEvasion/T1218.011-1'
self.controller_type = ''
self.external_id = 'T1218.011'
self.blackbot_id = 'T1218.011-1'
self.version = ''
self.language = 'boo'
self.description = self.get_description()
self.last_updated_by = 'Blackbot, Inc. All Rights reserved'
self.references = ["System.Management.Automation"]
self.options = {}
def payload(self):
with open(get_path_in_package('core/wss/ttp/art/src/cmd_prompt.boo'), 'r') as ttp_src:
src = ttp_src.read()
cmd_script = get_path_in_package('core/wss/ttp/art/cmd_ttp/defenseEvasion/T1218.011-1')
with open(cmd_script) as cmd:
src = src.replace("CMD_SCRIPT", cmd.read())
return src
def get_description(self):
path = get_path_in_package('core/wss/ttp/art/cmd_ttp/defenseEvasion/T1218.011-1')
with open(path) as text:
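            # The command script is expected to start with a four-line header
            # ('#TechniqueName: ...', '#AtomicTestName: ...', '#Description: ...',
            # '#Language: ...'); only those four lines are read here.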
            head = [next(text) for _ in range(4)]
technique_name = head[0].replace('#TechniqueName: ', '').strip('\n')
atomic_name = head[1].replace('#AtomicTestName: ', '').strip('\n')
description = head[2].replace('#Description: ', '').strip('\n')
language = head[3].replace('#Language: ', '').strip('\n')
aux = ''
count = 1
for char in description:
if char == '&':
continue
aux += char
if count % 126 == 0:
aux += '\n'
count += 1
out = '{}: {}\n{}\n\n{}\n'.format(technique_name, language, atomic_name, aux)
return out
| [
"[email protected]"
] | |
a2fff788a3bf339242af472e231ea2e64b740a53 | adea9fc9697f5201f4cb215571025b0493e96b25 | /napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/areas/__init__.py | ec0be8d4e67f577dc03415c82b56d94adae2a930 | [
"Apache-2.0"
] | permissive | andyjsharp/napalm-yang | d8a8b51896ef7c6490f011fe265db46f63f54248 | ef80ebbfb50e188f09486380c88b058db673c896 | refs/heads/develop | 2021-09-09T02:09:36.151629 | 2018-03-08T22:44:04 | 2018-03-08T22:44:04 | 114,273,455 | 0 | 0 | null | 2018-03-08T22:44:05 | 2017-12-14T16:33:35 | Python | UTF-8 | Python | false | false | 10,450 | py |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
unicode = str
elif six.PY2:
import __builtin__
from . import area
class areas(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration and operational state relating to an
OSPFv2 area.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__area',)
_yang_name = 'areas'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__area = YANGDynClass(base=YANGListType("identifier",area.area, yang_name="area", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='identifier', extensions=None), is_container='list', yang_name="area", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'ospfv2', u'areas']
def _get_area(self):
"""
Getter method for area, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area (list)
YANG Description: The OSPFv2 areas within which the local system exists
"""
return self.__area
def _set_area(self, v, load=False):
"""
Setter method for area, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_area is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_area() directly.
YANG Description: The OSPFv2 areas within which the local system exists
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("identifier",area.area, yang_name="area", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='identifier', extensions=None), is_container='list', yang_name="area", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """area must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("identifier",area.area, yang_name="area", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='identifier', extensions=None), is_container='list', yang_name="area", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=True)""",
})
self.__area = t
if hasattr(self, '_set'):
self._set()
def _unset_area(self):
self.__area = YANGDynClass(base=YANGListType("identifier",area.area, yang_name="area", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='identifier', extensions=None), is_container='list', yang_name="area", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=True)
area = __builtin__.property(_get_area, _set_area)
_pyangbind_elements = {'area': area, }
from . import area
class areas(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration and operational state relating to an
OSPFv2 area.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__area',)
_yang_name = 'areas'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__area = YANGDynClass(base=YANGListType("identifier",area.area, yang_name="area", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='identifier', extensions=None), is_container='list', yang_name="area", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'ospfv2', u'areas']
def _get_area(self):
"""
Getter method for area, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area (list)
YANG Description: The OSPFv2 areas within which the local system exists
"""
return self.__area
def _set_area(self, v, load=False):
"""
Setter method for area, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_area is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_area() directly.
YANG Description: The OSPFv2 areas within which the local system exists
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("identifier",area.area, yang_name="area", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='identifier', extensions=None), is_container='list', yang_name="area", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """area must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("identifier",area.area, yang_name="area", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='identifier', extensions=None), is_container='list', yang_name="area", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=True)""",
})
self.__area = t
if hasattr(self, '_set'):
self._set()
def _unset_area(self):
self.__area = YANGDynClass(base=YANGListType("identifier",area.area, yang_name="area", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='identifier', extensions=None), is_container='list', yang_name="area", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=True)
area = __builtin__.property(_get_area, _set_area)
_pyangbind_elements = {'area': area, }
| [
"[email protected]"
] | |
0020b9b9b399b772a1554cd04d96f72050d68d34 | 41cc033f82ce2b134cfeb71bbea2e5a369b1ba5d | /vise/analyzer/dos_plotter.py | 415b848af07e3986d22b4b100f92b1c192a80baf | [] | no_license | takahashi-akira-36m/vise_test | 0fad2087b5503d40592af7b769069b641ab3b821 | e96f9ac914b023b330a26a43610a4b331af36ade | refs/heads/master | 2023-01-07T16:16:53.770431 | 2019-12-17T02:49:51 | 2019-12-17T02:49:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,512 | py | # -*- coding: utf-8 -*-
from collections import OrderedDict, defaultdict
import numpy as np
from atomate.utils.utils import get_logger
from pymatgen.electronic_structure.core import Spin
from pymatgen.electronic_structure.dos import Dos
from pymatgen.electronic_structure.dos import add_densities
from pymatgen.electronic_structure.plotter import DosPlotter
from pymatgen.io.vasp import Vasprun
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from vise.config import SYMMETRY_TOLERANCE, ANGLE_TOL
__author__ = "Yu Kumagai"
__maintainer__ = "Yu Kumagai"
logger = get_logger(__name__)
class ViseDosPlotter(DosPlotter):
def get_plot(self, xlim=None, ylims=None, cbm_vbm=None, legend=True,
crop_first_value=False, title=None):
"""
Get a matplotlib plot showing the DOS.
Args:
xlim (list):
Specifies the x-axis limits. Set to None for automatic
determination.
ylims (list):
Specifies the y-axes limits. Two types of input.
[[y1min, y1max], [y2min, y2max], ..]
cbm_vbm (list):
Specify cbm and vbm [cbm, vbm]
legend (bool):
Whether to show the figure legend.
crop_first_value (bool):
                Whether to crop the first DOS value.
title (str):
Title of the figure
"""
ncolors = max(3, len(self._doses))
ncolors = min(9, ncolors)
import palettable
colors = palettable.colorbrewer.qualitative.Set1_9.mpl_colors
y = None
all_densities = []
all_energies = []
# The DOS calculated using VASP holds a spuriously large value at the
# first mesh to keep the consistency with the integrated DOS in DOSCAR
# file. An example is shown below.
# ------------- DOSCAR --------------------
# 10 10 1 0
# 0.1173120E+02 0.5496895E-09 0.5496895E-09 0.5496895E-09 0.5000000E-15
# 1.000000000000000E-004
# CAR
# unknown system
# 23.00000000 - 9.00000000 3201 6.62000004 1.00000000
# -9.000 0.6000E+03 0.6000E+03 0.6000E+01 0.6000E+01 <-- large DOS
# -8.990 0.0000E+00 0.0000E+00 0.6000E+01 0.6000E+01
i = 1 if crop_first_value else 0
for key, dos in self._doses.items():
energies = dos['energies'][i:]
densities = {Spin(k): v[i:] for k, v in dos['densities'].items()}
if not y:
y = {Spin.up: np.zeros(energies.shape),
Spin.down: np.zeros(energies.shape)}
all_energies.append(energies)
all_densities.append(densities)
# Make groups to be shown in the same figure.
# Example, ZrTiSe4
# keys = ['Total', 'Site:1 Zr-s', 'Site:1 Zr-p', 'Site:1 Zr-d',
# 'Site:2 Ti-s', 'Site:2 Ti-p', 'Site:2 Ti-d', 'Site:3 Se-s',
# 'Site:3 Se-p', 'Site:3 Se-d', 'Site:5 Se-s', 'Site:5 Se-p',
# 'Site:5 Se-d']
keys = list(self._doses.keys())
grouped_keys = OrderedDict()
for k in keys:
first_word = k.split()[0]
if first_word in grouped_keys:
grouped_keys[first_word].append(k)
else:
grouped_keys[first_word] = [k]
import matplotlib.pyplot as plt
num_figs = len(grouped_keys)
fig, axs = plt.subplots(num_figs, 1, sharex=True)
if xlim:
axs[0].set_xlim(xlim)
n = 0
for i, gk in enumerate(grouped_keys):
all_pts = []
for j, key in enumerate(grouped_keys[gk]):
x = []
y = []
for spin in [Spin.up, Spin.down]:
if spin in all_densities[n]:
densities = list(int(spin) * all_densities[n][spin])
energies = list(all_energies[n])
x.extend(energies)
y.extend(densities)
all_pts.extend(list(zip(x, y)))
axs[i].plot(x, y, color=colors[j % ncolors], label=str(key),
linewidth=2)
n += 1
# plot vertical lines for band edges or Fermi level
if self.zero_at_efermi:
# plot a line
axs[i].axvline(0, color="black", linestyle="--", linewidth=0.5)
if cbm_vbm:
axs[i].axvline(cbm_vbm[0] - cbm_vbm[1], color="black",
linestyle="--", linewidth=0.5)
else:
axs[i].axvline(self._doses[key]['efermi'],
color="black", linestyle="--", linewidth=0.5)
if cbm_vbm:
axs[i].axvline(cbm_vbm[0], color="black", linestyle="--",
linewidth=0.5)
if legend:
axs[i].legend(loc="best", markerscale=0.1)
# axs[i].legend(bbox_to_anchor=(1.1, 0.8), loc="best")
leg = axs[i].get_legend()
for legobj in leg.legendHandles:
legobj.set_linewidth(1.2)
ltext = leg.get_texts()
plt.setp(ltext, fontsize=7)
else:
axs[i].set_title(key, fontsize=7)
axs[i].axhline(0, color="black", linewidth=0.5)
if ylims and len(ylims) not in (num_figs, 2):
raise ValueError("The number of y-ranges is not proper.")
if ylims and len(ylims) == 2:
if ylims[0][1] > 1.0:
axs[0].set_ylim(ylims[0])
for i in range(1, len(axs)):
axs[i].set_ylim(ylims[1])
elif ylims:
for i in range(len(axs)):
axs[i].set_ylim(ylims[i])
# else:
# for i in range(len(axs)):
# ylim = axs[i].get_ylim()
# print(ylim)
# relevanty = [p[1] for p in all_pts
# if ylim[0] < p[0] < ylim[1]]
# axs[i].set_ylim((min(relevanty), max(relevanty)))e
axs[-1].set_xlabel('Energy (eV)')
plt.tight_layout()
plt.subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=0.2, hspace=0.2)
if title:
axs[0].title.set_text(title)
return plt
def get_dos_plot(vasprun_file: str,
cbm_vbm: list = None,
pdos_type: str = "element",
specific: list = None,
orbital: list = True,
xlim: list = None,
ymaxs: list = None,
zero_at_efermi: bool = True,
legend: bool = True,
crop_first_value: bool = True,
show_spg: bool = True,
symprec: float = SYMMETRY_TOLERANCE,
angle_tolerance: float = ANGLE_TOL):
"""
Args:
vasprun_file (str):
vasprun.xml-type file name
cbm_vbm (list):
List of [cbm, vbm]
pdos_type (str): Plot type of PDOS.
"element": PDOS grouped by element type
"site": PDOS grouped by equivalent sites
"none": PDOS are not grouped.
specific (list): Show specific PDOS. If list elements are integers,
        PDOS at particular sites are shown. If list elements are element symbols,
PDOS of particular elements are shown.
["1", "2"] --> At site 1 and 2 compatible with pdos_type = "none"
["Mg", "O"] --> Summed at Mg and O sites coompatible with
pdos_type = "element"
orbital (bool):
Whether to show orbital decomposed PDOS.
xlim (list):
Specifies the x-axis limits. Set to None for automatic determination.
ymaxs (list):
Specifies the maxima of absolute y-axis limits.
zero_at_efermi (bool):
Whether to show the plot in the absolute scale.
legend (bool):
Whether to show the figure legend.
crop_first_value (bool):
Whether to crop the fist DOS.
show_spg (bool):
Whether to show space group number in the title.
symprec (float):
Symprec for determining the equivalent sites.
"""
v = Vasprun(vasprun_file, ionic_step_skip=True, parse_eigen=False)
if v.converged_electronic is False:
logger.warning("SCF is not attained in the vasp calculation.")
complete_dos = v.complete_dos
# check cbm
if cbm_vbm is None:
if complete_dos.get_gap() > 0.1:
cbm_vbm = complete_dos.get_cbm_vbm()
structure = v.final_structure
dos = OrderedDict()
# The CompleteDos behaves as DOS for total dos.
dos["Total"] = complete_dos
if specific and specific[0].isdigit():
if pdos_type is not "none":
logger.warning("pdos_type is changed from {} to none"
.format(pdos_type))
pdos_type = "none"
elif specific and specific[0].isalpha():
if pdos_type is not "none":
logger.warning("pdos_type is changed from {} to element"
.format(pdos_type))
pdos_type = "element"
sga = None
grouped_indices = defaultdict(list)
if pdos_type == "element":
for indices, s in enumerate(structure):
grouped_indices[str(s.specie)].append(indices)
elif pdos_type == "site":
# equivalent_sites: Equivalent site indices from SpacegroupAnalyzer.
sga = SpacegroupAnalyzer(structure=structure,
symprec=symprec,
angle_tolerance=angle_tolerance)
symmetrized_structure = sga.get_symmetrized_structure()
# equiv_indices = [[0], [1], [2, 3], [4, 5]]
equiv_index_lists = symmetrized_structure.equivalent_indices
for l in equiv_index_lists:
name = str(structure[l[0]].specie) + " " \
+ sga.get_symmetry_dataset()["wyckoffs"][l[0]]
grouped_indices[name] = l
elif pdos_type == "none":
for indices, s in enumerate(structure):
grouped_indices[str(s.specie) + " site:" + str(indices)].append(indices)
else:
raise KeyError("The given pdos_type is not supported.")
# TODO: Add specific handling
# if specific:
# tmp = defaultdict(list)
# for key, value in grouped_indices.items():
# if pdos_type == "element" and key in specific:
# tmp[key] = value
# else:
# # type(index) is str
# index = ''.join(c for c in key if c.isdigit())
# if index in specific:
# tmp[key] = value
# grouped_indices = tmp
# efermi is set to VBM if exists.
efermi = cbm_vbm[1] if cbm_vbm else complete_dos.efermi
complete_dos.efermi = efermi
energies = complete_dos.energies
for key, value in grouped_indices.items():
for indices in value:
site = structure[indices]
if orbital:
for orb, pdos in complete_dos.get_site_spd_dos(site).items():
# " " is used for grouping the plots.
if pdos_type == "none":
name = key + " " + str(orb)
else:
name = \
key + " #" + str(len(value)) + " " + str(orb)
density = divide_densities(pdos.densities, len(value))
if name in dos:
density = add_densities(dos[name].densities, density)
dos[name] = Dos(efermi, energies, density)
else:
dos[name] = Dos(efermi, energies, density)
else:
name = key + "(" + str(len(key)) + ")"
pdos = complete_dos.get_site_dos(site)
if name in dos:
dos[name] = add_densities(dos[name], pdos)
else:
dos[name] = pdos
# use complete_dos.efermi for total dos.
plotter = ViseDosPlotter(zero_at_efermi=zero_at_efermi)
plotter.add_dos_dict(dos)
if xlim is None:
xlim = [-10, 10]
if ymaxs:
ylims = [[-y, y] for y in ymaxs] \
if v.incar.get("ISPIN", 1) == 2 else [[0, y] for y in ymaxs]
else:
energies = complete_dos.energies - efermi
tdos_max = max_density(complete_dos.densities, energies, xlim,
crop_first_value)
tdos_max *= 1.1
ylims = [[-tdos_max, tdos_max]] if v.incar.get("ISPIN", 1) == 2 \
else [[0, tdos_max]]
pdos_max = 0.0
for k, d in dos.items():
if k == "Total":
continue
pdos_max = \
max(max_density(d.densities, energies, xlim), pdos_max)
pdos_max *= 1.1
ylims.append([-pdos_max, pdos_max] if v.incar.get("ISPIN", 1) == 2
else [0, pdos_max])
print("y-range", ylims)
if show_spg:
if sga is None:
sga = SpacegroupAnalyzer(structure, symprec=symprec)
sg_num_str = str(sga.get_space_group_number())
sg = f" {sga.get_space_group_symbol()} ({sg_num_str})"
print(f"Space group number: {sg}")
title = f"{structure.composition} SG: {sg}"
else:
title = str(structure.composition)
return plotter.get_plot(xlim=xlim,
ylims=ylims,
cbm_vbm=cbm_vbm,
legend=legend,
crop_first_value=crop_first_value,
title=title)
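# Example usage (a minimal sketch; "vasprun.xml" and "dos.pdf" are placeholder
# file names, not fixed by this module):
#     plt = get_dos_plot("vasprun.xml", pdos_type="element", xlim=[-5, 5])
#     plt.savefig("dos.pdf")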
def divide_densities(density: dict,
denominator: float):
"""
    Divide a density-of-states dictionary by a scalar.
    Args:
        density: Density to be divided, {Spin: np.array}.
        denominator: Value to divide the densities by.
    Returns:
        {spin: np.array(density) / denominator}.
"""
return {spin: np.array(value) / denominator
for spin, value in density.items()}
def max_density(density: dict,
energies: list,
xlim: list,
crop_first_value: bool = True) -> float:
"""
    Find the maximum density value within the given x-range.
Args:
density (dict):
            Note that the first value may contain huge values when the
lower limit of the calculation of density of states is larger than
that of occupied states. Therefore, we need to crop the first value
by default.
{Spin.up: [...], Spin.down: [...] }
energies (list):
Energy mesh
xlim (list):
Limit of x-range.
[x-min, x-max]
crop_first_value (bool):
Whether to crop the first value or not.
Return:
Max value in the density within the given x-range.
"""
values = []
for density_in_each_spin in density.values():
for i, (d, e) in enumerate(zip(density_in_each_spin, energies)):
if crop_first_value and i == 0:
continue
if xlim[0] < e < xlim[1]:
values.append(d)
if not values:
raise ValueError("DOS is empty at the given energy {0[0]} - {0[1]} "
"range.".format(xlim))
return max(values)
| [
"[email protected]"
] | |
0021c7e9e93f3bb30c1d2f4511b9a15aee101958 | a00c487d88c50401ebf8505cd267c70b42e3c362 | /bangla/soup/MSR.py | 9c488a0e43064bf12f84eebfbeb665415f8376dd | [] | no_license | sharif1302042/A-news-Agrregation-system | 9aca07ed29f13b5da8e93a2aabe03281d6b66365 | 5e48a726f5fedba686d18601d561784c6ceddd5a | refs/heads/master | 2020-04-01T11:00:39.088387 | 2019-11-09T15:05:24 | 2019-11-09T15:05:24 | 153,142,416 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 936 | py | import requests
from bs4 import BeautifulSoup
news=[]
r=requests.get('https://bangla.bdnews24.com/politics/')
soup = BeautifulSoup(r.text, 'html.parser')
r1=soup.find_all('li',attrs={'class':'article first '})
r2=soup.find_all('li',attrs={'class':'article '})
l=0
for r in r1+r2:
if l<10:
        link=r.find('a')['href']
        title=r.find('a').text[1:-1]
        news.append((title,link,'Bdnews24'))
l+=1
"""
#--------------jugantor-----------
r=requests.get('https://www.jugantor.com/')
soup = BeautifulSoup(r.text, 'html.parser')
r1=soup.find_all('div',attrs={'id':'popular_list_block'})
url=r1[0].find('a')
r=r1[0].find('a')
txt=r.find('h4').text
news.append((txt,url,"Jugantor"))
r1=soup.find_all('div',attrs={'class':'editor_picks_list'})
l=0
for r in r1:
if l<6:
url=r.find('a')['href']
txt=r.find('a')
txt=txt.find('h4').text
news.append((txt,url,"Jugantor"))
l+=1
print('MSR',len(news))
for r in news:
print(r[0])
""" | [
"[email protected]"
] | |
1602340190e28cb47ee3c4a8aa11ec9b668431a0 | ed5a082d977aefcecc8c40c76046d26334615a8e | /contest/abc/abc147/a.py | a0322ac34844aa2ebbc0d19eb0d875a19fe8e7e9 | [] | no_license | arakoma/competitive_programming | 0ff9b9a97d2f37a3a1dac96c157f3235dde96b85 | ebbc5621860aca320a6949433f1707f1cbfcf911 | refs/heads/master | 2021-08-07T10:50:08.890353 | 2021-07-10T14:10:15 | 2021-07-10T14:10:15 | 223,712,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | a = list(map(int, input().split()))
if sum(a) >= 22:
print("bust")
else:
print("win") | [
"[email protected]"
] | |
5102a13c1af192205b49132a170a820a1c33ee47 | a799a105ab2aba39a475bf2ce086405def0351c2 | /test/model/tpp/common.py | 4fd78b174d605ac5736f5e09c8c9e575b42dfa7b | [
"Apache-2.0"
] | permissive | mbohlkeschneider/gluon-ts | d663750d13798624eca5c9d6f12a87e321ce7334 | df4256b0e67120db555c109a1bf6cfa2b3bd3cd8 | refs/heads/master | 2021-11-24T06:09:49.905352 | 2021-10-14T09:30:38 | 2021-10-14T09:30:38 | 192,546,557 | 54 | 10 | Apache-2.0 | 2022-08-31T18:36:44 | 2019-06-18T13:33:36 | Python | UTF-8 | Python | false | false | 2,364 | py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import numpy as np
import pandas as pd
import pytest
from gluonts.dataset.common import ListDataset
def point_process_dataset():
ia_times = np.array([0.2, 0.7, 0.2, 0.5, 0.3, 0.3, 0.2, 0.1])
marks = np.array([0, 1, 2, 0, 1, 2, 2, 2])
lds = ListDataset(
[
{
"target": np.c_[ia_times, marks].T,
"start": pd.Timestamp("2011-01-01 00:00:00", freq="H"),
"end": pd.Timestamp("2011-01-01 03:00:00", freq="H"),
}
],
freq="H",
one_dim_target=False,
)
return lds
def point_process_dataset_2():
lds = ListDataset(
[
{
"target": np.c_[
np.array([0.2, 0.7, 0.2, 0.5, 0.3, 0.3, 0.2, 0.1]),
np.array([0, 1, 2, 0, 1, 2, 2, 2]),
].T,
"start": pd.Timestamp("2011-01-01 00:00:00", freq="H"),
"end": pd.Timestamp("2011-01-01 03:00:00", freq="H"),
},
{
"target": np.c_[
np.array([0.2, 0.1, 0.2, 0.1, 0.3, 0.3, 0.5, 0.4]),
np.array([0, 1, 2, 0, 1, 2, 1, 1]),
].T,
"start": pd.Timestamp("2011-01-01 00:00:00", freq="H"),
"end": pd.Timestamp("2011-01-01 03:00:00", freq="H"),
},
{
"target": np.c_[
np.array([0.2, 0.7, 0.2, 0.5, 0.1, 0.1, 0.2, 0.1]),
np.array([0, 1, 2, 0, 1, 0, 1, 2]),
].T,
"start": pd.Timestamp("2011-01-01 00:00:00", freq="H"),
"end": pd.Timestamp("2011-01-01 03:00:00", freq="H"),
},
],
freq="H",
one_dim_target=False,
)
return lds
| [
"[email protected]"
] | |
94edf1ad6adc7d8a3551c8b9103bf294c8afc731 | cd23b0457bc02a60b89f1f52783e56cc36d85b5e | /oop/getitem.py | 1bff5722f8bb3e1db032216968dc42463cf0a724 | [] | no_license | cluo/learingPython | 65c7068613e1a2ae0178e23770503043d9278c45 | 54609288e489047d4dd1dead5ac142f490905f0e | refs/heads/master | 2020-04-01T13:04:15.981758 | 2015-02-23T13:21:31 | 2015-02-23T13:21:31 | 28,440,969 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | #!/usr/bin/evn python
#-*- coding:utf-8 -*-
__author__ = 'admin'
class Indexer:
def __getitem__(self, index):
return index ** 2
X = Indexer()
print X[2]
for i in range(5):
print(X[i])
class stepper:
def __getitem__(self, i):
return self.data[i]
X = stepper()
X.data = 'spam'
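# stepper defines no __iter__, so the for loop, membership test, and the
# conversions below all fall back to repeated __getitem__ calls with
# indexes 0, 1, 2, ... until IndexError is raised.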
for item in X:
print(item)
print 'p' in X
print [c for c in X]
print ''.join(X)
print list(X)
print tuple(X)
| [
"[email protected]"
] | |
087e92e25d5452f986b22430ce4fffefb538f075 | 0b49c40162e15b5e0c551e548d865c4105e8df7d | /koopmanInvertedPendulum.py | 23a2317318f8c0da4234581463207c14b2bd54f1 | [] | no_license | jiemingChen/DeepKoopman | 654a47922e4d7d6161c032a5e7ac7374d6999917 | 2e6ce8218c0bf5b7bcb072a6983a8f6870ec6186 | refs/heads/master | 2023-03-27T22:39:24.992333 | 2020-09-28T23:11:40 | 2020-09-28T23:11:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,539 | py | import numpy as np
import torch
import torch.nn as nn
import gym
from torch.utils.data import Dataset, DataLoader
import control
import os
from ReplayBuffer import ReplayBuffer
import time
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--env_name", default='InvertedPendulum-v2')
parser.add_argument("--max_iter", default=200)
parser.add_argument("--hidden_dim", default=3, type=int)
parser.add_argument("--mode", default="train")
args = parser.parse_args()
class DeepKoopman():
def __init__(self, env_name = "Pendulum-v0", hidden_dim = 2):
self.env_name = env_name
self.env = gym.make(env_name)
self.state_dim = self.env.observation_space.shape[0]+1
self.hidden_dim = hidden_dim
self.action_dim = self.env.action_space.shape[0]
self.encoder = nn.Sequential(nn.Linear(self.state_dim, 16),
nn.PReLU(),
nn.Linear(16, 16),
nn.PReLU(),
nn.Linear(16, hidden_dim))
self.decoder = nn.Sequential(nn.Linear(hidden_dim, 16),
nn.PReLU(),
nn.Linear(16, 16),
nn.PReLU(),
nn.Linear(16, self.state_dim))
self.propagate = nn.Linear(hidden_dim+self.action_dim, hidden_dim, bias = False)
self.lambda1 = 1.0
self.lambda2 = 0.3
self.replay_buffer = ReplayBuffer(100000)
def get_system(self):
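        # The propagate layer acts on the concatenated vector [g, u], so its
        # weight matrix splits column-wise into A (first hidden_dim columns)
        # and B (remaining action_dim columns) of the lifted linear system.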
weight = self.propagate.weight.data.numpy()
A = weight[:, :self.hidden_dim]
B = weight[:, self.hidden_dim:]
return A, B
def forward(self, xt, ut):
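        # Encode x_t, take one explicit Euler step of the lifted linear
        # dynamics (g_{t+1} = g_t + dt * propagate([g_t, u_t])), and decode
        # both the current and the predicted latent states.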
gt = self.encoder(xt)
xt_ = self.decoder(gt)
gtdot = self.propagate(torch.cat((gt, ut), axis = -1))
gt1 = gt + self.env.env.dt*gtdot
xt1_ = self.decoder(gt1)
return gt, gt1, xt_, xt1_
def save(self):
if not os.path.exists("weights/"):
os.mkdir("weights/")
file_name = "weights/" + self.env_name + ".pt"
torch.save({"encoder" : self.encoder.state_dict(),
"decoder" : self.decoder.state_dict(),
"propagate" : self.propagate.state_dict()}, file_name)
print("save model to " + file_name)
def load(self):
try:
if not os.path.exists("weights/"):
os.mkdir("weights/")
file_name = "weights/" + self.env_name + ".pt"
checkpoint = torch.load(file_name)
self.encoder.load_state_dict(checkpoint["encoder"])
self.decoder.load_state_dict(checkpoint["decoder"])
self.propagate.load_state_dict(checkpoint["propagate"])
print("load model from " + file_name)
except:
print("fail to load model!")
def transform_state(self, x):
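        # Expand the pole angle into [theta, sin(theta), cos(theta)] and keep
        # the two velocities; the raw observation is assumed to be
        # [cart position, pole angle, cart velocity, pole angular velocity].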
return np.array([x[1], np.sin(x[1]), np.cos(x[1]), x[2], x[3]])
def policy_rollout(self):
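        # Roll out one episode under an LQR controller designed on the current
        # lifted linear model (A, B), with occasional random actions for
        # exploration, and push the transitions into the replay buffer.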
A, B = self.get_system()
Q = np.eye(self.hidden_dim)
R = np.array([[0.01]])
K, _, _ = control.lqr(A, B, Q, R)
ref = torch.FloatTensor([[0.0, 0.0, 1.0, 0., 0.]])
        ref = self.encoder(ref).detach().numpy()
obs_old = self.transform_state(self.env.reset())
#obs_old[2] = obs_old[2] / 8.0
for _ in range(200):
if np.random.random() > 0.05:
state = torch.FloatTensor(obs_old.reshape((1, -1)))
                y = self.encoder(state).detach().numpy()
action = -np.dot(K, (y-ref).T)
action = np.clip(np.array([action.item()]), -1., 1.)
else:
action = self.env.action_space.sample()
#self.env.render()
obs, reward, done, info = self.env.step(action)
#obs[2] = obs[2] / 8.0
obs = self.transform_state(obs)
self.replay_buffer.push((obs_old, action, obs))
obs_old = obs
def random_rollout(self):
obs_old = self.transform_state(self.env.reset())
#obs_old[2] = obs_old[2] / 8.
for _ in range(200):
action = self.env.action_space.sample()
obs, reward, done, info = self.env.step(action)
obs = self.transform_state(obs)
#obs[2] = obs[2] / 8.0
self.replay_buffer.push((obs_old, action, obs))
obs_old = obs
def train(self, max_iter, lr =0.001):
mseloss = nn.MSELoss()
l1loss = nn.L1Loss()
encoder_optimizer = torch.optim.Adam(self.encoder.parameters(), lr = lr)
decoder_optimizer = torch.optim.Adam(self.decoder.parameters(), lr = lr)
propagate_optimizer = torch.optim.Adam(self.propagate.parameters(), lr = lr)
for i in range(20):
self.random_rollout()
for it in range(max_iter):
loss_hist = []
for _ in range(100):
xt, ut, xt1 = self.replay_buffer.sample(64)
xt = torch.FloatTensor(xt)
ut = torch.FloatTensor(ut)
xt1 = torch.FloatTensor(xt1)
gt, gt1, xt_, xt1_ = self.forward(xt, ut)
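                # ae_loss: reconstruct x_t from its latent code;
                # pred_loss: decoded one-step prediction should match x_{t+1};
                # metric_loss: latent distances should track state-space distances.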
ae_loss = mseloss(xt_, xt)
pred_loss = mseloss(xt1_, xt1)
metric_loss = l1loss(torch.norm(gt1-gt, dim=1), torch.norm(xt1-xt, dim=1))
#reg_loss = torch.norm(self.propagate.weight.data[:, self.hidden_dim:])
total_loss = ae_loss + self.lambda1*pred_loss + self.lambda2*metric_loss
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
propagate_optimizer.zero_grad()
total_loss.backward()
encoder_optimizer.step()
decoder_optimizer.step()
propagate_optimizer.step()
loss_hist.append(total_loss.detach().numpy())
print("epoch: %d, loss: %2.5f" % (it, np.mean(loss_hist)))
for i in range(5):
self.policy_rollout()
for i in range(5):
self.random_rollout()
if __name__ == "__main__":
model = DeepKoopman(args.env_name, args.hidden_dim)
if args.mode == "train":
model.train(args.max_iter, 0.001)
model.save()
else:
model.load()
A, B = model.get_system()
Q = np.eye(args.hidden_dim)
R = np.array([[0.08]])
K, _, _ = control.lqr(A, B, Q, R)
print(A)
print(B)
print(K)
env = gym.make(args.env_name)
ref = torch.FloatTensor([[0.0, 0.0, 1.0, 0., 0.]])
ref = model.encoder(ref).detach().numpy()
offset = [0.1, 0.2, 0.3, 0.4, 0.5]
for k in range(5):
state = env.reset()
state[1] = offset[k]
env.env.set_state(state[:2], state[:2])
state = model.transform_state(state)
for i in range(200):
env.render()
state = torch.FloatTensor(state.reshape((1, -1)))
#state[0, 2] = state[0, 2] / 8.0
y = model.encoder(state).detach().numpy()
action = -np.dot(K, (y-ref).T)
state, reward, done, info = env.step(action)
#print(state)
state = model.transform_state(state)
env.close()
| [
"[email protected]"
] | |
e3dfa7a4455e0f59230ecf1846ccc32cc4122744 | e6c65e2e354336a4bea5b6a4ccbccd3682915fe2 | /out-bin/py/google/fhir/models/run_locally.runfiles/com_google_fhir/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/contrib/receptive_field/python/__init__.py | 3d7249a31712e439c472e3403dd11bd766b47d41 | [
"Apache-2.0"
] | permissive | rasalt/fhir-datalab | c30ab773d84983dd04a37e9d0ddec8bf2824b8a4 | 3e329fc8b4226d3e3a4a7c23c306a86e7a9ea0de | refs/heads/master | 2021-10-09T05:51:04.593416 | 2018-12-21T18:11:03 | 2018-12-22T05:38:32 | 162,744,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | /home/rkharwar/.cache/bazel/_bazel_rkharwar/0ddaa3627472ad9d1367a008236ce2f5/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/contrib/receptive_field/python/__init__.py | [
"[email protected]"
] | |
e6b1813c3f8b1b5ec5036f41e3260c00447cd56c | a9e3f3ad54ade49c19973707d2beb49f64490efd | /Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/api_admin/tests/test_views.py | ae07399acb44c8b63d501cadbbf37a3fbe23ed9e | [
"AGPL-3.0-only",
"AGPL-3.0-or-later",
"MIT"
] | permissive | luque/better-ways-of-thinking-about-software | 8c3dda94e119f0f96edbfe5ba60ca6ec3f5f625d | 5809eaca7079a15ee56b0b7fcfea425337046c97 | refs/heads/master | 2021-11-24T15:10:09.785252 | 2021-11-22T12:14:34 | 2021-11-22T12:14:34 | 163,850,454 | 3 | 1 | MIT | 2021-11-22T12:12:31 | 2019-01-02T14:21:30 | JavaScript | UTF-8 | Python | false | false | 15,310 | py | """ Tests for the api_admin app's views. """
import json
import ddt
import httpretty
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from django.urls import reverse
from oauth2_provider.models import get_application_model
from openedx.core.djangoapps.api_admin.models import ApiAccessConfig, ApiAccessRequest
from openedx.core.djangoapps.api_admin.tests.factories import (
ApiAccessRequestFactory,
ApplicationFactory,
CatalogFactory
)
from openedx.core.djangoapps.api_admin.tests.utils import VALID_DATA
from openedx.core.djangolib.testing.utils import skip_unless_lms
from common.djangoapps.student.tests.factories import UserFactory
Application = get_application_model() # pylint: disable=invalid-name
class ApiAdminTest(TestCase):
"""
Base class to allow API admin access to tests.
"""
def setUp(self):
super().setUp()
ApiAccessConfig(enabled=True).save()
@skip_unless_lms
class ApiRequestViewTest(ApiAdminTest):
"""
Test the API Request View.
"""
def setUp(self):
super().setUp()
self.url = reverse('api_admin:api-request')
password = 'abc123'
self.user = UserFactory(password=password)
self.client.login(username=self.user.username, password=password)
def test_get(self):
"""Verify that a logged-in can see the API request form."""
response = self.client.get(self.url)
assert response.status_code == 200
def test_get_anonymous(self):
"""Verify that users must be logged in to see the page."""
self.client.logout()
response = self.client.get(self.url)
assert response.status_code == 302
def test_get_with_existing_request(self):
"""
Verify that users who have already requested access are redirected
to the client creation page to see their status.
"""
ApiAccessRequestFactory(user=self.user)
response = self.client.get(self.url)
self.assertRedirects(response, reverse('api_admin:api-status'))
def _assert_post_success(self, response):
"""
Assert that a successful POST has been made, that the response
redirects correctly, and that the correct object has been created.
"""
self.assertRedirects(response, reverse('api_admin:api-status'))
api_request = ApiAccessRequest.objects.get(user=self.user)
assert api_request.status == ApiAccessRequest.PENDING
return api_request
def test_post_valid(self):
"""Verify that a logged-in user can create an API request."""
assert not ApiAccessRequest.objects.all().exists()
response = self.client.post(self.url, VALID_DATA)
self._assert_post_success(response)
def test_post_anonymous(self):
"""Verify that users must be logged in to create an access request."""
self.client.logout()
response = self.client.post(self.url, VALID_DATA)
assert response.status_code == 302
assert not ApiAccessRequest.objects.all().exists()
def test_get_with_feature_disabled(self):
"""Verify that the view can be disabled via ApiAccessConfig."""
ApiAccessConfig(enabled=False).save()
response = self.client.get(self.url)
assert response.status_code == 404
def test_post_with_feature_disabled(self):
"""Verify that the view can be disabled via ApiAccessConfig."""
ApiAccessConfig(enabled=False).save()
response = self.client.post(self.url)
assert response.status_code == 404
@skip_unless_lms
@override_settings(PLATFORM_NAME='edX')
@ddt.ddt
class ApiRequestStatusViewTest(ApiAdminTest):
"""
Tests of the API Status endpoint.
"""
def setUp(self):
super().setUp()
password = 'abc123'
self.user = UserFactory(password=password)
self.client.login(username=self.user.username, password=password)
self.url = reverse('api_admin:api-status')
def test_get_without_request(self):
"""
Verify that users who have not yet requested API access are
redirected to the API request form.
"""
response = self.client.get(self.url)
self.assertRedirects(response, reverse('api_admin:api-request'))
@ddt.data(
(ApiAccessRequest.APPROVED, 'Your request to access the edX Course Catalog API has been approved.'),
(ApiAccessRequest.PENDING, 'Your request to access the edX Course Catalog API is being processed.'),
(ApiAccessRequest.DENIED, 'Your request to access the edX Course Catalog API has been denied.'),
)
@ddt.unpack
def test_get_with_request(self, status, expected):
"""
Verify that users who have requested access can see a message
regarding their request status.
"""
ApiAccessRequestFactory(user=self.user, status=status)
response = self.client.get(self.url)
self.assertContains(response, expected)
def test_get_with_existing_application(self):
"""
Verify that if the user has created their client credentials, they
are shown on the status page.
"""
ApiAccessRequestFactory(user=self.user, status=ApiAccessRequest.APPROVED)
application = ApplicationFactory(user=self.user)
response = self.client.get(self.url)
self.assertContains(response, application.client_secret)
self.assertContains(response, application.client_id)
self.assertContains(response, application.redirect_uris)
def test_get_anonymous(self):
"""Verify that users must be logged in to see the page."""
self.client.logout()
response = self.client.get(self.url)
assert response.status_code == 302
def test_get_with_feature_disabled(self):
"""Verify that the view can be disabled via ApiAccessConfig."""
ApiAccessConfig(enabled=False).save()
response = self.client.get(self.url)
assert response.status_code == 404
@ddt.data(
(ApiAccessRequest.APPROVED, True, True),
(ApiAccessRequest.DENIED, True, False),
(ApiAccessRequest.PENDING, True, False),
(ApiAccessRequest.APPROVED, False, True),
(ApiAccessRequest.DENIED, False, False),
(ApiAccessRequest.PENDING, False, False),
)
@ddt.unpack
def test_post(self, status, application_exists, new_application_created):
"""
Verify that posting the form creates an application if the user is
approved, and does not otherwise. Also ensure that if the user
already has an application, it is deleted before a new
application is created.
"""
if application_exists:
old_application = ApplicationFactory(user=self.user)
ApiAccessRequestFactory(user=self.user, status=status)
self.client.post(self.url, {
'name': 'test.com',
'redirect_uris': 'http://example.com'
})
applications = Application.objects.filter(user=self.user)
if application_exists and new_application_created:
assert applications.count() == 1
assert old_application != applications[0]
elif application_exists:
assert applications.count() == 1
assert old_application == applications[0]
elif new_application_created:
assert applications.count() == 1
else:
assert applications.count() == 0
def test_post_with_errors(self):
ApiAccessRequestFactory(user=self.user, status=ApiAccessRequest.APPROVED)
response = self.client.post(self.url, {
'name': 'test.com',
'redirect_uris': 'not a url'
})
self.assertContains(response, 'Enter a valid URL.')
@skip_unless_lms
class ApiTosViewTest(ApiAdminTest):
"""
Tests of the API terms of service endpoint.
"""
def test_get_api_tos(self):
"""
Verify that the terms of service can be read.
"""
url = reverse('api_admin:api-tos')
response = self.client.get(url)
self.assertContains(response, 'Terms of Service')
class CatalogTest(ApiAdminTest):
"""
Test the catalog API.
"""
def setUp(self):
super().setUp()
password = 'abc123'
self.user = UserFactory(password=password, is_staff=True)
self.client.login(username=self.user.username, password=password)
def mock_catalog_endpoint(self, data, catalog_id=None, method=httpretty.GET, status_code=200):
""" Mock the Course Catalog API's catalog endpoint. """
assert httpretty.is_enabled(), 'httpretty must be enabled to mock Catalog API calls.'
url = '{root}/catalogs/'.format(root=settings.COURSE_CATALOG_API_URL.rstrip('/'))
if catalog_id:
url += f'{catalog_id}/'
httpretty.register_uri(
method,
url,
body=json.dumps(data),
content_type='application/json',
status=status_code
)
@skip_unless_lms
class CatalogSearchViewTest(CatalogTest):
"""
Test the catalog search endpoint.
"""
def setUp(self):
super().setUp()
self.url = reverse('api_admin:catalog-search')
def test_get(self):
response = self.client.get(self.url)
assert response.status_code == 200
@httpretty.activate
def test_post(self):
catalog_user = UserFactory()
self.mock_catalog_endpoint({'results': []})
response = self.client.post(self.url, {'username': catalog_user.username})
self.assertRedirects(response, reverse('api_admin:catalog-list', kwargs={'username': catalog_user.username}))
def test_post_without_username(self):
response = self.client.post(self.url, {'username': ''})
self.assertRedirects(response, reverse('api_admin:catalog-search'))
@skip_unless_lms
class CatalogListViewTest(CatalogTest):
"""
Test the catalog list endpoint.
"""
def setUp(self):
super().setUp()
self.catalog_user = UserFactory()
self.url = reverse('api_admin:catalog-list', kwargs={'username': self.catalog_user.username})
@httpretty.activate
def test_get(self):
catalog = CatalogFactory(viewers=[self.catalog_user.username])
self.mock_catalog_endpoint({'results': [catalog.attributes]})
response = self.client.get(self.url)
self.assertContains(response, catalog.name)
@httpretty.activate
def test_get_no_catalogs(self):
"""Verify that the view works when no catalogs are set up."""
self.mock_catalog_endpoint({}, status_code=404)
response = self.client.get(self.url)
assert response.status_code == 200
@httpretty.activate
def test_post(self):
catalog_data = {
'name': 'test-catalog',
'query': '*',
'viewers': [self.catalog_user.username]
}
catalog_id = 123
self.mock_catalog_endpoint(dict(catalog_data, id=catalog_id), method=httpretty.POST)
response = self.client.post(self.url, catalog_data)
assert httpretty.last_request().method == 'POST'
self.mock_catalog_endpoint(CatalogFactory().attributes, catalog_id=catalog_id)
self.assertRedirects(response, reverse('api_admin:catalog-edit', kwargs={'catalog_id': catalog_id}))
@httpretty.activate
def test_post_invalid(self):
catalog = CatalogFactory(viewers=[self.catalog_user.username])
self.mock_catalog_endpoint({'results': [catalog.attributes]})
response = self.client.post(self.url, {
'name': '',
'query': '*',
'viewers': [self.catalog_user.username]
})
assert response.status_code == 400
# Assert that no POST was made to the catalog API
assert len([r for r in httpretty.httpretty.latest_requests if r.method == 'POST']) == 0
@skip_unless_lms
class CatalogEditViewTest(CatalogTest):
"""
Test edits to the catalog endpoint.
"""
def setUp(self):
super().setUp()
self.catalog_user = UserFactory()
self.catalog = CatalogFactory(viewers=[self.catalog_user.username])
self.url = reverse('api_admin:catalog-edit', kwargs={'catalog_id': self.catalog.id})
@httpretty.activate
def test_get(self):
self.mock_catalog_endpoint(self.catalog.attributes, catalog_id=self.catalog.id)
response = self.client.get(self.url)
self.assertContains(response, self.catalog.name)
@httpretty.activate
def test_delete(self):
self.mock_catalog_endpoint(
self.catalog.attributes,
method=httpretty.DELETE,
catalog_id=self.catalog.id
)
response = self.client.post(self.url, {'delete-catalog': 'on'})
self.assertRedirects(response, reverse('api_admin:catalog-search'))
assert httpretty.last_request().method == 'DELETE' # lint-amnesty, pylint: disable=no-member
assert httpretty.last_request().path == \
f'/api/v1/catalogs/{self.catalog.id}/' # lint-amnesty, pylint: disable=no-member
assert len(httpretty.httpretty.latest_requests) == 1
@httpretty.activate
def test_edit(self):
self.mock_catalog_endpoint(self.catalog.attributes, method=httpretty.PATCH, catalog_id=self.catalog.id)
new_attributes = dict(self.catalog.attributes, **{'delete-catalog': 'off', 'name': 'changed'})
response = self.client.post(self.url, new_attributes)
self.mock_catalog_endpoint(new_attributes, catalog_id=self.catalog.id)
self.assertRedirects(response, reverse('api_admin:catalog-edit', kwargs={'catalog_id': self.catalog.id}))
@httpretty.activate
def test_edit_invalid(self):
self.mock_catalog_endpoint(self.catalog.attributes, catalog_id=self.catalog.id)
new_attributes = dict(self.catalog.attributes, **{'delete-catalog': 'off', 'name': ''})
response = self.client.post(self.url, new_attributes)
assert response.status_code == 400
# Assert that no PATCH was made to the Catalog API
assert len([r for r in httpretty.httpretty.latest_requests if r.method == 'PATCH']) == 0
@skip_unless_lms
class CatalogPreviewViewTest(CatalogTest):
"""
Test the catalog preview endpoint.
"""
def setUp(self):
super().setUp()
self.url = reverse('api_admin:catalog-preview')
@httpretty.activate
def test_get(self):
data = {'count': 1, 'results': ['test data'], 'next': None, 'prev': None}
httpretty.register_uri(
httpretty.GET,
'{root}/courses/'.format(root=settings.COURSE_CATALOG_API_URL.rstrip('/')),
body=json.dumps(data),
content_type='application/json'
)
response = self.client.get(self.url, {'q': '*'})
assert response.status_code == 200
assert json.loads(response.content.decode('utf-8')) == data
def test_get_without_query(self):
response = self.client.get(self.url)
assert response.status_code == 200
assert json.loads(response.content.decode('utf-8')) == {'count': 0, 'results': [], 'next': None, 'prev': None}
| [
"[email protected]"
] | |
01e1441294cda302a160e5771d99e199e575a62e | 90cdfc6ff827c8334c81f6f896b1081cbb4d4f7a | /07GUI/08Pyqt5/06QtLearning/main.py | 67e007139e350075c02c31f2644d82b77e45fcbe | [] | no_license | HBU/Jupyter | c79883f329efd2426c5c8fde1364266ed8b5059f | b3d5d08c89c26c68027409c2b466ac64aeb1af39 | refs/heads/master | 2022-07-06T22:00:43.694050 | 2020-12-22T09:53:02 | 2020-12-22T09:53:02 | 123,717,897 | 3 | 3 | null | 2022-07-06T19:20:58 | 2018-03-03T18:04:01 | Jupyter Notebook | UTF-8 | Python | false | false | 3,190 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'main.ui'
#
# Created by: PyQt5 UI code generator 5.10.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(517, 400)
self.label = QtWidgets.QLabel(Dialog)
self.label.setGeometry(QtCore.QRect(80, 10, 211, 61))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(36)
self.label.setFont(font)
self.label.setObjectName("label")
self.tableView = QtWidgets.QTableView(Dialog)
self.tableView.setGeometry(QtCore.QRect(60, 100, 256, 261))
self.tableView.setObjectName("tableView")
self.layoutWidget = QtWidgets.QWidget(Dialog)
self.layoutWidget.setGeometry(QtCore.QRect(340, 120, 135, 241))
self.layoutWidget.setObjectName("layoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.pushButton_2 = QtWidgets.QPushButton(self.layoutWidget)
self.pushButton_2.setObjectName("pushButton_2")
self.verticalLayout.addWidget(self.pushButton_2)
self.pushButton_3 = QtWidgets.QPushButton(self.layoutWidget)
self.pushButton_3.setObjectName("pushButton_3")
self.verticalLayout.addWidget(self.pushButton_3)
self.pushButton_4 = QtWidgets.QPushButton(self.layoutWidget)
self.pushButton_4.setObjectName("pushButton_4")
self.verticalLayout.addWidget(self.pushButton_4)
self.lineEdit = QtWidgets.QLineEdit(self.layoutWidget)
self.lineEdit.setObjectName("lineEdit")
self.verticalLayout.addWidget(self.lineEdit)
self.pushButton_5 = QtWidgets.QPushButton(self.layoutWidget)
self.pushButton_5.setObjectName("pushButton_5")
self.verticalLayout.addWidget(self.pushButton_5)
self.pushButton = QtWidgets.QPushButton(self.layoutWidget)
self.pushButton.setObjectName("pushButton")
self.verticalLayout.addWidget(self.pushButton)
self.retranslateUi(Dialog)
self.pushButton.clicked.connect(Dialog.btnClose)
self.pushButton_2.clicked.connect(Dialog.btnInsert)
self.pushButton_3.clicked.connect(Dialog.btnDelete)
self.pushButton_4.clicked.connect(Dialog.btnUpdate)
self.pushButton_5.clicked.connect(Dialog.btnQuery)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.label.setText(_translate("Dialog", "用户管理"))
self.pushButton_2.setText(_translate("Dialog", "增加"))
self.pushButton_3.setText(_translate("Dialog", "删除"))
self.pushButton_4.setText(_translate("Dialog", "修改"))
self.pushButton_5.setText(_translate("Dialog", "查询"))
self.pushButton.setText(_translate("Dialog", "关闭"))
| [
"[email protected]"
] | |
902b8d163053965b0fd5ccb0bccc4093f6735a82 | 0adf94fc39a02018165b62e93dd83edddd041230 | /.history/Jobs/views_20190225164613.py | 81e0cf17f2f0704d47a0e7fa8441b3d22cbb48ad | [] | no_license | SabitDeepto/BrJobs | 1e3baa143331cf46b9c70911c6644d1efd4fffd6 | 1a458c8c667f8093a2325d963e5542655467c7aa | refs/heads/master | 2020-04-24T08:02:26.350007 | 2019-03-17T05:53:30 | 2019-03-17T05:53:30 | 171,818,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,304 | py | from django.contrib.auth.forms import UserCreationForm
from django.shortcuts import redirect, render
from django.urls import reverse_lazy
from django.views import generic
from .forms import UserForm, ProfileForm
from django.contrib import messages
from django.db.models import Q
from django.shortcuts import get_object_or_404, render, render_to_response
from .forms import JobPostForm
from .models import JobPost
def home(request):
post = JobPost.objects.all()
return render(request, 'basic/index.html', {'post': post})
def single_post(request, post_id):
post = JobPost.objects.get(pk=post_id)
return render(request, 'basic/detail.html', {'post': post})
def jobpost(request):
form = JobPostForm(request.POST, request.FILES)
if form.is_valid():
form.save()
return redirect('home')
return render(request, 'basic/client-job.html', {'form': form})
def update_profile(request):
if request.method == 'POST':
user_form = UserForm(request.POST, instance=request.user)
profile_form = ProfileForm(request.POST, instance=request.user.profile)
if user_form.is_valid() and profile_form.is_valid():
user_form.save()
profile_form.save()
messages.success(request, ('Your profile was successfully updated!'))
# return redirect('settings:profile')
else:
messages.error(request, ('Please correct the error below.'))
else:
user_form = UserForm(instance=request.user)
profile_form = ProfileForm(instance=request.user.profile)
return render(request, 'basic/test.html', {
'user_form': user_form,
'profile_form': profile_form
})
def searchposts(request):
if request.method == 'GET':
query = request.GET.get('q')
submitbutton = request.GET.get('submit')
if query is not None:
lookups = Q(title__icontains=query) | Q(detail__icontains=query)
            results = JobPost.objects.filter(lookups).distinct()
context = {'results': results,
'submitbutton': submitbutton}
return render(request, 'blog/blog_view.html', context)
else:
return render(request, 'blog/blog_view.html')
else:
return render(request, 'blog/blog_view.html')
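
# Sketch of the URL configuration these views assume (URL names and patterns are
# illustrative, not taken from this repository):
#
#     from django.urls import path
#     from . import views
#
#     urlpatterns = [
#         path('', views.home, name='home'),
#         path('post/<int:post_id>/', views.single_post, name='single_post'),
#         path('post/new/', views.jobpost, name='jobpost'),
#         path('profile/', views.update_profile, name='update_profile'),
#         path('search/', views.searchposts, name='searchposts'),
#     ]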
| [
"[email protected]"
] | |
612247c1e53605ffa741a2fd8c545e5aee1047b8 | 1c2a9ce62301d5342113f2fdea8faefe807877c3 | /weekly/models.py | 95cda273c45b342928bebd15c878c21b9bdd4218 | [] | no_license | Jillelanglas/weekly | 782c03595118bb110c6d4ef3cda182d4b750ce30 | b4b5bd373b7b9a07198c1354ea2f9a7854ffa75b | refs/heads/master | 2021-01-15T23:07:08.495235 | 2013-10-05T18:01:51 | 2013-10-05T18:01:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,524 | py | from weekly import db
import cryptacular.bcrypt
import datetime
import mongoengine
from flask import url_for
from misaka import Markdown, HtmlRenderer
rndr = HtmlRenderer()
md = Markdown(rndr)
crypt = cryptacular.bcrypt.BCRYPTPasswordManager()
class User(db.Document):
_password = db.StringField(max_length=1023, required=True)
username = db.StringField(max_length=32, min_length=3, unique=True)
name = db.StringField(max_length=32, min_length=3, unique=True)
team = db.ReferenceField('Team')
major = db.ReferenceField('Major')
email = db.StringField(required=True)
admin = db.BooleanField(default=False)
active = db.BooleanField(default=False)
_type = db.IntField(min_value=0, max_value=3)
@property
def type(self):
if self._type == 0:
return 'Volunteer'
elif self._type == 1:
return 'Senior'
elif self._type == 2:
return 'Alumni'
else:
return 'Other'
@property
def password(self):
return self._password
@password.setter
def password(self, val):
self._password = unicode(crypt.encode(val))
def check_password(self, password):
return crypt.check(self._password, password)
def is_authenticated(self):
return True
def is_active(self):
return self.active
def is_anonymous(self):
return False
def get_id(self):
return unicode(self.id)
def __repr__(self):
        return '<User %r>' % (self.username)
class Comment(db.EmbeddedDocument):
body = db.StringField(min_length=10)
user = db.ReferenceField(User, required=True)
time = db.DateTimeField()
@property
def md_body(self):
return md.render(self.body)
class Post(db.Document):
id = db.ObjectIdField()
body = db.StringField(min_length=10)
    timestamp = db.DateTimeField(default=datetime.datetime.now)
year = db.IntField(required=True)
week = db.IntField(required=True)
user = db.ReferenceField(User, required=True)
comments = db.ListField(db.EmbeddedDocumentField(Comment))
@property
def md_body(self):
return md.render(self.body)
@classmethod
def next_week(self, week=None, year=None):
now = datetime.datetime.now().isocalendar()
if not week:
week = now[1] - 1
if not year:
year = now[0]
if week == 52:
year += 1
week = 0
else:
week += 1
return url_for('index', week=week, year=year)
@classmethod
def prev_week(self, week=None, year=None):
now = datetime.datetime.now().isocalendar()
if not week:
week = now[1] - 1
if not year:
year = now[0]
if week == 0:
year -= 1
week = 52
else:
week -= 1
return url_for('index', week=week, year=year)
def add_comment(self, user, body):
comment = Comment(user=user,
body=body,
time=datetime.datetime.now())
self.comments.append(comment)
self.save()
class Team(db.Document):
id = db.ObjectIdField()
text = db.StringField()
def __str__(self):
return self.text
def users(self):
return User.objects(team=self, _type=1)
class Major(db.Document):
key = db.StringField(max_length=5, primary_key=True)
text = db.StringField()
def __str__(self):
return self.text
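
# Example usage (sketch, assuming a MongoEngine connection has been registered
# with the Flask app; all field values are illustrative):
#
#     user = User(username='alice', name='Alice', email='[email protected]', _type=1)
#     user.password = 'correct horse battery staple'   # hashed by the property setter
#     user.save()
#
#     post = Post(body='What I worked on this week...', year=2013, week=40, user=user)
#     post.save()
#     post.add_comment(user, 'Nice progress, thanks for the update!')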
| [
"[email protected]"
] | |
91844c1ed6cc7e36ae4119c9586f5fb82f28822b | e204cdd8a38a247aeac3d07f6cce6822472bdcc5 | /.history/app_test_django/models_20201116133107.py | 2523c8d06c874130fa411ddfea0a2aa8bcbbfe7e | [] | no_license | steven-halla/python-test | 388ad8386662ad5ce5c1a0976d9f054499dc741b | 0b760a47d154078002c0272ed1204a94721c802a | refs/heads/master | 2023-04-08T03:40:00.453977 | 2021-04-09T19:12:29 | 2021-04-09T19:12:29 | 354,122,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,940 | py | from django.db import models
import re
class UserManager(models.Manager):
def user_registration_validator(self, post_data):
errors = {}
EMAIL_REGEX = re.compile(
r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
if len(post_data['first_name']) < 3:
errors['first_name'] = "First name must be 3 characters"
if post_data['first_name'].isalpha() == False:
errors['first_name'] = "letters only"
if len(post_data['last_name']) < 3:
errors['last_name'] = "Last name must be 3 characters"
if post_data['last_name'].isalpha() == False:
errors['last_name'] = "letters only"
if len(post_data['email']) < 8:
errors['email'] = "Email must contain 8 characters"
#if post_data['email'].Books.objects.filter(title=post_data) == True:
# errors['email'] ="this email already exist in database"
if post_data['email'].find("@") == -1:
errors['email'] = "email must contain @ and .com"
if post_data['email'].find(".com") == -1:
errors['email'] = "email must contain @ and .com"
# test whether a field matches the pattern
if not EMAIL_REGEX.match(post_data['email']):
errors['email'] = "Invalid email address!"
if post_data['password'] != post_data['confirm_password']:
errors['pass_match'] = "password must match confirm password"
if len(post_data['password']) < 8:
errors['pass_length'] = "password must be longer than 8 characters"
return errors
class User(models.Model):
first_name = models.CharField(max_length=20)
last_name = models.CharField(max_length=20)
email = models.CharField(max_length=20)
password = models.CharField(max_length=20)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
objects = UserManager()
class TripManager(models.Manager):
def add_trip_validator(self, post_data):
errors = {}
        if len(post_data['destination']) < 2:
            errors['destination'] = "destination must be at least 2 characters"
        if len(post_data['startdate']) < 1:
            errors['startdate'] = "start date needs input"
        if len(post_data['enddate']) < 1:
            errors['enddate'] = "end date needs input"
        if len(post_data['plan']) < 5:
            errors['plan'] = "plan must be at least 5 characters"
return errors
class Trip(models.Model):
destination = models.CharField(max_length=20)
startdate = models.DateTimeField()
enddate = models.DateTimeField()
plan = models.CharField(max_length=30)
uploaded_by = models.ForeignKey(User, related_name="trip_uploaded", on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
objects=TripManager()
| [
"[email protected]"
] | |
c462bafef5399e8f9cd37b8a37573720063ab2c2 | 306d2a92fb331aec6ddf0794b538d6e3385a0df9 | /app/api/account/urls.py | 21f884031d1962d2ca3574afe6cc2097735a669d | [] | no_license | Zarinabonu/ForceApp | f343d3a52aee08890230c5425c9e238df99c5a7f | 13f8e8613999c4850fc6f0bfcec66f897eecbe4a | refs/heads/master | 2020-12-10T08:00:25.072289 | 2020-01-20T13:14:07 | 2020-01-20T13:14:07 | 233,540,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | from rest_framework.serializers import ModelSerializer
from app.model import Account
class AccountSerializer(ModelSerializer):
class Meta:
model = Account
fields = ('id',
'f_name',
'l_name',
'm_name',)
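
# Example use in a DRF view (sketch; the queryset and response handling are
# illustrative):
#
#     accounts = Account.objects.all()
#     serializer = AccountSerializer(accounts, many=True)
#     return Response(serializer.data)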
| [
"[email protected]"
] | |
19e9eb6c0f0128d8724b3f15dc2aeca49e1f211b | 2d921bb03eade0763ddb3a9cc5cb637730ecbde1 | /bdt/misassign_masses.py | 21339aff913311d7f6730d9ba3d5c46fd49fded9 | [] | no_license | rmanzoni/WTau3Mu | 10c57971b80f9769578284abd69009008901eea7 | 5ad336df976d5a1b39e4b516641661921b06ba20 | refs/heads/92X | 2021-01-18T15:10:41.887147 | 2019-05-09T12:48:00 | 2019-05-09T12:48:00 | 84,342,825 | 0 | 7 | null | 2018-07-19T09:08:19 | 2017-03-08T16:35:42 | Python | UTF-8 | Python | false | false | 4,883 | py | import ROOT
import root_pandas
import numpy as np
import pandas
import root_numpy
global m_k
global m_pi
m_k = 0.493677
m_pi = 0.13957061
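# m_k and m_pi above are the PDG charged-kaon and charged-pion masses in GeV; they
# are used below as alternative mass hypotheses for the three (refitted) muon tracks.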
# tree = ROOT.TChain('tree')
# tree.Add('/Users/manzoni/Documents/tau3mu2018/16april/ntuples/data_enriched_16apr2018v16.root')
print 'loading dataset...'
dataset = pandas.DataFrame(root_numpy.root2array(
'/Users/manzoni/Documents/tau3mu2018/16april/ntuples/data_enriched_16apr2018v16.root',
'tree',
# start=0,
# stop=100000,
)
)
print '\t...done'
mpp12_array = []
mpp13_array = []
mpp23_array = []
mkk12_array = []
mkk13_array = []
mkk23_array = []
mkp12_array = []
mkp13_array = []
mkp23_array = []
mpk12_array = []
mpk13_array = []
mpk23_array = []
mppp_array = []
mppk_array = []
mpkp_array = []
mkpp_array = []
mpkk_array = []
mkpk_array = []
mkkp_array = []
mkkk_array = []
# for i, ev in enumerate(tree):
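# For every candidate, rebuild all two- and three-body invariant masses from the
# refitted muon kinematics under every pion/kaon mass assignment (naming: 'p' = pion,
# 'k' = kaon, trailing digits = which muons enter the combination).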
for i in range(len(dataset)):
if i%10000 == 0:
print '========> processed %d/%d \tevents\t%.1f' %(i, len(dataset), float(i)/len(dataset))
# for i in range(10):
# k1p4 = ROOT.Math.LorentzVector('ROOT::Math::PtEtaPhiM4D<double>')(ev.mu1_pt, ev.mu1_eta, ev.mu1_phi, m_k )
# k2p4 = ROOT.Math.LorentzVector('ROOT::Math::PtEtaPhiM4D<double>')(ev.mu2_pt, ev.mu2_eta, ev.mu2_phi, m_k )
# k3p4 = ROOT.Math.LorentzVector('ROOT::Math::PtEtaPhiM4D<double>')(ev.mu3_pt, ev.mu3_eta, ev.mu3_phi, m_k )
#
# pi1p4 = ROOT.Math.LorentzVector('ROOT::Math::PtEtaPhiM4D<double>')(ev.mu1_pt, ev.mu1_eta, ev.mu1_phi, m_pi)
# pi2p4 = ROOT.Math.LorentzVector('ROOT::Math::PtEtaPhiM4D<double>')(ev.mu2_pt, ev.mu2_eta, ev.mu2_phi, m_pi)
# pi3p4 = ROOT.Math.LorentzVector('ROOT::Math::PtEtaPhiM4D<double>')(ev.mu3_pt, ev.mu3_eta, ev.mu3_phi, m_pi)
k1p4 = ROOT.Math.LorentzVector('ROOT::Math::PtEtaPhiM4D<double>')(dataset.mu1_refit_pt[i], dataset.mu1_refit_eta[i], dataset.mu1_refit_phi[i], m_k )
k2p4 = ROOT.Math.LorentzVector('ROOT::Math::PtEtaPhiM4D<double>')(dataset.mu2_refit_pt[i], dataset.mu2_refit_eta[i], dataset.mu2_refit_phi[i], m_k )
k3p4 = ROOT.Math.LorentzVector('ROOT::Math::PtEtaPhiM4D<double>')(dataset.mu3_refit_pt[i], dataset.mu3_refit_eta[i], dataset.mu3_refit_phi[i], m_k )
pi1p4 = ROOT.Math.LorentzVector('ROOT::Math::PtEtaPhiM4D<double>')(dataset.mu1_refit_pt[i], dataset.mu1_refit_eta[i], dataset.mu1_refit_phi[i], m_pi)
pi2p4 = ROOT.Math.LorentzVector('ROOT::Math::PtEtaPhiM4D<double>')(dataset.mu2_refit_pt[i], dataset.mu2_refit_eta[i], dataset.mu2_refit_phi[i], m_pi)
pi3p4 = ROOT.Math.LorentzVector('ROOT::Math::PtEtaPhiM4D<double>')(dataset.mu3_refit_pt[i], dataset.mu3_refit_eta[i], dataset.mu3_refit_phi[i], m_pi)
mpp12 = (pi1p4 + pi2p4).mass()
mpp13 = (pi1p4 + pi3p4).mass()
mpp23 = (pi2p4 + pi3p4).mass()
mkk12 = (k1p4 + k2p4).mass()
mkk13 = (k1p4 + k3p4).mass()
mkk23 = (k2p4 + k3p4).mass()
mkp12 = (k1p4 + pi2p4).mass()
mkp13 = (k1p4 + pi3p4).mass()
mkp23 = (k2p4 + pi3p4).mass()
mpk12 = (pi1p4 + k2p4).mass()
mpk13 = (pi1p4 + k3p4).mass()
mpk23 = (pi2p4 + k3p4).mass()
mppp = (pi1p4 + pi2p4 + pi3p4).mass()
mppk = (pi1p4 + pi2p4 + k3p4 ).mass()
mpkp = (pi1p4 + k2p4 + pi3p4).mass()
mkpp = (k1p4 + pi2p4 + pi3p4).mass()
mpkk = (pi1p4 + k2p4 + k3p4 ).mass()
mkpk = (k1p4 + pi2p4 + k3p4 ).mass()
mkkp = (k1p4 + k2p4 + pi3p4).mass()
mkkk = (k1p4 + k2p4 + k3p4 ).mass()
mpp12_array.append(mpp12)
mpp13_array.append(mpp13)
mpp23_array.append(mpp23)
mkk12_array.append(mkk12)
mkk13_array.append(mkk13)
mkk23_array.append(mkk23)
mkp12_array.append(mkp12)
mkp13_array.append(mkp13)
mkp23_array.append(mkp23)
mpk12_array.append(mpk12)
mpk13_array.append(mpk13)
mpk23_array.append(mpk23)
mppp_array .append(mppp )
mppk_array .append(mppk )
mpkp_array .append(mpkp )
mkpp_array .append(mkpp )
mpkk_array .append(mpkk )
mkpk_array .append(mkpk )
mkkp_array .append(mkkp )
mkkk_array .append(mkkk )
dataset['mpp12'] = mpp12_array
dataset['mpp13'] = mpp13_array
dataset['mpp23'] = mpp23_array
dataset['mkk12'] = mkk12_array
dataset['mkk13'] = mkk13_array
dataset['mkk23'] = mkk23_array
dataset['mkp12'] = mkp12_array
dataset['mkp13'] = mkp13_array
dataset['mkp23'] = mkp23_array
dataset['mpk12'] = mpk12_array
dataset['mpk13'] = mpk13_array
dataset['mpk23'] = mpk23_array
dataset['mppp'] = mppp_array
dataset['mppk'] = mppk_array
dataset['mpkp'] = mpkp_array
dataset['mkpp'] = mkpp_array
dataset['mpkk'] = mpkk_array
dataset['mkpk'] = mkpk_array
dataset['mkkp'] = mkkp_array
dataset['mkkk'] = mkkk_array
print 'staging dataset...'
dataset.to_root(
'/Users/manzoni/Documents/tau3mu2018/16april/ntuples/data_enriched_16apr2018v16_extra_masses.root',
key='tree',
store_index=False
)
print '\t...done'
| [
"[email protected]"
] | |
727fc97005633da5105c31d875de048d679cb327 | 17268419060d62dabb6e9b9ca70742f0a5ba1494 | /pp/samples/191_mirror_h.py | 5d5f8caa93016a9121b917401e02a52f9b2ade76 | [
"MIT"
] | permissive | TrendingTechnology/gdsfactory | a19124423b12cbbb4f35b61f33303e9a012f82e5 | c968558dba1bae7a0421bdf49dc192068147b776 | refs/heads/master | 2023-02-22T03:05:16.412440 | 2021-01-24T03:38:00 | 2021-01-24T03:38:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | if __name__ == "__main__":
import pp
c = pp.Component()
m1 = c << pp.c.mmi1x2()
m2 = c << pp.c.mmi1x2()
m2.reflect_h(port_name="E1")
m2.movex(10)
pp.show(c)
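    # The second MMI is mirrored horizontally about its port "E1" and shifted 10 um
    # in x before the component is displayed with pp.show (typically in KLayout).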
| [
"[email protected]"
] |