the-stack_0_7641
import argparse
import os.path
import re
import sys
def Convert(fh):
for line in fh.readlines():
#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT FHS-CRS-4744.chr20.recaled
chrom,pos,locus_id,ref,alt,qual,fil,info,formt,sample = line.strip().split('\t')
pos = int(pos)
# 1 46366107 . T <DEL> . PASS SVLEN=-329;SVTYPE=DEL;END=46366436 GT:ABC:PE:REFCOUNTS 0/1:0,0,6:6:17,8
svtype = alt.split(":")[0].lstrip("<").rstrip(">")
end = int(info.split("END=")[1].split(";")[0])
size = abs(int(info.split("SVLEN=")[1].split(";")[0]))
if(fil == "PASS"):
if svtype == "DEL":
if size >= 100:
fout.write("%s\t.\t%d\t.\t.\t%d\t.\tDEL\t%d\t%s\n" % (chrom, min(pos, end),max(pos, end), abs(size), info))
if svtype == "DUP":
if size >= 100:
fout.write("%s\t.\t%d\t.\t.\t%d\t.\tINS\t%d\t%s\n" % (chrom, min(pos, end),max(pos, end), abs(size), info))
if svtype == "INV":
if size >= 100:
fout.write("%s\t.\t%d\t.\t.\t%d\t.\tINV\t%d\t%s\n" % (chrom, min(pos, end),max(pos, end), abs(size), info))
def pullHeader(fh):
"""
Pulls the header from vcf file
"""
while True:
line = fh.readline()
if line.startswith("##INFO=<ID=IMPRECISE"):
continue
if line.startswith("##INFO=<ID=PRECISE"):
continue
if line.startswith("##INFO="):
#sys.stdout.write(line)
fout.write(line)
if line.startswith("##FOR"):
#sys.stdout.write(line.replace("FORMAT","INFO"))
fout.write(line.replace("FORMAT","INFO"))
if line.startswith("#CH"):
return
if line == "":  # readline() returns an empty string at EOF
sys.stderr.write("ERROR! Reached end of file before finding the #CHROM header line.\n")
exit(10)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("version", type=str, help="Breakseq2 Version")
parser.add_argument("input", type=str, help="Breakseq2 final vcf file. Concatenate all the vcf output files into a single vcf")
parser.add_argument("source", type=str, help="unique identifier. Fullpath to bam or samplename")
args = parser.parse_args()
samplename = os.path.splitext(os.path.basename(args.source))[0]
svpfile = samplename + "_BS.svp"
with open(args.input, 'r') as fh, open(svpfile, 'w') as fout:
fout.write("##program=" + args.version+"\n")
fout.write("##abbrev=BS"+"\n")
fout.write("##source=" + args.source+"\n")
pullHeader(fh)
mergeHeader1 = "#CHROM\tOUTERSTART\tSTART\tINNERSTART\tINNEREND\tEND\tOUTEREND\tTYPE\tSIZE\tINFO"
fout.write(mergeHeader1+"\n")
Convert(fh)
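# Usage sketch (the script name and paths below are placeholders, not taken from
# the original repository):
#
#   python breakseq2_to_svp.py 2.2 sample.breakseq2.vcf /data/FHS-CRS-4744.bam
#
# This derives the sample name from the basename of the third argument
# ("FHS-CRS-4744") and writes the converted calls to "FHS-CRS-4744_BS.svp".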
the-stack_0_7642
from flask import Flask, request
from flask_restful import Resource, Api
from flask_cors import CORS
import os
import json
import pandas as pd
import datetime
import time
from filelock import Timeout, FileLock
app = Flask(__name__)
api = Api(app)
CORS(app)
with open("server_config.json") as f:
config = json.load(f)
lock_assigned_sequences = FileLock(config["assignedSequencesFile"] + ".lock")
lock_data = FileLock(config["dataFile"] + ".lock")
lock_data_sandbox = FileLock(config["dataSandboxFile"] + ".lock")
lock_when_to_stop = FileLock(config["dashboardFile"] + ".lock")
lock_submission_file = FileLock(config["submitFile"] + ".lock")
class InitializePreview(Resource):
def initialize_vars(self):
self.trial_feedback = request.args.get("trialFeedback")
self.timestamp = datetime.datetime.now()
def get_sequence_info(self, feedback):
assigned_file = config["previewSequenceFile"]
with open(assigned_file) as f:
sequence_info = json.load(f)
index_to_run = 0
run_info = {"index_to_run": index_to_run,
"sequenceFile": assigned_file,
"images": sequence_info["sequences"][index_to_run],
"blocked": int(False),
"finished": int(False),
"timestamp": self.timestamp.__str__()}
# only send correct answers along if you will be giving trial feedback (less risk of
# tech-savvy workers using it to get perfect scores)
if feedback:
run_info["conditions"] = sequence_info["types"][run_info["index_to_run"]]
return run_info
def get(self):
self.initialize_vars()
return_dict = self.get_sequence_info(self.trial_feedback)
return_dict["running"] = False
return return_dict
class InitializeRun(Resource):
def initialize_vars(self):
self.workerId = request.args.get("workerId")
self.medium = request.args.get("medium")
self.trial_feedback = request.args.get("trialFeedback")
self.assigned_sequences_df = pd.read_csv(config["assignedSequencesFile"], delimiter=",")
self.assigned_sequences_df = self.assigned_sequences_df.set_index("workerId", drop=False)
self.timestamp = datetime.datetime.now()
def available_sequences(self):
sequence_files = os.listdir(os.path.join(config["sequenceDir"]))
assigned_files = self.assigned_sequences_df["sequenceFile"].values.tolist()
assigned_files = [os.path.basename(x) for x in assigned_files]
available_files = [x for x in sequence_files if x not in assigned_files and x.endswith(".json")]
available_files = [x for x in available_files if not os.path.samefile(os.path.join(config["sequenceDir"],x), config["previewSequenceFile"])]
available_files = sorted(available_files)
return available_files
def assign_new_sequence(self, workerId):
if workerId in self.assigned_sequences_df["workerId"].values:
raise Exception('cannot assign new sequence, workerId already has one')
else:
available_files = self.available_sequences()
assigned_file = os.path.join(config["sequenceDir"], available_files[0])
new_row = {"workerId": workerId,
"sequenceFile": assigned_file,
"indexToRun": int(0),
"blocked": False,
"finished": False,
"timestamp": self.timestamp.__str__(),
"version": config["version"]}
self.assigned_sequences_df = self.assigned_sequences_df.append(pd.DataFrame(new_row, index=[0]),
ignore_index=True)
self.assigned_sequences_df = self.assigned_sequences_df.set_index("workerId", drop=False)
def already_running(self, workerId, timestamp, new_worker):
if new_worker:
return False
else:
# if previous initialization was less than 4 minutes ago, the session is probably still active
previous_timestamp = self.assigned_sequences_df.loc[workerId, "timestamp"]
previous_timestamp = datetime.datetime.strptime(previous_timestamp.__str__(), "%Y-%m-%d %H:%M:%S.%f")
return (timestamp - previous_timestamp) < datetime.timedelta(minutes=4)
def get_sequence_info(self, workerId, feedback):
assigned_file = self.assigned_sequences_df.loc[workerId, "sequenceFile"]
with open(assigned_file) as f:
sequence_info = json.load(f)
index_to_run = int(self.assigned_sequences_df.loc[workerId, "indexToRun"])
run_info = {"index_to_run": index_to_run,
"sequenceFile": str(self.assigned_sequences_df.loc[workerId, "sequenceFile"]),
"images": sequence_info["sequences"][index_to_run],
"blocked": int(self.assigned_sequences_df.loc[workerId, "blocked"]),
"finished": int(self.assigned_sequences_df.loc[workerId, "finished"]),
"maintenance": config["maintenance"],
"timestamp": self.timestamp.__str__()}
# only send correct answers along if you will be giving trial feedback (less risk of
# tech-savvy workers using it to get perfect scores)
if feedback:
run_info["conditions"] = sequence_info["types"][run_info["index_to_run"]]
return run_info
def update_df(self, run_info):
if not (run_info["running"] or run_info["finished"] or run_info["blocked"] or run_info["maintenance"]):
if run_info["index_to_run"] + 1 >= config["maxNumRuns"]:
self.assigned_sequences_df.at[self.workerId, "finished"] = True
else:
self.assigned_sequences_df.at[self.workerId, "indexToRun"] = run_info["index_to_run"] + 1
self.assigned_sequences_df.at[self.workerId, "timestamp"] = self.timestamp.__str__()
self.assigned_sequences_df.to_csv(config["assignedSequencesFile"], index=False)
def get(self):
with lock_assigned_sequences:
self.initialize_vars()
# assign sequence file if worker is new
if self.workerId not in self.assigned_sequences_df["workerId"].values:
new_worker = True
self.assign_new_sequence(self.workerId)
else:
new_worker = False
# get assigned sequence info
return_dict = self.get_sequence_info(self.workerId, self.trial_feedback)
# check if another run might be active
return_dict["running"] = self.already_running(self.workerId, self.timestamp, new_worker)
# update the database
self.update_df(return_dict)
return return_dict
class FinalizeRun(Resource):
def initialize_vars(self):
start = time.time()
self.data_received = request.get_json()
self.medium = self.data_received["medium"]
self.sequence_info = self.get_sequence_info(self.data_received["sequenceFile"])
self.return_dict = \
{"blocked": False, # initializing, will be set to True if blocked,
"finished": self.data_received["indexToRun"] + 1 >= config["maxNumRuns"],
"maintenance": config["maintenance"]}
end = time.time()
print("initialized vars, took ", end - start, " seconds")
def get_sequence_info(self, sequence_file):
with open(sequence_file) as f:
sequence_info = json.load(f)
return sequence_info
def update_data_file(self):
start = time.time()
data_received = self.data_received
sequence_info = self.sequence_info
run_index = data_received["indexToRun"]
num_trials = data_received["numTrials"]
meta_data = {
"medium": data_received["medium"],
"sequenceFile": data_received["sequenceFile"],
"workerId": data_received["workerId"],
"assignmentId": data_received["assignmentId"],
"timestamp": data_received["timestamp"],
"runIndex": run_index,
"initTime": data_received["initTime"],
"finishTime": data_received["finishTime"]
}
# Setting data file and lock
if self.medium == "mturk_sandbox":
data_file = config["dataSandboxFile"]
lock = lock_data_sandbox
else:
data_file = config["dataFile"]
lock = lock_data
print(lock)
with lock:
data_all = pd.read_csv(data_file)
# Trial data
data = {
"response": [1 if i in data_received["responseIndices"] else 0 for i in range(num_trials)],
"trialIndex": list(range(data_received["numTrials"])),
"condition": sequence_info["types"][run_index][0:num_trials],
"image": sequence_info["sequences"][run_index][0:num_trials]
}
df = pd.DataFrame.from_dict(data, orient='index').transpose()
df = pd.concat([df, pd.DataFrame([meta_data] * num_trials)], axis=1)
data_all = data_all.append(df, ignore_index=True)
data_all.to_csv(data_file, index=False)
end = time.time()
print("updated data df, took ", end - start, " seconds")
def compute_scores(self):
start = time.time()
data_received = self.data_received
sequence_info = self.sequence_info
run_index = data_received["indexToRun"]
num_trials = data_received["numTrials"]
repeat_indices = []
for i in range(num_trials):
if sequence_info["types"][run_index][i] in config["conditionLabels"]["repeatTrials"]:
repeat_indices.append(i)
no_repeat_indices = []
for i in range(num_trials):
if sequence_info["types"][run_index][i] in config["conditionLabels"]["noRepeatTrials"]:
no_repeat_indices.append(i)
hits = set(repeat_indices) & set(data_received["responseIndices"])
false_alarms = set(no_repeat_indices) & set(data_received["responseIndices"])
end = time.time()
print("computed scores, took ", end - start, " seconds")
return {"hit_rate": float(len(hits)) / len(repeat_indices) if len(repeat_indices) > 0 else -1,
"false_alarm_num": len(false_alarms)}
def evaluate_vigilance(self, vig_hr_criterion, far_criterion):
start = time.time()
# initializing
data_received = self.data_received
sequence_info = self.sequence_info
run_index = data_received["indexToRun"]
num_trials = data_received["numTrials"]
passing_criteria = True
vig_repeat_indices = [i for i in range(num_trials) if sequence_info["types"][run_index][i] == "vig repeat"]
no_repeat_indices = [i for i in range(num_trials) if sequence_info["types"][run_index][i] in ["filler",
"target",
"vig"]]
if len(vig_repeat_indices) > 0:
vig_hits = set(vig_repeat_indices) & set(data_received["responseIndices"])
vig_hit_rate = float(len(vig_hits)) / len(vig_repeat_indices)
if vig_hit_rate < vig_hr_criterion:
passing_criteria = False
false_alarms = set(no_repeat_indices) & set(data_received["responseIndices"])
false_alarm_rate = float(len(false_alarms))/len(no_repeat_indices)
if false_alarm_rate >= far_criterion:
passing_criteria = False
end = time.time()
print("evaluated vigilance, took ", end - start, " seconds")
return "pass" if passing_criteria else "fail"
def block_worker(self, workerId):
start = time.time()
print("blocking")
self.return_dict["blocked"] = True
with lock_assigned_sequences:
assigned_sequences_df = pd.read_csv(config["assignedSequencesFile"], delimiter=",")
assigned_sequences_df = assigned_sequences_df.set_index("workerId", drop=False)
assigned_sequences_df.at[workerId, "blocked"] = True
assigned_sequences_df.to_csv(config["assignedSequencesFile"], index=False)
end = time.time()
print("blocked worker, took ", end - start, " seconds")
def update_dashboard(self, valid):
start = time.time()
with lock_when_to_stop:
with open(config["dashboardFile"]) as f:
dashboard = json.load(f)
dashboard["numBlocksTotalSoFar"] += 1
dashboard["numValidBlocksSoFar"] += valid
with open(config["dashboardFile"], 'w') as fp:
json.dump(dashboard, fp)
end = time.time()
print("updated when to stop, took ", end - start, " seconds")
def post(self):
self.initialize_vars()
if not self.data_received['preview']:
self.update_data_file()
valid = 0
# Check vigilance performance and block if necessary
if self.data_received['workerId'] not in config["whitelistWorkerIds"]:
if self.evaluate_vigilance(config["blockingCriteria"]["vigHrCriterion"],
config["blockingCriteria"]["farCriterion"]) == "fail":
self.block_worker(self.data_received["workerId"])
else:
valid = 1
else:
valid = 1
self.update_dashboard(valid)
# Add scores to return_dict
self.return_dict.update(self.compute_scores())
return self.return_dict
class SubmitRuns(Resource):
def initialize_vars(self):
self.data_received = request.get_json()
def update_submissions(self):
data = self.data_received
with lock_submission_file:
submitted_runs_df = pd.read_csv(config["submitFile"])
submitted_runs_df = submitted_runs_df.append(data, ignore_index=True)
submitted_runs_df.to_csv(config["submitFile"], index=False)
def post(self):
self.initialize_vars()
self.update_submissions()
return ("submission successful")
api.add_resource(InitializePreview, '/initializepreview')
api.add_resource(InitializeRun, '/initializerun')
api.add_resource(FinalizeRun, '/finalizerun')
api.add_resource(SubmitRuns, '/submitruns')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=config["port"], debug=True)
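# Client-side sketch (not part of the server): one way the endpoints above could be
# exercised with the `requests` library. Host and port are assumptions; the query
# parameters mirror the request.args.get() calls in the resources above.
#
# import requests
#
# base = "http://localhost:5000"
# # preview run, no worker bookkeeping
# r = requests.get(base + "/initializepreview", params={"trialFeedback": 1})
# print(r.json()["images"][:3])
#
# # real run for a worker
# r = requests.get(base + "/initializerun",
#                  params={"workerId": "w123", "medium": "mturk", "trialFeedback": 0})
# print(r.json()["index_to_run"], r.json()["blocked"])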
the-stack_0_7644
from distutils.version import LooseVersion
import pytest
import torch
from mmcls.models.utils import channel_shuffle, is_tracing, make_divisible
def test_make_divisible():
# test min_value is None
result = make_divisible(34, 8, None)
assert result == 32
# test when the divisible value would fall below min_ratio * value and gets bumped up
result = make_divisible(10, 8, min_ratio=0.9)
assert result == 16
# test min_ratio = 0.8
result = make_divisible(33, 8, min_ratio=0.8)
assert result == 32
def test_channel_shuffle():
x = torch.randn(1, 24, 56, 56)
with pytest.raises(AssertionError):
# num_channels should be divisible by groups
channel_shuffle(x, 7)
groups = 3
batch_size, num_channels, height, width = x.size()
channels_per_group = num_channels // groups
out = channel_shuffle(x, groups)
# test the output value when groups = 3
for b in range(batch_size):
for c in range(num_channels):
c_out = c % channels_per_group * groups + c // channels_per_group
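# e.g. with groups=3 and num_channels=6 (channels_per_group=2), input channel c maps
# to output channel c_out as 0->0, 1->3, 2->1, 3->4, 4->2, 5->5, i.e. the grouped
# layout [0,1 | 2,3 | 4,5] is interleaved into [0,2,4,1,3,5].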
for i in range(height):
for j in range(width):
assert x[b, c, i, j] == out[b, c_out, i, j]
@pytest.mark.skipif(
LooseVersion(torch.__version__) < LooseVersion('1.6.0'),
reason='torch.jit.is_tracing is not available before 1.6.0')
def test_is_tracing():
def foo(x):
if is_tracing():
return x
else:
return x.tolist()
x = torch.rand(3)
# test without trace
assert isinstance(foo(x), list)
# test with trace
traced_foo = torch.jit.trace(foo, (torch.rand(1), ))
assert isinstance(traced_foo(x), torch.Tensor)
the-stack_0_7645
# Copyright 2017 Braxton Mckee
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import test_looper.core.algebraic as Algebraic
from test_looper.core.hash import sha_hash
import unittest
expr = Algebraic.Alternative("Expr")
expr.Constant = {'value': int}
expr.Add = {'l': expr, 'r': expr}
expr.Sub = {'l': expr, 'r': expr}
expr.Mul = {'l': expr, 'r': expr}
class AlgebraicTests(unittest.TestCase):
def test_basic(self):
X = Algebraic.Alternative('X', A = {}, B = {})
xa = X.A()
xb = X.B()
self.assertTrue(xa.matches.A)
self.assertFalse(xa.matches.B)
self.assertTrue(xb.matches.B)
self.assertFalse(xb.matches.A)
def test_stable_sha_hashing(self):
#adding default values to a type shouldn't disrupt its hashes
leaf = Algebraic.Alternative("Leaf")
leaf.A = {'a': int}
leaf.B = {'b': int}
leaf.setCreateDefault(lambda: leaf.A(0))
not_leaf = Algebraic.Alternative("NotLeaf")
not_leaf.A = {'z': float, 'leaf': leaf}
not_leaf2 = Algebraic.Alternative("NotLeaf")
not_leaf2.A = {'z': float, 'leaf': leaf, 'int': int}
a_simple_notleaf = not_leaf.A(z=10.0,_fill_in_missing=True)
a_simple_notleaf2 = not_leaf2.A(z=10.0,_fill_in_missing=True)
a_different_notleaf = not_leaf.A(z=10.0, leaf=leaf.B(b=10),_fill_in_missing=True)
a_different_notleaf2 = not_leaf2.A(z=10.0, leaf=leaf.B(b=10),_fill_in_missing=True)
a_final_different_notleaf = not_leaf2.A(z=10.0, leaf=leaf.B(b=10),int=123,_fill_in_missing=True)
self.assertEqual(sha_hash(a_simple_notleaf), sha_hash(a_simple_notleaf2))
self.assertNotEqual(sha_hash(a_simple_notleaf), sha_hash(a_different_notleaf))
self.assertEqual(sha_hash(a_different_notleaf), sha_hash(a_different_notleaf2))
self.assertNotEqual(sha_hash(a_simple_notleaf), sha_hash(a_final_different_notleaf))
self.assertNotEqual(sha_hash(a_different_notleaf), sha_hash(a_final_different_notleaf))
def test_field_lookup(self):
X = Algebraic.Alternative('X', A = {'a': int}, B = {'b': float})
self.assertEqual(X.A(10).a, 10)
with self.assertRaises(AttributeError):
X.A(10).b
self.assertEqual(X.B(11.0).b, 11.0)
with self.assertRaises(AttributeError):
X.B(11.0).a
def test_lists(self):
X = Algebraic.Alternative('X')
X.A = {'val': int}
X.B = {'val': Algebraic.List(X)}
xa = X.A(10)
xb = X.B([xa, X.A(11)])
self.assertTrue(xa.matches.A)
self.assertTrue(xb.matches.B)
self.assertTrue(isinstance(xb.val, tuple))
self.assertTrue(len(xb.val) == 2)
def test_stringification(self):
self.assertEqual(
repr(expr.Add(l = expr(10), r = expr(20))),
"Expr.Add(l=Expr.Constant(value=10),r=Expr.Constant(value=20))"
)
def test_isinstance(self):
self.assertTrue(isinstance(expr(10), Algebraic.AlternativeInstance))
self.assertTrue(isinstance(expr(10), expr.Constant))
def test_coercion(self):
Sub = Algebraic.Alternative('Sub', I={}, S={})
with self.assertRaises(Exception):
Sub.I(Sub.S)
X = Algebraic.Alternative('X', A={'val': Sub})
X.A(val=Sub.S())
with self.assertRaises(Exception):
X.A(val=Sub.S)
def test_coercion_null(self):
Sub = Algebraic.Alternative('Sub', I={}, S={})
X = Algebraic.Alternative('X', I={'val': Algebraic.Nullable(Sub)})
self.assertTrue(X(Sub.I()).val.matches.Value)
def test_equality(self):
for i in range(10):
self.assertEqual(expr.Constant(i).__sha_hash__(), expr.Constant(i).__sha_hash__())
self.assertEqual(hash(expr.Constant(i)), hash(expr.Constant(i)))
self.assertEqual(expr.Constant(i), expr.Constant(i))
self.assertEqual(
expr.Add(l=expr.Constant(i),r=expr.Constant(i+1)),
expr.Add(l=expr.Constant(i),r=expr.Constant(i+1))
)
self.assertNotEqual(
expr.Add(l=expr.Constant(i),r=expr.Constant(i+1)),
expr.Add(l=expr.Constant(i),r=expr.Constant(i+2))
)
self.assertNotEqual(expr.Constant(i), expr.Constant(i+1))
def test_algebraics_in_dicts(self):
d = {}
for i in range(10):
d[expr.Constant(i)] = i
d[expr.Add(l=expr.Constant(i),r=expr.Constant(i+1))] = 2*i+1
for i in range(10):
self.assertEqual(d[expr.Constant(i)], i)
self.assertEqual(d[expr.Add(l=expr.Constant(i),r=expr.Constant(i+1))], 2*i+1)
the-stack_0_7649
import asyncio
from discord.ext import commands
from os import getenv
from src.internal.bot import Bot
from src.internal.context import Context
from src.internal.checks import in_channel
class Trivia(commands.Cog):
"""Trivia questions."""
def __init__(self, bot: Bot):
self.bot = bot
@commands.group(name="trivia", invoke_without_command=True)
@commands.cooldown(rate=1, per=60, type=commands.BucketType.member)
@in_channel(int(getenv("CHANNEL")))
async def trivia(self, ctx: Context, unique: bool = False):
"""Get a trivia question."""
pass
@trivia.command(name="add")
@commands.check_any(commands.is_owner(), commands.has_role(337442104026595329))
async def trivia_add(self, ctx: Context, *, question: str):
"""Add a new trivia question."""
await ctx.reply("Enter valid answers, and .stop to finish.")
answers = []
while True:
try:
msg = await self.bot.wait_for("message", check=lambda m: m.channel == ctx.channel and m.author == ctx.author, timeout=15)
except asyncio.TimeoutError:
return await ctx.reply("Selection timed out, exiting.")
answer = msg.content
if answer == ".stop":
break
else:
answers.append(answer)
answers = "`".join(answers)
await self.bot.db.create_trivia_question(ctx.author.id, question, answers)
def setup(bot: Bot):
bot.add_cog(Trivia(bot))
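# Loading sketch (the module path is an assumption about the project layout): the cog
# is registered at startup with something like
#   bot.load_extension("src.exts.trivia")
# which makes discord.py call the setup() hook above. Note that the CHANNEL
# environment variable must be set before this module is imported, because the
# in_channel decorator argument is evaluated at import time.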
the-stack_0_7651
import cv2
class rovio:
chase = None
detector = None
rovioControl = None
def __init__(self, chase, detector, rovioControl):
self.chase = chase
self.detector = detector
self.rovioControl = rovioControl
def action(self):
# ROVIO detect start here
# keep rotate right to search for Rovio
boxes = self.rovio_detector.predict(ori_frame)
if len(boxes) < 1:
self.rovio.rotate_right(angle=15, speed=1)
else:
# Get the nearest one to move to (Biggest Area)
x, y, w, h = 0, 0, 0, 0
max_box_i = 0
max_area = 0
for index, box in enumerate(boxes):
width = box.w + box.x
height = box.h + box.y
area = (box.w + box.x) * (box.h + box.y)
print(width / height)
if max_area < area and (width / height > 1.1 and width / height < 1.2):
max_area = area
max_box_i = index
x, y, w, h = boxes[max_box_i].get_position()
# get center point of the box
xmin = int((box.x - box.w / 2) * frame.shape[1])
xmax = int((box.x + box.w / 2) * frame.shape[1])
ymin = int((box.y - box.h / 2) * frame.shape[0])
ymax = int((box.y + box.h / 2) * frame.shape[0])
cv2.rectangle(ori_frame, (xmin, ymin), (xmax, ymax), (0, 255, 0), 3)
cv2.putText(ori_frame,
labels[box.get_label()] + ' ' + str(box.get_score()),
(xmin, ymin - 13),
cv2.FONT_HERSHEY_SIMPLEX,
1e-3 * frame.shape[0],
(0, 255, 0), 2)
cv2.imshow('detector', ori_frame)
# Assume x and y is the center point
if (x * frame.shape[0] >= 213 and x * frame.shape[0] <= 426):
self.rovio.forward()
elif x * frame.shape[1] > frame.shape[1] / 2:
self.rovio.rotate_right(angle=15, speed=1)
else:
self.rovio.rotate_left(angle=15, speed=1)
#####################################################
# Perform Floor floor_finder #
#####################################################
# If safe zone is more than 80 then check for infrared detection
if self.floor_finder() > 80:
pass
# if(not self.rovio.ir()):
# self.rovio.api.set_ir(1)
# if (not self.rovio.obstacle()):
# self.rovio.forward()
# self.rovio.forward()
# self.rovio.forward()
# self.rovio.forward()
# self.rovio.forward()
# self.rovio.forward()
# else:
# self.rovio.rotate_right(angle=20, speed=2)
# Rotate right is safe zone is smaller than 80 pixels
else:
pass
# self.rovio.rotate_right(angle=20, speed=2)
# If Button Pressed, onAction
# Use ASCII for decode
self.key = cv2.waitKey(20)
if self.key > 0:
# print self.key
pass
if self.key == 114: # r
self.rovio.turn_around()
elif self.key == 63233 or self.key == 115: # down or s
self.rovio.backward(speed=7)
elif self.key == 63232 or self.key == 119: # up or w
self.rovio.forward(speed=1)
elif self.key == 63234 or self.key == 113: # left or a
self.rovio.rotate_left(angle=12, speed=5)
elif self.key == 63235 or self.key == 101: # right or d
self.rovio.rotate_right(angle=12, speed=5)
elif self.key == 97: # left or a
self.rovio.left(speed=1)
elif self.key == 100: # right or d
self.rovio.right(speed=1)
elif self.key == 44: # comma
self.rovio.head_down()
elif self.key == 46: # period
self.rovio.head_middle()
elif self.key == 47: # slash
self.rovio.head_up()
elif self.key == 32: # Space Bar, pressed then perform face detection
flag = False
# self.rovio.stop()
# self.face_detection()
the-stack_0_7653
"""Various sources for providing generalized Beaver triples for the Pond
protocol."""
import abc
import logging
import random
import tensorflow as tf
from ...config import get_config
from ...utils import wrap_in_variables, reachable_nodes, unwrap_fetches
logger = logging.getLogger('tf_encrypted')
class TripleSource(abc.ABC):
"""Base class for triples sources."""
@abc.abstractmethod
def cache(self, a, cache_updater):
pass
@abc.abstractmethod
def initializer(self):
pass
@abc.abstractmethod
def generate_triples(self, fetches):
pass
class BaseTripleSource(TripleSource):
"""
Partial triple source adding graph nodes for constructing and keeping track
of triples and their use. Subclasses must implement `_build_queues`.
"""
def __init__(self, player0, player1, producer):
config = get_config()
self.player0 = config.get_player(player0) if player0 else None
self.player1 = config.get_player(player1) if player1 else None
self.producer = config.get_player(producer) if producer else None
def mask(self, backing_dtype, shape):
with tf.name_scope("triple-generation"):
with tf.device(self.producer.device_name):
a0 = backing_dtype.sample_uniform(shape)
a1 = backing_dtype.sample_uniform(shape)
a = a0 + a1
d0, d1 = self._build_queues(a0, a1)
return a, d0, d1
def mul_triple(self, a, b):
with tf.name_scope("triple-generation"):
with tf.device(self.producer.device_name):
ab = a * b
ab0, ab1 = self._share(ab)
return self._build_queues(ab0, ab1)
def square_triple(self, a):
with tf.name_scope("triple-generation"):
with tf.device(self.producer.device_name):
aa = a * a
aa0, aa1 = self._share(aa)
return self._build_queues(aa0, aa1)
def matmul_triple(self, a, b):
with tf.name_scope("triple-generation"):
with tf.device(self.producer.device_name):
ab = a.matmul(b)
ab0, ab1 = self._share(ab)
return self._build_queues(ab0, ab1)
def conv2d_triple(self, a, b, strides, padding):
with tf.device(self.producer.device_name):
with tf.name_scope("triple"):
ab = a.conv2d(b, strides, padding)
ab0, ab1 = self._share(ab)
return self._build_queues(ab0, ab1)
def indexer_mask(self, a, slc):
with tf.name_scope("mask-transformation"):
with tf.device(self.producer.device_name):
a_sliced = a[slc]
return a_sliced
def transpose_mask(self, a, perm):
with tf.name_scope("mask-transformation"):
with tf.device(self.producer.device_name):
a_t = a.transpose(perm=perm)
return a_t
def strided_slice_mask(self, a, args, kwargs):
with tf.name_scope("mask-transformation"):
with tf.device(self.producer.device_name):
a_slice = a.strided_slice(args, kwargs)
return a_slice
def split_mask(self, a, num_split, axis):
with tf.name_scope("mask-transformation"):
with tf.device(self.producer.device_name):
bs = a.split(num_split=num_split, axis=axis)
return bs
def stack_mask(self, bs, axis):
factory = bs[0].factory
with tf.name_scope("mask-transformation"):
with tf.device(self.producer.device_name):
b_stacked = factory.stack(bs, axis=axis)
return b_stacked
def concat_mask(self, bs, axis):
factory = bs[0].factory
with tf.name_scope("mask-transformation"):
with tf.device(self.producer.device_name):
b_stacked = factory.concat(bs, axis=axis)
return b_stacked
def reshape_mask(self, a, shape):
with tf.name_scope("mask-transformation"):
with tf.device(self.producer.device_name):
a_reshaped = a.reshape(shape)
return a_reshaped
def expand_dims_mask(self, a, axis):
with tf.name_scope("mask-transformation"):
with tf.device(self.producer.device_name):
a_e = a.expand_dims(axis=axis)
return a_e
def squeeze_mask(self, a, axis):
with tf.name_scope("mask-transformation"):
with tf.device(self.producer.device_name):
a_squeezed = a.squeeze(axis=axis)
return a_squeezed
def _share(self, secret):
with tf.name_scope("share"):
share0 = secret.factory.sample_uniform(secret.shape)
share1 = secret - share0
# randomized swap to distribute who gets the seed
if random.random() < 0.5:
share0, share1 = share1, share0
return share0, share1
@abc.abstractmethod
def _build_queues(self, c0, c1):
"""
Method used to inject buffers between mask generation and use
(i.e. online vs offline). `c0` and `c1` represent the generated
masks and the method is expected to return a similar pair
of tensors.
"""
class OnlineTripleSource(BaseTripleSource):
"""
This triple source will generate triples as part of the online phase
using a dedicated third-party `producer`.
There is no need to call `generate_triples` nor `initialize`.
"""
def __init__(self, producer):
super().__init__(None, None, producer)
def cache(self, a, cache_updater):
with tf.device(self.producer.device_name):
updater, [a_cached] = wrap_in_variables(a)
return updater, a_cached
def initializer(self):
return tf.no_op()
def generate_triples(self, fetches):
return []
def _build_queues(self, c0, c1):
return c0, c1
class QueuedOnlineTripleSource(BaseTripleSource):
"""
Similar to `OnlineTripleSource` but with in-memory buffering backed by
`tf.FIFOQueue`s.
"""
def __init__(self, player0, player1, producer, capacity=10):
super().__init__(player0, player1, producer)
self.capacity = capacity
self.queues = list()
self.triggers = dict()
def cache(self, a, cache_updater):
with tf.device(self.producer.device_name):
offline_updater, [a_cached] = wrap_in_variables(a)
self.triggers[cache_updater] = offline_updater
return tf.no_op(), a_cached
def initializer(self):
return tf.no_op()
def generate_triples(self, fetches):
if isinstance(fetches, (list, tuple)) and len(fetches) > 1:
logger.warning("Generating triples for a run involving more than "
"one fetch may introduce non-determinism that can "
"break the correspondence between the two phases "
"of the computation.")
unwrapped_fetches = unwrap_fetches(fetches)
reachable_operations = [node
for node in reachable_nodes(unwrapped_fetches)
if isinstance(node, tf.Operation)]
reachable_triggers = [self.triggers[op]
for op in reachable_operations
if op in self.triggers]
return reachable_triggers
def _build_triple_store(self, mask, player_id):
"""
Adds a tf.FIFOQueue to store mask locally on player.
"""
# TODO(Morten) taking `value` doesn't work for int100
raw_mask = mask.value
factory = mask.factory
dtype = mask.factory.native_type
shape = mask.shape
with tf.name_scope("triple-store-{}".format(player_id)):
q = tf.queue.FIFOQueue(
capacity=self.capacity,
dtypes=[dtype],
shapes=[shape],
)
e = q.enqueue(raw_mask)
d = q.dequeue()
d_wrapped = factory.tensor(d)
self.queues += [q]
self.triggers[d.op] = e
return d_wrapped
def _build_queues(self, c0, c1):
with tf.device(self.player0.device_name):
d0 = self._build_triple_store(c0, "0")
with tf.device(self.player1.device_name):
d1 = self._build_triple_store(c1, "1")
return d0, d1
"""
class PlaceholderTripleSource(BaseTripleSource):
# TODO(Morten) manually unwrap and re-wrap of values, should be hidden away
def __init__(self, player0, player1, producer):
super().__init__(player0, player1, producer)
self.placeholders = list()
def _build_queues(self, c0, c1):
with tf.device(self.player0.device_name):
r0 = tf.placeholder(
dtype=c0.factory.native_type,
shape=c0.shape,
)
d0 = c0.factory.tensor(r0)
with tf.device(self.player1.device_name):
r1 = tf.placeholder(
dtype=c1.factory.native_type,
shape=c1.shape,
)
d1 = c1.factory.tensor(r1)
self.placeholders += [r0, r1]
return d0, d1
""" #pylint: disable=pointless-string-statement
"""
class DatasetTripleSource(BaseTripleSource):
# TODO(Morten) manually unwrap and re-wrap of values, should be hidden away
def __init__(
self,
player0,
player1,
producer,
capacity=10,
directory="/tmp/triples/",
support_online_running=False,
):
super().__init__(player0, player1, producer)
self.capacity = capacity
self.dequeuers = list()
self.enqueuers = list()
self.initializers = list()
self.directory = directory
self.support_online_running = support_online_running
if support_online_running:
self.dequeue_from_file = tf.placeholder_with_default(True,
shape=[])
def _build_queues(self, c0, c1):
def dataset_from_file(filename, dtype, shape):
def parse(x):
res = tf.parse_tensor(x, out_type=dtype)
res = tf.reshape(res, shape)
return res
iterator = tf.data.TFRecordDataset(filename) \
.map(parse) \
.make_initializable_iterator()
return iterator.get_next(), iterator.initializer
def dataset_from_queue(queue, dtype, shape):
dummy = tf.data.Dataset.from_tensors(0).repeat(None)
iterator = (dummy.map(lambda _: queue.dequeue())
.make_initializable_iterator())
return iterator.get_next(), iterator.initializer
# gen = lambda: queue.dequeue()
# dataset = tf.data.Dataset.from_generator(gen, [dtype], [shape])
# iterator = dataset.make_one_shot_iterator()
# return iterator.get_next(), iterator.initializer
def sanitize_filename(filename):
return filename.replace('/', '__')
def build_triple_store(mask):
raw_mask = mask.value
factory = mask.factory
dtype = mask.factory.native_type
shape = mask.shape
with tf.name_scope("triple-store"):
q = tf.queue.FIFOQueue(
capacity=self.capacity,
dtypes=[dtype],
shapes=[shape],
)
e = q.enqueue(raw_mask)
f = os.path.join(self.directory, sanitize_filename(q.name))
if self.support_online_running:
r, i = tf.cond(
self.dequeue_from_file,
true_fn=lambda: dataset_from_file(f, dtype, shape),
false_fn=lambda: dataset_from_queue(q, dtype, shape),
)
else:
r, i = dataset_from_file(f, dtype, shape)
d = factory.tensor(r)
return f, q, e, d, i
with tf.device(self.player0.device_name):
f0, q0, e0, d0, i0 = build_triple_store(c0)
with tf.device(self.player1.device_name):
f1, q1, e1, d1, i1 = build_triple_store(c1)
self.dequeuers += [(f0, q0.dequeue()), (f1, q1.dequeue())]
self.enqueuers += [(e0, e1)]
self.initializers += [(i0, i1)]
return d0, d1
def initialize(self, sess, tag=None):
sess.run(self.initializers, tag=tag)
def generate_triples(self, sess, num=1, tag=None, save_to_file=True):
for _ in range(num):
sess.run(self.enqueuers, tag=tag)
if save_to_file:
self.save_triples_to_file(sess, num=num, tag=tag)
def save_triples_to_file(self, sess, num, tag=None):
if not os.path.exists(self.directory):
os.makedirs(self.directory)
for filename, dequeue in self.dequeuers:
with tf.io.TFRecordWriter(filename) as writer:
# size = sess.run(queue.size(), tag=tag)
for _ in range(num):
serialized = tf.io.serialize_tensor(dequeue)
triple = sess.run(serialized, tag=tag)
writer.write(triple)
""" #pylint: disable=pointless-string-statement
the-stack_0_7656
__AUTHOR__ = "hugsy"
__VERSION__ = 0.1
import os
import gdb
def fastbin_index(sz):
return (sz >> 4) - 2 if gef.arch.ptrsize == 8 else (sz >> 3) - 2
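# e.g. on 64-bit (ptrsize == 8) a chunk size of 0x20 maps to index 0, 0x30 to index 1, and so on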
def nfastbins():
return fastbin_index( (80 * gef.arch.ptrsize // 4)) - 1
def get_tcache_count():
if get_libc_version() < (2, 27):
return 0
count_addr = gef.heap.base_address + 2*gef.arch.ptrsize
count = p8(count_addr) if get_libc_version() < (2, 30) else p16(count_addr)
return count
@lru_cache(128)
def collect_known_values() -> dict:
arena = get_glibc_arena()
result = {} # format is { 0xaddress : "name" ,}
# tcache
if get_libc_version() >= (2, 27):
tcache_addr = GlibcHeapTcachebinsCommand.find_tcache()
for i in range(GlibcHeapTcachebinsCommand.TCACHE_MAX_BINS):
chunk, _ = GlibcHeapTcachebinsCommand.tcachebin(tcache_addr, i)
j = 0
while True:
if chunk is None:
break
result[chunk.data_address] = "tcachebins[{}/{}] (size={:#x})".format(i, j, (i+1)*0x10+0x10)
next_chunk_address = chunk.get_fwd_ptr(True)
if not next_chunk_address: break
next_chunk = GlibcChunk(next_chunk_address)
j += 1
chunk = next_chunk
# fastbins
for i in range(nfastbins()):
chunk = arena.fastbin(i)
j = 0
while True:
if chunk is None:
break
result[chunk.data_address] = "fastbins[{}/{}]".format(i, j)
next_chunk_address = chunk.get_fwd_ptr(True)
if not next_chunk_address: break
next_chunk = GlibcChunk(next_chunk_address)
j += 1
chunk = next_chunk
# other bins
for name in ["unorderedbins", "smallbins", "largebins"]:
fw, bk = arena.bin(i)
if bk==0x00 and fw==0x00: continue
head = GlibcChunk(bk, from_base=True).fwd
if head == fw: continue
chunk = GlibcChunk(head, from_base=True)
j = 0
while True:
if chunk is None: break
result[chunk.data_address] = "{}[{}/{}]".format(name, i, j)
next_chunk_address = chunk.get_fwd_ptr(True)
if not next_chunk_address: break
next_chunk = GlibcChunk(next_chunk_address, from_base=True)
j += 1
chunk = next_chunk
return result
@lru_cache(128)
def collect_known_ranges()->list:
result = []
for entry in get_process_maps():
if not entry.path:
continue
path = os.path.basename(entry.path)
result.append( (range(entry.page_start, entry.page_end), path) )
return result
@register_external_command
class VisualizeHeapChunksCommand(GenericCommand):
"""Visual helper for glibc heap chunks"""
_cmdline_ = "visualize-libc-heap-chunks"
_syntax_ = "{:s}".format(_cmdline_)
_aliases_ = ["heap-view",]
_example_ = "{:s}".format(_cmdline_)
def __init__(self):
super(VisualizeHeapChunksCommand, self).__init__(complete=gdb.COMPLETE_SYMBOL)
return
@only_if_gdb_running
def do_invoke(self, argv):
ptrsize = gef.arch.ptrsize
heap_base_address = gef.heap.base_address
arena = get_glibc_arena()
if not arena.top:
err("The heap has not been initialized")
return
top = align_address(int(arena.top))
base = align_address(heap_base_address)
colors = [ "cyan", "red", "yellow", "blue", "green" ]
cur = GlibcChunk(base, from_base=True)
idx = 0
known_ranges = collect_known_ranges()
known_values = collect_known_values()
while True:
base = cur.base_address
aggregate_nuls = 0
if base == top:
gef_print("{} {} {}".format(format_address(addr), format_address(gef.memory.read_integer(addr)) , Color.colorify(LEFT_ARROW + "Top Chunk", "red bold")))
gef_print("{} {} {}".format(format_address(addr+ptrsize), format_address(gef.memory.read_integer(addr+ptrsize)) , Color.colorify(LEFT_ARROW + "Top Chunk Size", "red bold")))
break
if cur.size == 0:
warn("incorrect size, heap is corrupted")
break
for off in range(0, cur.size, cur.ptrsize):
addr = base + off
value = gef.memory.read_integer(addr)
if value == 0:
if off != 0 and off != cur.size - cur.ptrsize:
aggregate_nuls += 1
if aggregate_nuls > 1:
continue
if aggregate_nuls > 2:
gef_print(" ↓")
gef_print(" [...]")
gef_print(" ↓")
aggregate_nuls = 0
text = "".join([chr(b) if 0x20 <= b < 0x7F else "." for b in gef.memory.read(addr, cur.ptrsize)])
line = "{} {}".format(format_address(addr), Color.colorify(format_address(value), colors[idx % len(colors)]))
line+= " {}".format(text)
derefs = dereference_from(addr)
if len(derefs) > 2:
line+= " [{}{}]".format(LEFT_ARROW, derefs[-1])
if off == 0:
line+= " Chunk[{}]".format(idx)
if off == cur.ptrsize:
line+= " {}{}{}{}".format(value&~7, "|NON_MAIN_ARENA" if value&4 else "", "|IS_MMAPED" if value&2 else "", "|PREV_INUSE" if value&1 else "")
# look in mapping
for x in known_ranges:
if value in x[0]:
line+= " (in {})".format(Color.redify(x[1]))
# look in known values
if value in known_values:
line += "{}{}".format(RIGHT_ARROW, Color.cyanify(known_values[value]))
gef_print(line)
next_chunk = cur.get_next_chunk()
if next_chunk is None:
break
next_chunk_addr = Address(value=next_chunk.data_address)
if not next_chunk_addr.valid:
warn("next chunk probably corrupted")
break
cur = next_chunk
idx += 1
return
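# Usage sketch: load this file into a gef-enabled gdb session and invoke the command
# it registers (the path below is a placeholder):
#
#   gef➤  source /path/to/visualize_heap_chunks.py
#   gef➤  visualize-libc-heap-chunks      # or its "heap-view" alias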
the-stack_0_7659
# ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: lineout.py #
# Tests: plots - Curve
# operators - Lineout
#
# Defect ID: none
#
# Programmer: Brad Whitlock
# Date: Fri Jan 3 14:22:41 PST 2003
#
# Modifications:
# Kathleen Bonnell, Mon Mar 17 09:54:14 PST 2003
# Added TestMultiVarLineout2D.
#
# Kathleen Bonnell, Tue Dec 23 09:29:29 PST 2003
# Added TestSAMRAI.
#
# Kathleen Bonnell, Thu Jul 29 11:59:35 PDT 2004
# Added tests for no-sampling version, renamed old Curve* tests to
# indicate they were generated with-sampling.
#
# Kathleen Bonnell, Thu Aug 5 10:44:22 PDT 2004
# Added calls to ResetPickLetter() and ResetLineoutColor() at the end of
# each test, so that failure on any one test won't necessarily affect the
# tests that follow.
#
# Kathleen Bonnell, Wed Nov 24 11:38:55 PST 2004
# Modified the way that sampling gets turned on due to changes in Lineout
# Attributes and GlobalLineoutAttributes. Use global version to turn
# sampling on and off.
#
# Kathleen Bonnell, Fri Feb 4 11:17:56 PST 2005
# Added TestDynamic, to test new global atts: curveOption and colorOption.
#
# Hank Childs, Wed Feb 16 07:34:07 PST 2005
# Rename variables that have unsupported characters.
#
# Kathleen Bonnell, Wed Mar 23 17:58:20 PST 2005
# Added TestDynamic2.
#
# Kathleen Bonnell, Thu May 19 11:26:39 PDT 2005
# Added TestTecPlot.
#
# Jeremy Meredith, Wed Sep 7 12:06:04 PDT 2005
# Allowed spaces in variable names.
#
# Kathleen Bonnell, Tue Jun 20 16:02:38 PDT 2006
# Added tests for GetOutputArray to Lineout2D.
#
# Kathleen Bonnell, Wed Jun 28 15:57:58 PDT 2006
# Added tests to TestDynamicLineout, testing having curves from different
# time-varying databases (same originating window and different originating
# window) in same curve window, and update the curves via the originating
# plots time-slider (bug '7002).
#
# Brad Whitlock, Wed Jan 14 16:12:10 PST 2009
# I changed the call to GetOutputArray. It's no longer a built-in function
# in the CLI.
#
# Mark C. Miller, Wed Jan 20 07:37:11 PST 2010
# Added ability to swtich between Silo's HDF5 and PDB data.
#
# Cyrus Harrison, Thu Mar 25 09:57:34 PDT 2010
# Added call(s) to DrawPlots() b/c of changes to the default plot state
# behavior when an operator is added.
#
# Brad Whitlock, Tue Mar 26 12:06:51 PDT 2013
# I added TestOperatorCreatedVariables.
#
# ----------------------------------------------------------------------------
def GetOutputArray(plotID = -1, winID = -1):
gInfo = GetGlobalAttributes()
oldWin = gInfo.windows[gInfo.activeWindow]
# Set the active window
if winID != -1:
SetActiveWindow(winID)
# Get the active plots
active = []
if plotID != -1:
pL = GetPlotList()
for i in range(pL.GetNumPlots()):
if pL.GetPlots(i).activeFlag:
active = active + [i]
SetActivePlots(plotID)
pInfo = GetPlotInformation()
# Restore the old active plots
if len(active) > 0:
SetActivePlots(tuple(active))
# Restore the old active window
if winID != -1:
SetActiveWindow(oldWin)
return pInfo["Curve"]
def InitAnnotation():
a = AnnotationAttributes()
TurnOffAllAnnotations(a)
a.axes2D.visible = 1
a.axes2D.xAxis.label.visible = 0
a.axes2D.yAxis.label.visible = 0
a.axes2D.xAxis.title.visible = 0
a.axes2D.yAxis.title.visible = 0
SetAnnotationAttributes(a)
def TestLineout2D(time, suffix):
OpenDatabase(silo_data_path("curv2d.silo"))
AddPlot("Pseudocolor", "d")
DrawPlots()
# Set the colortable to one that has white at the bottom values.
SetActiveContinuousColorTable("calewhite")
pc = PseudocolorAttributes()
pc.colorTableName = "Default"
SetPlotOptions(pc)
# Create the variable list.
vars = ("default")
# Do some lineouts.
p0 = (-4.01261, 1.91818)
p1 = (-0.693968, 4.448759)
p2 = (4.144392, 1.713066)
nsteps = 15
for i in range(nsteps):
t = float(i) / float(nsteps - 1)
p3x = t * p2[0] + (1. - t) * p1[0]
p3y = t * p2[1] + (1. - t) * p1[1]
SetActiveWindow(1)
Lineout(p0, (p3x, p3y), vars)
if (time == 1):
SetActiveWindow(1)
Test("Lineout2d")
if (time == 2):
SetActiveWindow(1)
oa = GetOutputArray(4, 2)
s = ''.join(['%f, '% x for x in oa])
s = '(' + s + ')'
TestText("Lineout2d_output_04", s)
oa = GetOutputArray(8, 2)
s = ''.join(['%f, '% x for x in oa])
s = '(' + s + ')'
TestText("Lineout2d_output_08", s)
SetActiveWindow(2)
InitAnnotation()
Test("CurvesFrom2d" + suffix)
if (time == 2):
oa = GetOutputArray(2)
s = ''.join(['%f, '% x for x in oa])
s = '(' + s + ')'
TestText("Lineout2d_output_02", s)
oa = GetOutputArray()
s = ''.join(['%f, '% x for x in oa])
s = '(' + s + ')'
TestText("Lineout2d_output_15", s)
# Delete the second window.
DeleteWindow()
DeleteAllPlots()
ResetPickLetter()
ResetLineoutColor()
def TestLineout3D(time, suffix):
OpenDatabase(silo_data_path("noise.silo"))
AddPlot("Pseudocolor", "hardyglobal")
DrawPlots()
# Set the view
v = View3DAttributes()
v.viewNormal = (-0.65577, 0.350079, 0.668888)
v.focus = (0, 0, 0)
v.viewUp = (0.218553, 0.936082, -0.275655)
v.viewAngle = 30
v.parallelScale = 17.3205
v.nearPlane = -34.641
v.farPlane = 34.641
v.perspective = 1
SetView3D(v)
# Do some lineouts
vars = ("default")
p0 = (-10., -10., -10.)
P = ((-10., -10., 10.), (-10., 10., -10.), (-10., 10., 10.),\
(10., -10., -10.), (10., -10., 10.), (10., 10., -10.), (10., 10., 10.))
for p in P:
SetActiveWindow(1)
Lineout(p0, p, vars)
if (time == 1):
SetActiveWindow(1)
pc = PseudocolorAttributes()
pc.colorTableName = "xray"
pc.SetOpacityType(pc.Constant)
pc.opacity = 0.5
SetPlotOptions(pc)
Test("Lineout3d")
SetActiveWindow(2)
InitAnnotation()
Test("CurvesFrom3d" + suffix)
# Delete the second window.
DeleteWindow()
DeleteAllPlots()
ResetPickLetter()
ResetLineoutColor()
def TestMultiVarLineout2D(time, suffix):
OpenDatabase(silo_data_path("curv2d.silo"))
AddPlot("Pseudocolor", "d")
DrawPlots()
# Do some lineouts
vars = ("p", "u", "v")
Y = (2, 3, 4)
x1 = -4.5
x2 = 4.5
for y in Y:
SetActiveWindow(1)
Lineout((x1, y), (x2, y), vars)
if (time == 1):
SetActiveWindow(1)
Test("MultiVarLineout2d")
SetActiveWindow(2)
InitAnnotation()
Test("MultiVarCurvesFrom2d" + suffix)
DeleteWindow()
DeleteAllPlots()
ResetPickLetter()
ResetLineoutColor()
def TestSAMRAI(time, suffix):
OpenDatabase(data_path("samrai_test_data/sil_changes/dumps.visit"))
AddPlot("Pseudocolor", "Primitive Var _number_0")
DrawPlots()
# Set the colortable to one that has white at the bottom values.
SetActiveContinuousColorTable("rainbow")
pc = PseudocolorAttributes()
pc.colorTableName = "Default"
SetPlotOptions(pc)
AddOperator("Slice", 1)
slice = SliceAttributes()
slice.originType = slice.Percent
slice.originPercent = 18
slice.axisType = slice.ZAxis
slice.project2d = 1
SetOperatorOptions(slice, 0, 1)
DrawPlots()
ResetView()
SetTimeSliderState(1)
#Do some lineouts
p0 = (3, 3)
p1 = (0, 20)
p2 = (30, 0)
nsteps = 15
for i in range(nsteps):
t = float(i) / float(nsteps - 1)
p3x = t * p2[0] + (1. - t) * p1[0]
p3y = t * p2[1] + (1. - t) * p1[1]
SetActiveWindow(1)
Lineout(p0, (p3x, p3y))
if (time == 1):
SetActiveWindow(1)
Test("LineoutSAMRAI")
SetActiveWindow(2)
InitAnnotation()
Test("CurvesFromSAMRAI" + suffix)
DeleteWindow()
DeleteAllPlots()
ResetPickLetter()
ResetLineoutColor()
def TestSpecifyLineoutWindow(time, suffix):
#window 1
OpenDatabase(data_path("pdb_test_data/dbA00.pdb"))
AddPlot("Pseudocolor", "mesh/ireg")
DrawPlots()
ResetView()
Lineout((0, 2.5), (5, 2.5))
if (time == 1):
SetActiveWindow(1)
InitAnnotation()
Test("LineoutSpecifyWindow_01")
SetActiveWindow(2)
InitAnnotation()
Test("CurvesFromSpecifyWindow_01" + suffix)
SetActiveWindow(1)
CloneWindow()
#window 3
SetTimeSliderState(4)
DrawPlots()
gla = GetGlobalLineoutAttributes()
gla.createWindow = 0
gla.windowId = 4
SetGlobalLineoutAttributes(gla)
Lineout((0, 2.5), (5, 2.5))
if (time == 1):
SetActiveWindow(3)
InitAnnotation()
Test("LineoutSpecifyWindow_02")
SetActiveWindow(4)
InitAnnotation()
Test("CurvesFromSpecifyWindow_02" + suffix)
DeleteWindow()
SetActiveWindow(3)
DeleteWindow()
SetActiveWindow(2)
DeleteWindow()
DeleteAllPlots()
gla.createWindow = 1
gla.windowId = 2
SetGlobalLineoutAttributes(gla)
ResetPickLetter()
ResetLineoutColor()
def TestDynamicLineout(time, suffix):
if (time == 1):
return
#window 1
OpenDatabase(silo_data_path("wave.visit"))
AddPlot("Pseudocolor", "pressure")
DrawPlots()
ResetView()
Lineout((0, 0.5, 2.5), (10, 0.5, 2.5))
gla = GetGlobalLineoutAttributes()
gla.Dynamic = 1
gla.curveOption = gla.UpdateCurve
SetGlobalLineoutAttributes(gla)
SetActiveWindow(1)
t = 0
for i in range (10):
t += 5
SetTimeSliderState(t)
SetActiveWindow(2)
InitAnnotation()
ResetView()
Test("CurvesFromDynamic_01")
# go back to the beginning time state
# and have new curves created for each new time
SetActiveWindow(1)
t = 0
SetTimeSliderState(t)
gla.curveOption = gla.CreateCurve
SetGlobalLineoutAttributes(gla)
for i in range (7):
t += 5
SetTimeSliderState(t)
# now have each new curve have its own color.
gla.colorOption = gla.CreateColor
SetGlobalLineoutAttributes(gla)
for i in range (7):
t += 5
SetTimeSliderState(t)
SetActiveWindow(2)
InitAnnotation()
ResetView()
Test("CurvesFromDynamic_02")
ResetPickLetter()
ResetLineoutColor()
# delete window 2
DeleteWindow()
# clear all plots from window 1
DeleteAllPlots()
dbs = (data_path("pdb_test_data/dbA00.pdb"),
data_path("pdb_test_data/dbB00.pdb"),
data_path("pdb_test_data/dbC00.pdb"))
OpenDatabase(dbs[0])
AddPlot("Pseudocolor", "mesh/ireg")
OpenDatabase(dbs[1])
AddPlot("Pseudocolor", "mesh/ireg")
DrawPlots()
AddWindow()
SetActiveWindow(2)
DeleteAllPlots()
OpenDatabase(dbs[2])
AddPlot("Pseudocolor", "mesh/ireg")
DrawPlots()
gla.Dynamic = 1
gla.curveOption = gla.UpdateCurve
SetGlobalLineoutAttributes(gla)
#Lineout for dbC00.pdb in window 2
Lineout((5.0, 7.5, 0.), (10, 7.5, 0.))
SetActiveWindow(1)
SetActivePlots(1)
#Lineout for dbB00.pdb in window 1
Lineout((0, 8, 0), (5, 8, 0))
SetActivePlots(0)
#Lineout for dbA00.pdb in window 1
Lineout((0, 3, 0), (5, 3, 0))
SetActiveWindow(3)
InitAnnotation()
Test("CurvesFromDynamic_03")
SetActiveWindow(1)
SetActiveTimeSlider(dbs[1])
SetTimeSliderState(15)
SetActiveWindow(3)
Test("CurvesFromDynamic_04")
SetActiveWindow(1)
SetActiveTimeSlider(dbs[0])
SetTimeSliderState(3)
SetActiveWindow(3)
Test("CurvesFromDynamic_05")
SetActiveWindow(2)
SetTimeSliderState(29)
SetActiveWindow(3)
Test("CurvesFromDynamic_06")
ResetLineoutColor()
ResetPickLetter()
# delete window 3
DeleteWindow()
# delete window 2
SetActiveWindow(2)
DeleteWindow()
# clear all plots from window 1
DeleteAllPlots()
def TestDynamic2():
# VisIt00006006 -- ensure that 'ClearRefLines' will 'disconnect' the lineout
# from its originating plot, and won't update when orig plot changes time.
OpenDatabase(silo_data_path("wave.visit"))
AddPlot("Pseudocolor", "pressure")
DrawPlots()
ResetView()
SetTimeSliderState(0)
Lineout((0, 0.5, 2.5), (10, 0.5, 2.5))
SetActiveWindow(2)
InitAnnotation()
Test("Dynamic2_01")
SetActiveWindow(1)
gla = GetGlobalLineoutAttributes()
gla.Dynamic = 1
gla.curveOption = gla.UpdateCurve
SetGlobalLineoutAttributes(gla)
SetTimeSliderState(27)
SetActiveWindow(2)
Test("Dynamic2_02")
SetActiveWindow(1)
gla.Dynamic = 0
SetGlobalLineoutAttributes(gla)
SetTimeSliderState(52)
SetActiveWindow(2)
Test("Dynamic2_03")
ResetPickLetter()
ResetLineoutColor()
DeleteWindow()
DeleteAllPlots()
def TestTecPlot():
# VisIt00006243 -- curve generated from Lineout looks reversed in X
OpenDatabase(data_path("tecplot_test_data/T3L3CLS17u.plt"))
AddPlot("Mesh", "mesh")
AddPlot("Pseudocolor", "k")
DrawPlots()
ResetView()
v = GetView2D()
v.windowCoords = (0.340063, 0.340868, 0.00512584, 0.00572613 )
SetView2D(v)
Lineout((0.340505, 0.00565604, 0), (0.340291, 0.00514717, 0))
InitAnnotation()
Test("LineoutTecPlot_01")
SetActiveWindow(2)
InitAnnotation()
Test("CurvesFromTecPlot_01")
ResetPickLetter()
ResetLineoutColor()
# delete window 2
DeleteWindow()
# remove plots from window 1
DeleteAllPlots()
def TestOperatorCreatedVariables():
def SetCurveAtts():
c = CurveAttributes(1)
c.lineWidth = 2
c.curveColor = (255,0,0,255)
c.curveColorSource = c.Custom
c.showLabels = 0
SetPlotOptions(c)
TestSection("Operator-Created Variables")
OpenDatabase(silo_data_path("noise.silo"))
# Do lineout on a data binning variable.
AddPlot("Pseudocolor", "operators/DataBinning/2D/Mesh", 1, 1)
DataBinningAtts = DataBinningAttributes()
DataBinningAtts.numDimensions = DataBinningAtts.Two # One, Two, Three
DataBinningAtts.dim1BinBasedOn = DataBinningAtts.X # X, Y, Z, Variable
DataBinningAtts.dim1Var = "default"
DataBinningAtts.dim1SpecifyRange = 0
DataBinningAtts.dim1MinRange = 0
DataBinningAtts.dim1MaxRange = 1
DataBinningAtts.dim1NumBins = 50
DataBinningAtts.dim2BinBasedOn = DataBinningAtts.Y # X, Y, Z, Variable
DataBinningAtts.dim2Var = "default"
DataBinningAtts.dim2SpecifyRange = 0
DataBinningAtts.dim2MinRange = 0
DataBinningAtts.dim2MaxRange = 1
DataBinningAtts.dim2NumBins = 50
DataBinningAtts.dim3BinBasedOn = DataBinningAtts.Variable # X, Y, Z, Variable
DataBinningAtts.dim3Var = "default"
DataBinningAtts.dim3SpecifyRange = 0
DataBinningAtts.dim3MinRange = 0
DataBinningAtts.dim3MaxRange = 1
DataBinningAtts.dim3NumBins = 50
DataBinningAtts.outOfBoundsBehavior = DataBinningAtts.Clamp # Clamp, Discard
DataBinningAtts.reductionOperator = DataBinningAtts.Maximum # Average, Minimum, Maximum, StandardDeviation, Variance, Sum, Count, RMS, PDF
DataBinningAtts.varForReduction = "hardyglobal"
DataBinningAtts.emptyVal = 0
DataBinningAtts.outputType = DataBinningAtts.OutputOnBins # OutputOnBins, OutputOnInputMesh
DataBinningAtts.removeEmptyValFromCurve = 1
SetOperatorOptions(DataBinningAtts, 1)
DrawPlots()
Lineout((9, 9), (4.5, -9))
SetActiveWindow(1)
ResetView()
Test("lineout_op_vars_00")
SetActiveWindow(2)
InitAnnotation()
ResetView()
SetCurveAtts()
Test("lineout_op_vars_01")
# delete window 2
DeleteWindow()
# remove plots from window 1
DeleteAllPlots()
# Do lineout on a data binning variable that had other operators
OpenDatabase(silo_data_path("noise.silo"))
AddPlot("Pseudocolor", "operators/DataBinning/2D/Mesh", 1, 1)
SetOperatorOptions(DataBinningAtts, 1)
AddOperator("Transform")
AddOperator("Project")
DrawPlots()
Lineout((9, 9), (4.5, -9))
SetActiveWindow(2)
InitAnnotation()
SetCurveAtts()
Test("lineout_op_vars_02")
# delete window 2
DeleteWindow()
# remove plots from window 1
DeleteAllPlots()
def DoTests(t,s):
TestLineout2D(t,s)
TestLineout3D(t,s)
TestMultiVarLineout2D(t,s)
TestSAMRAI(t,s)
TestSpecifyLineoutWindow(t,s)
TestDynamicLineout(t,s)
def LineoutMain():
InitAnnotation()
la = GetGlobalLineoutAttributes()
la.samplingOn = 1
SetGlobalLineoutAttributes(la)
DoTests(1, "_withSampling")
la.samplingOn = 0
SetGlobalLineoutAttributes(la)
DoTests(2, "_noSampling")
TestDynamic2()
TestTecPlot()
TestOperatorCreatedVariables()
# Call the main function
LineoutMain()
Exit()
the-stack_0_7660
import unittest
from easypysa.easypysa import EasyPysa
class UnitTests(unittest.TestCase):
def test_can_load_executable(self):
easy = EasyPysa()
self.assertTrue(easy._check_executable() == "OK")
if __name__ == "__main__":
unittest.main()
the-stack_0_7661
# Copyright (c) OpenMMLab. All rights reserved.
import os
import warnings
from collections import OrderedDict
import json_tricks as json
import numpy as np
from mmcv import Config
from mmpose.core.evaluation.top_down_eval import keypoint_epe
from mmpose.datasets.builder import DATASETS
from ..base import Kpt3dSviewRgbImgTopDownDataset
@DATASETS.register_module()
class InterHand3DDataset(Kpt3dSviewRgbImgTopDownDataset):
"""InterHand2.6M 3D dataset for top-down hand pose estimation.
`InterHand2.6M: A Dataset and Baseline for 3D Interacting Hand Pose
Estimation from a Single RGB Image' Moon, Gyeongsik etal. ECCV'2020
More details can be found in the `paper
<https://arxiv.org/pdf/2008.09309.pdf>`__ .
The dataset loads raw features and apply specified transforms
to return a dict containing the image tensors and other information.
InterHand2.6M keypoint indexes::
0: 'r_thumb4',
1: 'r_thumb3',
2: 'r_thumb2',
3: 'r_thumb1',
4: 'r_index4',
5: 'r_index3',
6: 'r_index2',
7: 'r_index1',
8: 'r_middle4',
9: 'r_middle3',
10: 'r_middle2',
11: 'r_middle1',
12: 'r_ring4',
13: 'r_ring3',
14: 'r_ring2',
15: 'r_ring1',
16: 'r_pinky4',
17: 'r_pinky3',
18: 'r_pinky2',
19: 'r_pinky1',
20: 'r_wrist',
21: 'l_thumb4',
22: 'l_thumb3',
23: 'l_thumb2',
24: 'l_thumb1',
25: 'l_index4',
26: 'l_index3',
27: 'l_index2',
28: 'l_index1',
29: 'l_middle4',
30: 'l_middle3',
31: 'l_middle2',
32: 'l_middle1',
33: 'l_ring4',
34: 'l_ring3',
35: 'l_ring2',
36: 'l_ring1',
37: 'l_pinky4',
38: 'l_pinky3',
39: 'l_pinky2',
40: 'l_pinky1',
41: 'l_wrist'
Args:
ann_file (str): Path to the annotation file.
camera_file (str): Path to the camera file.
joint_file (str): Path to the joint file.
img_prefix (str): Path to a directory where images are held.
Default: None.
data_cfg (dict): config
pipeline (list[dict | callable]): A sequence of data transforms.
use_gt_root_depth (bool): Using the ground truth depth of the wrist
or given depth from rootnet_result_file.
rootnet_result_file (str): Path to the wrist depth file.
dataset_info (DatasetInfo): A class containing all dataset info.
test_mode (str): Store True when building test or
validation dataset. Default: False.
"""
def __init__(self,
ann_file,
camera_file,
joint_file,
img_prefix,
data_cfg,
pipeline,
use_gt_root_depth=True,
rootnet_result_file=None,
dataset_info=None,
test_mode=False):
if dataset_info is None:
warnings.warn(
'dataset_info is missing. '
'Check https://github.com/open-mmlab/mmpose/pull/663 '
'for details.', DeprecationWarning)
cfg = Config.fromfile('configs/_base_/datasets/interhand3d.py')
dataset_info = cfg._cfg_dict['dataset_info']
super().__init__(
ann_file,
img_prefix,
data_cfg,
pipeline,
dataset_info=dataset_info,
test_mode=test_mode)
self.ann_info['heatmap3d_depth_bound'] = data_cfg[
'heatmap3d_depth_bound']
self.ann_info['heatmap_size_root'] = data_cfg['heatmap_size_root']
self.ann_info['root_depth_bound'] = data_cfg['root_depth_bound']
self.ann_info['use_different_joint_weights'] = False
self.camera_file = camera_file
self.joint_file = joint_file
self.use_gt_root_depth = use_gt_root_depth
if not self.use_gt_root_depth:
assert rootnet_result_file is not None
self.rootnet_result_file = rootnet_result_file
self.db = self._get_db()
print(f'=> num_images: {self.num_images}')
print(f'=> load {len(self.db)} samples')
@staticmethod
def _encode_handtype(hand_type):
if hand_type == 'right':
return np.array([1, 0], dtype=np.float32)
elif hand_type == 'left':
return np.array([0, 1], dtype=np.float32)
elif hand_type == 'interacting':
return np.array([1, 1], dtype=np.float32)
else:
assert 0, f'Not support hand type: {hand_type}'
def _get_db(self):
"""Load dataset.
Adapted from 'https://github.com/facebookresearch/InterHand2.6M/'
'blob/master/data/InterHand2.6M/dataset.py'
Copyright (c) FaceBook Research, under CC-BY-NC 4.0 license.
"""
with open(self.camera_file, 'r') as f:
cameras = json.load(f)
with open(self.joint_file, 'r') as f:
joints = json.load(f)
if not self.use_gt_root_depth:
rootnet_result = {}
with open(self.rootnet_result_file, 'r') as f:
rootnet_annot = json.load(f)
for i in range(len(rootnet_annot)):
rootnet_result[str(
rootnet_annot[i]['annot_id'])] = rootnet_annot[i]
gt_db = []
bbox_id = 0
for img_id in self.img_ids:
num_joints = self.ann_info['num_joints']
ann_id = self.coco.getAnnIds(imgIds=img_id, iscrowd=False)
ann = self.coco.loadAnns(ann_id)[0]
img = self.coco.loadImgs(img_id)[0]
capture_id = str(img['capture'])
camera_name = img['camera']
frame_idx = str(img['frame_idx'])
image_file = os.path.join(self.img_prefix, self.id2name[img_id])
camera_pos = np.array(
cameras[capture_id]['campos'][camera_name], dtype=np.float32)
camera_rot = np.array(
cameras[capture_id]['camrot'][camera_name], dtype=np.float32)
focal = np.array(
cameras[capture_id]['focal'][camera_name], dtype=np.float32)
principal_pt = np.array(
cameras[capture_id]['princpt'][camera_name], dtype=np.float32)
joint_world = np.array(
joints[capture_id][frame_idx]['world_coord'], dtype=np.float32)
joint_cam = self._world2cam(
joint_world.transpose(1, 0), camera_rot,
camera_pos.reshape(3, 1)).transpose(1, 0)
joint_img = self._cam2pixel(joint_cam, focal, principal_pt)[:, :2]
joint_valid = np.array(
ann['joint_valid'], dtype=np.float32).flatten()
hand_type = self._encode_handtype(ann['hand_type'])
hand_type_valid = ann['hand_type_valid']
if self.use_gt_root_depth:
bbox = np.array(ann['bbox'], dtype=np.float32)
# extend the bbox to include some context
center, scale = self._xywh2cs(*bbox, 1.25)
abs_depth = [joint_cam[20, 2], joint_cam[41, 2]]
else:
rootnet_ann_data = rootnet_result[str(ann_id[0])]
bbox = np.array(rootnet_ann_data['bbox'], dtype=np.float32)
# the bboxes have been extended
center, scale = self._xywh2cs(*bbox, 1.0)
abs_depth = rootnet_ann_data['abs_depth']
# 41: 'l_wrist', left hand root
# 20: 'r_wrist', right hand root
rel_root_depth = joint_cam[41, 2] - joint_cam[20, 2]
# if root is not valid, root-relative 3D depth is also invalid.
rel_root_valid = joint_valid[20] * joint_valid[41]
# if root is not valid -> root-relative 3D pose is also not valid.
# Therefore, mark all joints as invalid
joint_valid[:20] *= joint_valid[20]
joint_valid[21:] *= joint_valid[41]
joints_3d = np.zeros((num_joints, 3), dtype=np.float32)
joints_3d_visible = np.zeros((num_joints, 3), dtype=np.float32)
joints_3d[:, :2] = joint_img
joints_3d[:21, 2] = joint_cam[:21, 2] - joint_cam[20, 2]
joints_3d[21:, 2] = joint_cam[21:, 2] - joint_cam[41, 2]
joints_3d_visible[...] = np.minimum(1, joint_valid.reshape(-1, 1))
gt_db.append({
'image_file': image_file,
'center': center,
'scale': scale,
'rotation': 0,
'joints_3d': joints_3d,
'joints_3d_visible': joints_3d_visible,
'hand_type': hand_type,
'hand_type_valid': hand_type_valid,
'rel_root_depth': rel_root_depth,
'rel_root_valid': rel_root_valid,
'abs_depth': abs_depth,
'joints_cam': joint_cam,
'focal': focal,
'princpt': principal_pt,
'dataset': self.dataset_name,
'bbox': bbox,
'bbox_score': 1,
'bbox_id': bbox_id
})
bbox_id = bbox_id + 1
gt_db = sorted(gt_db, key=lambda x: x['bbox_id'])
return gt_db
def evaluate(self, outputs, res_folder, metric='MPJPE', **kwargs):
"""Evaluate interhand2d keypoint results. The pose prediction results
will be saved in `${res_folder}/result_keypoints.json`.
Note:
batch_size: N
num_keypoints: K
heatmap height: H
heatmap width: W
Args:
outputs (list(dict))
:preds (np.ndarray[N,K,3]): The first two dimensions are
coordinates, score is the third dimension of the array.
:hand_type (np.ndarray[N, 4]): The first two dimensions are
hand type, scores is the last two dimensions.
:rel_root_depth (np.ndarray[N]): The relative depth of left
wrist and right wrist.
:boxes (np.ndarray[N,6]): [center[0], center[1], scale[0]
, scale[1],area, score]
:image_paths (list[str]): For example, ['Capture6/
0012_aokay_upright/cam410061/image4996.jpg']
:output_heatmap (np.ndarray[N, K, H, W]): model outpus.
res_folder (str): Path of directory to save the results.
metric (str | list[str]): Metric to be performed.
Options: 'MRRPE', 'MPJPE', 'Handedness_acc'.
Returns:
dict: Evaluation results for evaluation metric.
"""
metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['MRRPE', 'MPJPE', 'Handedness_acc']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
res_file = os.path.join(res_folder, 'result_keypoints.json')
kpts = []
for output in outputs:
preds = output.get('preds')
if preds is None and 'MPJPE' in metrics:
raise KeyError('metric MPJPE is not supported')
hand_type = output.get('hand_type')
if hand_type is None and 'Handedness_acc' in metrics:
raise KeyError('metric Handedness_acc is not supported')
rel_root_depth = output.get('rel_root_depth')
if rel_root_depth is None and 'MRRPE' in metrics:
raise KeyError('metric MRRPE is not supported')
boxes = output['boxes']
image_paths = output['image_paths']
bbox_ids = output['bbox_ids']
batch_size = len(image_paths)
for i in range(batch_size):
image_id = self.name2id[image_paths[i][len(self.img_prefix):]]
kpt = {
'center': boxes[i][0:2].tolist(),
'scale': boxes[i][2:4].tolist(),
'area': float(boxes[i][4]),
'score': float(boxes[i][5]),
'image_id': image_id,
'bbox_id': bbox_ids[i]
}
if preds is not None:
kpt['keypoints'] = preds[i, :, :3].tolist()
if hand_type is not None:
kpt['hand_type'] = hand_type[i][0:2].tolist()
kpt['hand_type_score'] = hand_type[i][2:4].tolist()
if rel_root_depth is not None:
kpt['rel_root_depth'] = float(rel_root_depth[i])
kpts.append(kpt)
kpts = self._sort_and_unique_bboxes(kpts)
self._write_keypoint_results(kpts, res_file)
info_str = self._report_metric(res_file, metrics)
name_value = OrderedDict(info_str)
return name_value
@staticmethod
def _get_accuracy(outputs, gts, masks):
"""Get accuracy of multi-label classification.
Note:
batch_size: N
label_num: C
Args:
outputs (np.array[N, C]): predicted multi-label.
gts (np.array[N, C]): Groundtruth muti-label.
masks (np.array[N, ]): masked outputs will be ignored for
accuracy calculation.
Returns:
accuracy (float)
"""
acc = (outputs == gts).all(axis=1)
return np.mean(acc[masks])
def _report_metric(self, res_file, metrics):
"""Keypoint evaluation.
Args:
res_file (str): Json file stored prediction results.
metrics (str | list[str]): Metric to be performed.
Options: 'MRRPE', 'MPJPE', 'Handedness_acc'.
Returns:
List: Evaluation results for evaluation metric.
"""
info_str = []
with open(res_file, 'r') as fin:
preds = json.load(fin)
assert len(preds) == len(self.db)
gts_rel_root = []
preds_rel_root = []
rel_root_masks = []
gts_joint_coord_cam = []
preds_joint_coord_cam = []
single_masks = []
interacting_masks = []
all_masks = []
gts_hand_type = []
preds_hand_type = []
hand_type_masks = []
for pred, item in zip(preds, self.db):
# mrrpe
if 'MRRPE' in metrics:
if item['hand_type'].all() and item['joints_3d_visible'][
20, 0] and item['joints_3d_visible'][41, 0]:
rel_root_masks.append(True)
pred_left_root_img = np.array(
pred['keypoints'][41], dtype=np.float32)[None, :]
pred_left_root_img[:, 2] += item['abs_depth'][0] + pred[
'rel_root_depth']
pred_left_root_cam = self._pixel2cam(
pred_left_root_img, item['focal'], item['princpt'])
pred_right_root_img = np.array(
pred['keypoints'][20], dtype=np.float32)[None, :]
pred_right_root_img[:, 2] += item['abs_depth'][0]
pred_right_root_cam = self._pixel2cam(
pred_right_root_img, item['focal'], item['princpt'])
preds_rel_root.append(pred_left_root_cam -
pred_right_root_cam)
gts_rel_root.append(
[item['joints_cam'][41] - item['joints_cam'][20]])
else:
rel_root_masks.append(False)
preds_rel_root.append([[0., 0., 0.]])
gts_rel_root.append([[0., 0., 0.]])
if 'MPJPE' in metrics:
pred_joint_coord_img = np.array(
pred['keypoints'], dtype=np.float32)
gt_joint_coord_cam = item['joints_cam'].copy()
pred_joint_coord_img[:21, 2] += item['abs_depth'][0]
pred_joint_coord_img[21:, 2] += item['abs_depth'][1]
pred_joint_coord_cam = self._pixel2cam(pred_joint_coord_img,
item['focal'],
item['princpt'])
pred_joint_coord_cam[:21] -= pred_joint_coord_cam[20]
pred_joint_coord_cam[21:] -= pred_joint_coord_cam[41]
gt_joint_coord_cam[:21] -= gt_joint_coord_cam[20]
gt_joint_coord_cam[21:] -= gt_joint_coord_cam[41]
preds_joint_coord_cam.append(pred_joint_coord_cam)
gts_joint_coord_cam.append(gt_joint_coord_cam)
mask = (np.array(item['joints_3d_visible'])[:, 0]) > 0
if item['hand_type'].all():
single_masks.append(
np.zeros(self.ann_info['num_joints'], dtype=bool))
interacting_masks.append(mask)
all_masks.append(mask)
else:
single_masks.append(mask)
interacting_masks.append(
np.zeros(self.ann_info['num_joints'], dtype=bool))
all_masks.append(mask)
if 'Handedness_acc' in metrics:
pred_hand_type = np.array(pred['hand_type'], dtype=int)
preds_hand_type.append(pred_hand_type)
gts_hand_type.append(item['hand_type'])
hand_type_masks.append(item['hand_type_valid'] > 0)
gts_rel_root = np.array(gts_rel_root, dtype=np.float32)
preds_rel_root = np.array(preds_rel_root, dtype=np.float32)
rel_root_masks = np.array(rel_root_masks, dtype=bool)[:, None]
gts_joint_coord_cam = np.array(gts_joint_coord_cam, dtype=np.float32)
preds_joint_coord_cam = np.array(
preds_joint_coord_cam, dtype=np.float32)
single_masks = np.array(single_masks, dtype=bool)
interacting_masks = np.array(interacting_masks, dtype=bool)
all_masks = np.array(all_masks, dtype=bool)
gts_hand_type = np.array(gts_hand_type, dtype=int)
preds_hand_type = np.array(preds_hand_type, dtype=int)
hand_type_masks = np.array(hand_type_masks, dtype=bool)
if 'MRRPE' in metrics:
info_str.append(('MRRPE',
keypoint_epe(preds_rel_root, gts_rel_root,
rel_root_masks)))
if 'MPJPE' in metrics:
info_str.append(('MPJPE_all',
keypoint_epe(preds_joint_coord_cam,
gts_joint_coord_cam, all_masks)))
info_str.append(('MPJPE_single',
keypoint_epe(preds_joint_coord_cam,
gts_joint_coord_cam, single_masks)))
info_str.append(
('MPJPE_interacting',
keypoint_epe(preds_joint_coord_cam, gts_joint_coord_cam,
interacting_masks)))
if 'Handedness_acc' in metrics:
info_str.append(('Handedness_acc',
self._get_accuracy(preds_hand_type, gts_hand_type,
hand_type_masks)))
return info_str
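if __name__ == '__main__':
    # Illustrative usage sketch, not part of the original module. The data_cfg
    # keys and file paths below are assumptions for demonstration only; the
    # real values live in the InterHand2.6M configs shipped with mmpose.
    example_data_cfg = dict(
        image_size=[256, 256],
        heatmap_size=[64, 64, 64],
        heatmap_size_root=64,
        heatmap3d_depth_bound=400.0,
        root_depth_bound=400.0,
        num_output_channels=42,
        num_joints=42)
    # Building the dataset needs the annotation/camera/joint json files on
    # disk, so the constructor call is left commented out here:
    # dataset = InterHand3DDataset(
    #     ann_file='path/to/InterHand2.6M_val_data.json',
    #     camera_file='path/to/InterHand2.6M_val_camera.json',
    #     joint_file='path/to/InterHand2.6M_val_joint_3d.json',
    #     img_prefix='path/to/images/val/',
    #     data_cfg=example_data_cfg,
    #     pipeline=[],
    #     test_mode=True)
    # dataset.evaluate(outputs, res_folder) then expects the per-batch dicts
    # described in its docstring and writes result_keypoints.json.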
|
the-stack_0_7662 | # Author: Niels Nuyttens <[email protected]>
#
# License: Apache Software License 2.0
"""Statistical drift calculation using `Kolmogorov-Smirnov` and `chi2-contingency` tests."""
from typing import Any, Dict, List, cast
import numpy as np
import pandas as pd
from scipy.stats import chi2_contingency, ks_2samp
from nannyml.chunk import Chunker
from nannyml.drift.base import DriftCalculator
from nannyml.drift.model_inputs.univariate.statistical.results import UnivariateDriftResult
from nannyml.exceptions import CalculatorNotFittedException, MissingMetadataException
from nannyml.metadata import BinaryClassificationMetadata, MulticlassClassificationMetadata, RegressionMetadata
from nannyml.metadata.base import NML_METADATA_COLUMNS, NML_METADATA_PARTITION_COLUMN_NAME, ModelMetadata
from nannyml.preprocessing import preprocess
ALERT_THRESHOLD_P_VALUE = 0.05
class UnivariateStatisticalDriftCalculator(DriftCalculator):
"""A drift calculator that relies on statistics to detect drift."""
def __init__(
self,
model_metadata: ModelMetadata,
features: List[str] = None,
chunk_size: int = None,
chunk_number: int = None,
chunk_period: str = None,
chunker: Chunker = None,
):
"""Constructs a new UnivariateStatisticalDriftCalculator.
Parameters
----------
model_metadata: ModelMetadata
Metadata for the model whose data is to be processed.
features: List[str], default=None
An optional list of feature names to use during drift calculation. None by default, in this case
all features are used during calculation.
chunk_size: int
Splits the data into chunks containing `chunks_size` observations.
Only one of `chunk_size`, `chunk_number` or `chunk_period` should be given.
chunk_number: int
Splits the data into `chunk_number` pieces.
Only one of `chunk_size`, `chunk_number` or `chunk_period` should be given.
chunk_period: str
Splits the data according to the given period.
Only one of `chunk_size`, `chunk_number` or `chunk_period` should be given.
chunker : Chunker
The `Chunker` used to split the data sets into a lists of chunks.
Examples
--------
>>> import nannyml as nml
>>> ref_df, ana_df, _ = nml.load_synthetic_binary_classification_dataset()
>>> metadata = nml.extract_metadata(ref_df)
>>> # Create a calculator that will chunk by week
>>> drift_calc = nml.UnivariateStatisticalDriftCalculator(model_metadata=metadata, chunk_period='W')
"""
super(UnivariateStatisticalDriftCalculator, self).__init__(
model_metadata, features, chunk_size, chunk_number, chunk_period, chunker
)
self.__prediction_column_names: List[str] = []
        self.__predicted_probabilities_column_names: List[str] = []
# add continuous predictions or predicted probabilities from metadata to the selected features
if isinstance(model_metadata, BinaryClassificationMetadata):
if model_metadata.predicted_probability_column_name is None:
raise MissingMetadataException(
"missing value for 'predicted_probability_column_name'. "
"Please update your model metadata accordingly."
)
self.__prediction_column_names = []
self.__predicted_probabilities_column_names = [
cast(BinaryClassificationMetadata, self.model_metadata).predicted_probability_column_name
]
elif isinstance(model_metadata, MulticlassClassificationMetadata):
if model_metadata.predicted_probabilities_column_names is None:
raise MissingMetadataException(
"missing value for 'predicted_probability_column_name'. "
"Please update your model metadata accordingly."
)
md = cast(MulticlassClassificationMetadata, self.model_metadata)
self.__prediction_column_names = []
self.__predicted_probabilities_column_names = list(md.predicted_probabilities_column_names.values())
elif isinstance(model_metadata, RegressionMetadata):
if model_metadata.prediction_column_name is None:
raise MissingMetadataException(
"missing value for 'prediction_column_name'. " "Please update your model metadata accordingly."
)
self.__prediction_column_names = [model_metadata.prediction_column_name]
self.__predicted_probabilities_column_names = []
self.selected_features += self.__predicted_probabilities_column_names + self.__prediction_column_names
self._reference_data = None
def fit(self, reference_data: pd.DataFrame):
"""Fits the drift calculator using a set of reference data.
Parameters
----------
reference_data : pd.DataFrame
A reference data set containing predictions (labels and/or probabilities) and target values.
Returns
-------
calculator: DriftCalculator
The fitted calculator.
Examples
--------
>>> import nannyml as nml
>>> ref_df, ana_df, _ = nml.load_synthetic_binary_classification_dataset()
>>> metadata = nml.extract_metadata(ref_df, model_type=nml.ModelType.CLASSIFICATION_BINARY)
>>> # Create a calculator and fit it
>>> drift_calc = nml.UnivariateStatisticalDriftCalculator(model_metadata=metadata, chunk_period='W').fit(ref_df)
"""
reference_data = preprocess(data=reference_data, metadata=self.model_metadata, reference=True)
self._reference_data = reference_data.copy(deep=True)
return self
def calculate(
self,
data: pd.DataFrame,
) -> UnivariateDriftResult:
"""Calculates the data reconstruction drift for a given data set.
Parameters
----------
data : pd.DataFrame
The dataset to calculate the reconstruction drift for.
Returns
-------
reconstruction_drift: UnivariateDriftResult
A :class:`result<nannyml.drift.model_inputs.univariate.statistical.results.UnivariateDriftResult>`
object where each row represents a :class:`~nannyml.chunk.Chunk`,
containing :class:`~nannyml.chunk.Chunk` properties and the reconstruction_drift calculated
for that :class:`~nannyml.chunk.Chunk`.
Examples
--------
>>> import nannyml as nml
>>> ref_df, ana_df, _ = nml.load_synthetic_binary_classification_dataset()
>>> metadata = nml.extract_metadata(ref_df, model_type=nml.ModelType.CLASSIFICATION_BINARY)
>>> # Create a calculator and fit it
>>> drift_calc = nml.UnivariateStatisticalDriftCalculator(model_metadata=metadata, chunk_period='W').fit(ref_df)
>>> drift = drift_calc.calculate(data)
"""
data = preprocess(data=data, metadata=self.model_metadata)
# Get lists of categorical <-> categorical features
categorical_column_names = [f.column_name for f in self.model_metadata.categorical_features]
continuous_column_names = (
[f.column_name for f in self.model_metadata.continuous_features]
+ self.__predicted_probabilities_column_names
+ self.__prediction_column_names
)
features_and_metadata = NML_METADATA_COLUMNS + self.selected_features
chunks = self.chunker.split(data, columns=features_and_metadata, minimum_chunk_size=500)
chunk_drifts = []
# Calculate chunk-wise drift statistics.
# Append all into resulting DataFrame indexed by chunk key.
for chunk in chunks:
chunk_drift: Dict[str, Any] = {
'key': chunk.key,
'start_index': chunk.start_index,
'end_index': chunk.end_index,
'start_date': chunk.start_datetime,
'end_date': chunk.end_datetime,
'partition': 'analysis' if chunk.is_transition else chunk.partition,
}
present_categorical_column_names = list(set(chunk.data.columns) & set(categorical_column_names))
for column in present_categorical_column_names:
statistic, p_value, _, _ = chi2_contingency(
pd.concat(
[
self._reference_data[column].value_counts(), # type: ignore
chunk.data[column].value_counts(),
],
axis=1,
).fillna(0)
)
chunk_drift[f'{column}_chi2'] = statistic
chunk_drift[f'{column}_p_value'] = np.round(p_value, decimals=3)
chunk_drift[f'{column}_alert'] = (p_value < ALERT_THRESHOLD_P_VALUE) and (
chunk.data[NML_METADATA_PARTITION_COLUMN_NAME] == 'analysis'
).all()
chunk_drift[f'{column}_threshold'] = ALERT_THRESHOLD_P_VALUE
present_continuous_column_names = list(set(chunk.data.columns) & set(continuous_column_names))
for column in present_continuous_column_names:
statistic, p_value = ks_2samp(self._reference_data[column], chunk.data[column]) # type: ignore
chunk_drift[f'{column}_dstat'] = statistic
chunk_drift[f'{column}_p_value'] = np.round(p_value, decimals=3)
chunk_drift[f'{column}_alert'] = (p_value < ALERT_THRESHOLD_P_VALUE) and (
chunk.data[NML_METADATA_PARTITION_COLUMN_NAME] == 'analysis'
).all()
chunk_drift[f'{column}_threshold'] = ALERT_THRESHOLD_P_VALUE
chunk_drifts.append(chunk_drift)
res = pd.DataFrame.from_records(chunk_drifts)
res = res.reset_index(drop=True)
res.attrs['nml_drift_calculator'] = __name__
if self.chunker is None:
raise CalculatorNotFittedException(
'chunker has not been set. '
'Please ensure you run ``calculator.fit()`` '
'before running ``calculator.calculate()``'
)
return UnivariateDriftResult(analysis_data=chunks, drift_data=res, model_metadata=self.model_metadata)
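if __name__ == "__main__":
    # Standalone sketch, not part of the original module: the same two tests
    # applied per chunk above (KS on continuous columns, chi2 on category
    # counts), shown on toy data. Column names and sample sizes are made up.
    rng = np.random.default_rng(42)
    reference = pd.DataFrame({
        'tenure': rng.normal(50, 10, 2000),
        'plan': rng.choice(['basic', 'premium'], 2000, p=[0.7, 0.3]),
    })
    analysis = pd.DataFrame({
        'tenure': rng.normal(55, 10, 2000),  # shifted mean -> drift expected
        'plan': rng.choice(['basic', 'premium'], 2000, p=[0.5, 0.5]),
    })
    d_stat, p_cont = ks_2samp(reference['tenure'], analysis['tenure'])
    chi2, p_cat, _, _ = chi2_contingency(
        pd.concat([reference['plan'].value_counts(),
                   analysis['plan'].value_counts()], axis=1).fillna(0))
    print('KS: d=%.3f p=%.3g alert=%s' % (d_stat, p_cont, p_cont < ALERT_THRESHOLD_P_VALUE))
    print('chi2: stat=%.1f p=%.3g alert=%s' % (chi2, p_cat, p_cat < ALERT_THRESHOLD_P_VALUE))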
|
the-stack_0_7663 | import discord
from discord.ext import commands
class ServerUtils(commands.Cog):
def __init__(self, bot):
self.bot = bot
async def message_from_link(self, link):
"""Returns a Discord message given a link to the message."""
split_link = link.split("/")
channel = self.bot.get_channel(int(split_link[-2]))
        message = await channel.fetch_message(int(split_link[-1]))
return message
async def add_star(self, message):
"""Adds an image to #night-sky for posterity."""
author = message.author
channel = self.bot.get_channel(483357571756064782)
description = message.clean_content
        if len(message.clean_content) == 0 and len(message.embeds) > 0 and message.embeds[0].description:
            description += message.embeds[0].description
embed = discord.Embed(
description = description,
timestamp = message.created_at,
colour = author.colour
)
if len(message.embeds) > 0 and message.embeds[0].type == "image":
print(message.embeds[0].url)
embed.set_image(url=message.embeds[0].url)
elif len(message.attachments) > 0:
embed.set_image(url=message.attachments[0].url)
url = "https://discordapp.com/channels/{0}/{1}/{2}".format(
str(message.guild.id),
str(message.channel.id),
str(message.id)
)
embed.set_author(name=author.name + " in #" + message.channel.name, url=url, icon_url=author.avatar_url)
await channel.send(embed = embed)
@commands.Cog.listener()
async def on_raw_reaction_add(self, payload):
"""Detects #night-sky add requests."""
if payload.emoji.name == "⭐":
message = await self.bot.get_channel(payload.channel_id).fetch_message(payload.message_id)
await self.add_star(message)
    @commands.command()
    async def star(self, ctx, link: str):
        """Manually adds a message to #night-sky given a link to it."""
        message = await self.message_from_link(link)
        await self.add_star(message)
def setup(bot):
bot.add_cog(ServerUtils(bot))
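# Illustrative note, not part of the original cog: with discord.py 1.x this
# module would typically be registered from the bot entry point, e.g.
#
#     bot = commands.Bot(command_prefix="!")
#     bot.load_extension("server_utils")   # module name here is an assumption
#     bot.run(TOKEN)
#
# (in discord.py 2.x both setup() and load_extension() are coroutines).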
|
the-stack_0_7666 | import numpy as np
from .Composition import Composition
from morpheus.utils import debug_print
VERBOSITY = 1
class SequentialComposition(Composition):
def __init__(self):
super().__init__()
self.all_desc_ids = np.array([])
return
def predict(self, X, **kwargs):
n_rows, n_atts = X.shape
s_pred = np.zeros((n_rows, self.n_outputs_))
D = np.empty((n_rows, len(self.all_desc_ids))) # D is extended input matrix
idx_map = self._map_elements_idx(
self.desc_ids, self.all_desc_ids, return_array=True
)
X_idx, D_idx = idx_map[:, 0], idx_map[:, 1]
D[:, D_idx] = X[:, X_idx] # We fill up some entries of the D-matrix.
for e in self.estimators_:
idx_map = self._map_elements_idx(
e.desc_ids, self.all_desc_ids, return_array=True
)
d_idx = idx_map[:, 1]
e_outcome = self._predict_estimator_tidy(e, D[:, d_idx], **kwargs)
msg = """
e_outcome.shape: {}
""".format(
e_outcome.shape
)
debug_print(msg, V=VERBOSITY)
c_idx_map = self._map_elements_idx(
e.targ_ids, self.all_desc_ids, return_array=True
) # Map of connections
# If I predict one of the connections
if c_idx_map.size > 0:
c_idx_e, c_idx_s = c_idx_map[:, 0], c_idx_map[:, 1]
D[:, c_idx_s] = e_outcome[:, c_idx_e]
t_idx_map = self._map_elements_idx(
e.targ_ids, self.targ_ids, return_array=True
) # Map of targets
# If I predict one of the targets
if t_idx_map.size > 0:
msg = """
t_idx_map: {}
""".format(
t_idx_map
)
debug_print(msg, V=VERBOSITY)
t_idx_e, t_idx_s = t_idx_map[:, 0], t_idx_map[:, 1]
s_pred[:, t_idx_s] = e_outcome[:, t_idx_e]
if s_pred.shape[1] == 1:
return s_pred.ravel()
else:
return s_pred
def predict_numeric(self, X, **kwargs):
n_rows, n_atts = X.shape
s_numeric = np.zeros((n_rows, len(self.numeric_targ_ids)))
s_weights = [
t_weight
for t_idx, t_weight in enumerate(self.targ_weights)
if self.targ_types[t_idx] == "numeric"
]
D = np.empty((n_rows, len(self.all_desc_ids))) # D is extended input matrix
idx_map = self._map_elements_idx(
self.desc_ids, self.all_desc_ids, return_array=True
)
X_idx, D_idx = idx_map[:, 0], idx_map[:, 1]
D[:, D_idx] = X[:, X_idx] # We fill up some entries of the D-matrix.
for e in self.estimators_:
idx_map = self._map_elements_idx(
e.desc_ids, self.all_desc_ids, return_array=True
)
d_idx = idx_map[:, 1]
c_idx_map = self._map_elements_idx(
e.targ_ids, self.all_desc_ids, return_array=True
) # Map of connections
# If I predict one of the connections
if c_idx_map.size > 0:
e_outcome = self._predict_estimator_tidy(e, D[:, d_idx], **kwargs)
c_idx_e, c_idx_s = c_idx_map[:, 0], c_idx_map[:, 1]
D[:, c_idx_s] = e_outcome[:, c_idx_e]
t_idx_map = self._map_elements_idx(
e.targ_ids, self.numeric_targ_ids, return_array=True
) # Map of targets
# If I predict one of the targets
if t_idx_map.size > 0:
e_numeric = self._predict_numeric_estimator_tidy(
e, D[:, d_idx], **kwargs
)
s_numeric = self._add_numeric_estimator_outcomes(
e, e_numeric, s_numeric
)
# Normalize
s_numeric /= s_weights
if s_numeric.shape[1] == 1:
return s_numeric.ravel()
else:
return s_numeric
def predict_nominal(self, X, **kwargs):
n_rows, n_atts = X.shape
s_nominal = [np.zeros((n_rows, n_clas)) for n_clas in self.n_classes_]
s_weights = [
t_weight
for t_idx, t_weight in enumerate(self.targ_weights)
if self.targ_types[t_idx] == "nominal"
]
D = np.empty((n_rows, len(self.all_desc_ids))) # D is extended input matrix
idx_map = self._map_elements_idx(
self.desc_ids, self.all_desc_ids, return_array=True
)
X_idx, D_idx = idx_map[:, 0], idx_map[:, 1]
D[:, D_idx] = X[:, X_idx] # We fill up some entries of the D-matrix.
for e in self.estimators_:
idx_map = self._map_elements_idx(
e.desc_ids, self.all_desc_ids, return_array=True
)
d_idx = idx_map[:, 1]
c_idx_map = self._map_elements_idx(
e.targ_ids, self.all_desc_ids, return_array=True
) # Map of connections
# If I predict one of the connections
if c_idx_map.size > 0:
e_outcome = self._predict_estimator_tidy(e, D[:, d_idx], **kwargs)
c_idx_e, c_idx_s = c_idx_map[:, 0], c_idx_map[:, 1]
D[:, c_idx_s] = e_outcome[:, c_idx_e]
t_idx_map = self._map_elements_idx(
e.targ_ids, self.nominal_targ_ids, return_array=True
) # Map of targets
# If I predict one of the targets
if t_idx_map.size > 0:
e_nominal = self._predict_nominal_estimator_tidy(
e, D[:, d_idx], **kwargs
)
s_nominal = self._add_nominal_estimator_outcomes(
e, e_nominal, s_nominal
)
# Normalize
s_nominal = [
s_nominal[t_idx] / s_weights[t_idx]
for t_idx in range(len(self.nominal_targ_ids))
]
# redo sklearn convention from hell
if len(s_nominal) == 1:
return s_nominal[0]
else:
return s_nominal
# Add (i.e., incremental update)
def _add_estimator(self, e, location="out"):
def check_connection(model_a, model_b):
connecting_attributes = np.intersect1d(model_a.targ_ids, model_b.desc_ids)
msg = """
Connecting attributes: {}
""".format(
connecting_attributes
)
debug_print(msg, V=VERBOSITY)
return connecting_attributes.size > 0
if len(self.estimators_) == 0:
# No estimator yet, everything is OK.
self.estimators_.insert(0, e)
elif location in {"out", "output", "append", "back", "end"}:
msg = """
Trying to add a model to end of the chain.
Current chain targ_ids: {}
New estimator desc_ids: {}
""".format(
self.targ_ids, e.desc_ids
)
debug_print(msg, V=VERBOSITY)
if check_connection(self, e):
self.estimators_.append(e)
else:
msg = """
Failed to connect the new estimator to the existing chain.
Current chain has target attributes: {}
New estimator has descriptive attributes: {}
Since you decided to add this estimator to the end of the
current chain, there should be an overlap between the two
in order to connect them. This is not the case.
""".format(
self.targ_ids, e.desc_ids
)
raise ValueError(msg)
elif location in {"in", "input", "prepend", "front", "begin"}:
if check_connection(e, self):
self.estimators_.insert(0, e)
else:
msg = """
Failed to connect the new estimator to the existing chain.
New estimator has target attributes: {}
Current chain has descriptive attributes: {}
Since you decided to add this estimator to the beginning of the
current chain, there should be an overlap between the two
in order to connect them. This is not the case.
""".format(
e.desc_ids, self.targ_ids
)
raise ValueError(msg)
else:
msg = """
An estimator can only be added to a sequential composition if at
least one of its input attributes is an output attribute of the
current sequential composition so far.
Input attributes new estimator: {}
Output attributes current sequential composition: {}
""".format(
e.desc_ids, self.targ_ids
)
raise ValueError(msg)
return
def _add_ids_estimator(self, e):
conn_ids = np.intersect1d(self.targ_ids, e.desc_ids)
self.all_desc_ids = np.unique(np.concatenate((self.all_desc_ids, e.desc_ids)))
self.desc_ids = np.unique(np.concatenate((self.desc_ids, e.desc_ids)))
self.targ_ids = np.unique(np.concatenate((self.targ_ids, e.targ_ids)))
# Remove the connection ids
self.desc_ids = self.desc_ids[~np.in1d(self.desc_ids, conn_ids)]
self.targ_ids = self.targ_ids[~np.in1d(self.targ_ids, conn_ids)]
return
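if __name__ == "__main__":
    # Small self-contained illustration, not part of the original module, of
    # the id-to-column bookkeeping used above: finding where an estimator's
    # attribute ids sit inside the extended id array. The real helper is
    # _map_elements_idx (defined elsewhere in morpheus); this only sketches
    # the underlying numpy idea.
    estimator_desc_ids = np.array([2, 5, 7])
    all_desc_ids = np.array([1, 2, 3, 5, 7, 9])
    _, idx_in_estimator, idx_in_all = np.intersect1d(
        estimator_desc_ids, all_desc_ids, return_indices=True)
    print(idx_in_estimator)  # columns of the estimator's own input, e.g. [0 1 2]
    print(idx_in_all)        # matching columns in the extended D matrix, e.g. [1 3 4]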
|
the-stack_0_7667 | #!/usr/bin/python
# *****************************************************************************
#
# Copyright (c) 2016, EPAM SYSTEMS INC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ******************************************************************************
import logging
import json
import sys
from dlab.fab import *
from dlab.meta_lib import *
from dlab.actions_lib import *
import os
import uuid
import boto3
import argparse
def stop_notebook(nb_tag_value, bucket_name, tag_name, ssh_user, key_path):
print('Terminating EMR cluster and cleaning EMR config from S3 bucket')
try:
clusters_list = get_emr_list(nb_tag_value, 'Value')
if clusters_list:
for cluster_id in clusters_list:
client = boto3.client('emr')
cluster = client.describe_cluster(ClusterId=cluster_id)
cluster = cluster.get("Cluster")
emr_name = cluster.get('Name')
emr_version = cluster.get('ReleaseLabel')
s3_cleanup(bucket_name, emr_name, os.environ['edge_user_name'])
print("The bucket {} has been cleaned successfully".format(bucket_name))
terminate_emr(cluster_id)
print("The EMR cluster {} has been terminated successfully".format(emr_name))
remove_kernels(emr_name, tag_name, nb_tag_value, ssh_user, key_path, emr_version)
print("{} kernels have been removed from notebook successfully".format(emr_name))
else:
print("There are no EMR clusters to terminate.")
except:
sys.exit(1)
print("Stopping data engine cluster")
try:
cluster_list = []
master_ids = []
cluster_instances_list = get_ec2_list('dataengine_notebook_name', nb_tag_value)
for instance in cluster_instances_list:
for tag in instance.tags:
if tag['Key'] == 'Type' and tag['Value'] == 'master':
master_ids.append(instance.id)
for id in master_ids:
for tag in get_instance_attr(id, 'tags'):
if tag['Key'] == 'Name':
cluster_list.append(tag['Value'].replace(' ', '')[:-2])
stop_ec2('dataengine_notebook_name', nb_tag_value)
except:
sys.exit(1)
print("Stopping notebook")
try:
stop_ec2(tag_name, nb_tag_value)
except:
sys.exit(1)
if __name__ == "__main__":
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
os.environ['request_id'])
local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
# generating variables dictionary
create_aws_config_files()
print('Generating infrastructure names and tags')
notebook_config = dict()
notebook_config['service_base_name'] = os.environ['conf_service_base_name']
notebook_config['notebook_name'] = os.environ['notebook_instance_name']
notebook_config['bucket_name'] = (notebook_config['service_base_name'] + '-ssn-bucket').lower().replace('_', '-')
notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
notebook_config['key_path'] = os.environ['conf_key_dir'] + '/' + os.environ['conf_key_name'] + '.pem'
logging.info('[STOP NOTEBOOK]')
print('[STOP NOTEBOOK]')
try:
stop_notebook(notebook_config['notebook_name'], notebook_config['bucket_name'], notebook_config['tag_name'],
os.environ['conf_os_user'], notebook_config['key_path'])
except Exception as err:
append_result("Failed to stop notebook.", str(err))
sys.exit(1)
try:
with open("/root/result.json", 'w') as result:
res = {"notebook_name": notebook_config['notebook_name'],
"Tag_name": notebook_config['tag_name'],
"user_own_bucket_name": notebook_config['bucket_name'],
"Action": "Stop notebook server"}
print(json.dumps(res))
result.write(json.dumps(res))
except:
print("Failed writing results.")
sys.exit(0)
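# Illustrative sketch, not part of the original script: roughly what a
# tag-based stop helper such as stop_ec2() is presumed to do with boto3. The
# real implementation lives in dlab.actions_lib and may differ.
#
#     ec2 = boto3.resource('ec2')
#     instances = ec2.instances.filter(
#         Filters=[{'Name': 'tag:{}'.format(tag_name), 'Values': [tag_value]},
#                  {'Name': 'instance-state-name', 'Values': ['running']}])
#     for instance in instances:
#         instance.stop()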
|
the-stack_0_7668 | # coding=utf-8
# !/usr/bin/env python
"""
:mod:"IKA_RET_Control_Visc" -- API for IKA RET Control Visc remote controllable hotplate stirrer
===================================
.. module:: IKA_RET_Control_Visc
:platform: Windows
:synopsis: Control IKA RET Control Visc hotplate stirrer.
.. moduleauthor:: Sebastian Steiner <[email protected]>
.. moduleauthor:: Stefan Glatzel <[email protected]>
(c) 2017 The Cronin Group, University of Glasgow
This provides a python class for the IKA RET Control Visc Hotplates
based on software developed by Stefan Glatzel.
The command implementation is based on the english manual:
English manual version: 20000004159, RET control-visc_112015
Pages 31 - 34, the german version, same file pages 15 - 18 appears
to contain more and better information.
For style guide used see http://xkcd.com/1513/
"""
# system imports
import re
import serial
import os
import inspect
import sys
HERE = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
sys.path.append(os.path.join(HERE, '..'))
# additional module imports
from SerialDevice.serial_labware import SerialDevice, command
class IKARETControlVisc(SerialDevice):
"""
This provides a python class for the IKA RET Control Visc Hotplates
The command implementation is based on the english manual:
English manual version: 20000004159, RET control-visc_112015, Pages 31 - 34,
the german version, same file pages 15 - 18 appears to contain more and better information.
"""
def __init__(self, port=None, device_name=None, connect_on_instantiation=False, soft_fail_for_testing=False):
"""
Initializer of the IKARETControlVisc class.
Args:
port (str): The port name/number of the hotplate
device_name (str): A descriptive name for the device, used mainly in debug prints.
connect_on_instantiation (bool): (optional) determines if the connection is established on instantiation of
the class. Default: Off
soft_fail_for_testing (bool): (optional) determines if an invalid serial port raises an error or merely logs
a message. Default: Off
"""
super().__init__(port, device_name, soft_fail_for_testing)
# serial settings
self.baudrate = 9600
self.bytesize = serial.SEVENBITS
self.parity = serial.PARITY_EVEN
self.rtscts = True
self.write_delay = 0.1
self.read_delay = 0.1
# answer patterns
        self.stranswer = re.compile(r"([0-9A-Z_]+)\r\n")
        self.valueanswer = re.compile(r"(\d+\.\d+) (\d)\r\n")
        self.wdanswer = re.compile(r"(\d+\.\d+)\r\n")
# other settings
self.IKA_default_name = "IKARET"
# DOCUMENTED COMMANDS for easier maintenance
self.GET_STIR_RATE_PV = "IN_PV_4"
self.GET_STIR_RATE_SP = "IN_SP_4"
self.SET_STIR_RATE_SP = "OUT_SP_4"
self.GET_TEMP_PV = "IN_PV_1"
self.GET_TEMP_SP = "IN_SP_1"
self.SET_TEMP_SP = "OUT_SP_1"
self.START_TEMP = "START_1"
self.STOP_TEMP = "STOP_1"
self.START_STIR = "START_4"
self.STOP_STIR = "STOP_4"
self.START_PH = "START_80"
self.STOP_PH = "STOP_80"
self.START_WEIGHING = "START_90"
self.STOP_WEIGHING = "STOP_90"
self.RESET = "RESET"
self.GET_NAME = "IN_NAME"
self.SET_NAME = "OUT_NAME"
self.GET_SOFTWARE_VERSION = "IN_SOFTWARE"
self.GET_MEDIUM_TEMPERATURE_SP = "IN_SP_7"
self.GET_HOT_PLATE_TEMPERATURE_PV = "IN_PV_2"
self.GET_HOT_PLATE_TEMPERATURE_SP = "IN_SP_2"
self.SET_HOT_PLATE_TEMPERATURE_SP = "OUT_SP_2"
self.GET_HOT_PLATE_SAFETY_TEMPERATURE_PV = "IN_PV_3"
self.GET_HOT_PLATE_SAFETY_TEMPERATURE_SP = "IN_SP_3"
self.GET_PH_PV = "IN_PV_80"
self.GET_WEIGHT_PV = "IN_PV_90"
self.launch_command_handler()
if connect_on_instantiation:
self.open_connection()
@property
@command
def stir_rate_pv(self):
"""
Reads the process variable (i.e. the current) stir rate
:return: call back to send_message with a request to return and check a value
"""
return self.send_message(self.GET_STIR_RATE_PV, True, self.valueanswer)
@property
@command
def stir_rate_sp(self):
"""
Reads the set point (target) for the stir rate
:return: call back to send_message with a request to return and check a value
"""
return self.send_message(self.GET_STIR_RATE_SP, True, self.valueanswer)
@stir_rate_sp.setter
@command
def stir_rate_sp(self, stir_rate=None):
"""
        Sets the stirrer rate. The new set point can be read back via the
        stir_rate_sp property to verify that the change was successful.
        Args:
            stir_rate (int): the target stir rate of the hot plate
"""
try:
# type checking of the stir rate that the user provided
stir_rate = int(stir_rate)
except ValueError:
raise(ValueError("Error setting stir rate. Rate was not a valid integer \"{0}\"".format(stir_rate)))
self.logger.debug("Setting stir rate to {0} RPM...".format(stir_rate))
# actually sending the command
self.send_message("{0} {1}".format(self.SET_STIR_RATE_SP, stir_rate))
@property
@command
def temperature_pv(self):
# reading the process variable
return self.send_message(self.GET_TEMP_PV, True, self.valueanswer)
@property
@command
def temperature_sp(self):
return self.send_message(self.GET_TEMP_SP, True, self.valueanswer)
@temperature_sp.setter
@command
def temperature_sp(self, temperature=None):
"""
Sets the target temperature for sensor 1 (i.e. "medium temperature (external temperature sensor)"
Args:
temperature (float): the target temperature
"""
try:
temperature = float(temperature)
except ValueError:
raise(ValueError("Error setting temperature. Value was not a valid float \"{0}\"".format(temperature)))
self.logger.debug("Setting temperature setpoint to {0}°C...".format(temperature))
# actually sending the command
self.send_message("{0} {1}".format(self.SET_TEMP_SP, temperature))
@command
def start_heater(self):
self.logger.debug("Starting heater...")
return self.send_message(self.START_TEMP)
@command
def stop_heater(self):
self.logger.debug("Stopping heater...")
return self.send_message(self.STOP_TEMP)
@command
def start_stirrer(self):
self.logger.debug("Starting stirrer...")
return self.send_message(self.START_STIR)
@command
def stop_stirrer(self):
self.logger.debug("Stopping heater...")
return self.send_message(self.STOP_STIR)
@command
def start_ph_meter(self):
return self.send_message(self.START_PH)
@command
def stop_ph_meter(self):
return self.send_message(self.STOP_PH)
@command
def start_weighing(self):
return self.send_message(self.START_WEIGHING)
@command
def stop_weighing(self):
return self.send_message(self.STOP_WEIGHING)
@command
def reset_hot_plate(self):
return self.send_message(self.RESET)
@property
@command
def name(self):
"""
Returns the name of the hot plate
:return: call back to send_message with a request to return the name
"""
return self.send_message(self.GET_NAME, True)
@name.setter
@command
def name(self, name=None):
"""
Sets the name of the hotplate to "name". Resets to default (self.IKA_default_name) if no name is passed.
Warns that names longer than 6 characters get truncated upon restart of the hotplate.
Args:
name (str): the new name
"""
if name is None:
name = self.IKA_default_name
if len(name) > 6:
self.logger.debug("Warning name will be shortened to \"{}\" by the hot plate, after restart.".format(name[0:6]))
self.send_message("{0} {1}".format(self.SET_NAME, name))
@property
@command
def software_version(self):
"""
Returns the software version of the firmware
!!!WARNING!!! Despite being documented this does not seem to work as intended, it just returns an empty string
:return: (supposed to...) software version of the firmware
"""
return self.send_message(self.GET_SOFTWARE_VERSION, True)
@command
def set_watch_dog_temp(self):
# TODO handle echo!
pass
@command
def set_watch_dog_stir_rate(self):
# TODO handle echo!
pass
@command
def get_hot_plate_temp_current(self):
pass
@property
@command
def temperature_heat_transfer_medium_sp(self):
return self.send_message(self.GET_MEDIUM_TEMPERATURE_SP, True, self.valueanswer)
@property
@command
def temperature_hot_plate_pv(self):
return self.send_message(self.GET_HOT_PLATE_TEMPERATURE_PV, True, self.valueanswer)
@property
@command
def temperature_hot_plate_sp(self):
return self.send_message(self.GET_HOT_PLATE_TEMPERATURE_SP, True, self.valueanswer)
@temperature_hot_plate_sp.setter
@command
def temperature_hot_plate_sp(self, temperature):
"""
Sets the target temperature for sensor 2 (i.e. "hot plate temperature"
Args:
temperature (float): the target temperature
"""
try:
temperature = float(temperature)
except ValueError:
raise(ValueError("Error setting hot plate temperature. "
"Value was not a valid float \"{0}\"".format(temperature)
))
self.send_message("{0} {1}".format(self.SET_HOT_PLATE_TEMPERATURE_SP, temperature))
@property
@command
def temperature_hot_plate_safety_pv(self):
"""
This is a documented function and does return values, but I cannot figure out what it's supposed to be...
:return: excellent question...
"""
self.logger.debug("WARNING! Don't use temperature_hot_plate_safety_pv! (see docstring)")
return self.send_message(self.GET_HOT_PLATE_SAFETY_TEMPERATURE_PV, True, self.valueanswer)
@property
@command
def temperature_hot_plate_safety_sp(self):
"""
This returns the current safety temperature set point. There is no equivalent setter function (for obvious
safety reasons, it actually does not exist in the firmware)
:return: The current setting of the hot plate safety temperature
"""
return self.send_message(self.GET_HOT_PLATE_SAFETY_TEMPERATURE_SP, True, self.valueanswer)
@command
def get_viscosity_trend(self):
pass
@command
def get_ph(self):
return self.send_message(self.GET_PH_PV, True, self.valueanswer)
@command
def get_weight(self):
# only works with start weight, takes about 4 sec to calibrate
return self.send_message(self.GET_WEIGHT_PV, True, self.valueanswer)
if __name__ == '__main__':
hp = IKARETControlVisc(port="COM5", connect_on_instantiation=True)
    hp.temperature_sp = 40  # setting temperature set point to 40 °C
print("temperature_pv {}".format(hp.temperature_pv))
hp.start_heater() # starting the heater
hp.stop_heater() # stopping heater
print("temperature_hot_plate_safety_pv {}".format(hp.temperature_hot_plate_pv))
print("temperature_hot_plate_safety_sp {}".format(hp.temperature_hot_plate_sp))
print("temperature_hot_plate_safety_pv {}".format(hp.temperature_hot_plate_safety_pv))
print("temperature_hot_plate_safety_sp {}".format(hp.temperature_hot_plate_safety_sp))
print("software_version {}".format(hp.software_version))
while True:
pass
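# Illustrative note, not part of the original module: the exchange is plain
# ASCII over RS-232 using the serial settings configured in __init__ (9600
# baud, 7 data bits, even parity, RTS/CTS). A raw pyserial sketch of a single
# query could look like the following; the exact spacing/terminator of the
# command string is an assumption here, the real handling is done by
# SerialDevice:
#
#     import serial
#     ser = serial.Serial("COM5", baudrate=9600, bytesize=serial.SEVENBITS,
#                         parity=serial.PARITY_EVEN, rtscts=True, timeout=1)
#     ser.write(b"IN_PV_1\r\n")   # query the external temperature sensor
#     print(ser.readline())       # expected reply shape: b"23.4 1\r\n"
#     ser.close()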
|
the-stack_0_7670 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for google_research.google_research.cold_posterior_bnn.core.statistics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
from absl import logging
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
from cold_posterior_bnn.core import statistics as stats
tfd = tfp.distributions
TOL = 1e-7
class StatisticsTest(parameterized.TestCase, tf.test.TestCase):
def test_classification_prob(self):
cprob = stats.ClassificationLogProb()
logits1 = tf.math.log([[0.3, 0.7], [0.6, 0.4]])
logits2 = tf.math.log([[0.2, 0.8], [0.5, 0.5]])
logits3 = tf.math.log([[0.4, 0.6], [0.4, 0.6]])
cprob.reset()
cprob.update(logits1)
cprob.update(logits2)
cprob.update(logits3)
log_prob = cprob.result()
self.assertAlmostEqual(math.log(0.3), float(log_prob[0, 0]), delta=TOL)
self.assertAlmostEqual(math.log(0.7), float(log_prob[0, 1]), delta=TOL)
self.assertAlmostEqual(math.log(0.5), float(log_prob[1, 0]), delta=TOL)
self.assertAlmostEqual(math.log(0.5), float(log_prob[1, 1]), delta=TOL)
def test_brier_score(self):
logits1 = tf.math.log([[0.3, 0.7], [0.3, 0.7]])
logits2 = tf.math.log([[0.2, 0.8], [0.6, 0.4]])
logits3 = tf.math.log([[0.4, 0.6], [0.4, 0.6]])
labels = tf.convert_to_tensor([0, 1], dtype=tf.int32)
brier = stats.BrierScore()
brier.reset()
brier.update(logits1, labels)
brier.update(logits2, labels)
brier.update(logits3, labels)
brier_score = brier.result()
brier_score_true_0 = 0.3*0.3 + 0.7*0.7 - 2.0*0.3
brier_score_true_1 = (1.3/3.0)**2.0 + (1.7/3.0)**2.0 - 2.0*(1.7/3.0)
self.assertAlmostEqual(float(brier_score[0]), brier_score_true_0, delta=TOL)
self.assertAlmostEqual(float(brier_score[1]), brier_score_true_1, delta=TOL)
def _generate_perfect_calibration_logits(self, nsamples, nclasses,
inv_temp=2.0):
"""Generate well distributed and well calibrated probabilities.
Args:
nsamples: int, >= 1, number of samples to generate.
nclasses: int, >= 2, number of classes.
inv_temp: float, >= 0.0, inverse temperature parameter.
Returns:
logits: Tensor, shape (nsamples, nclasses), tf.float32, unnormalized log
probabilities (logits) of the probabilistic predictions.
labels: Tensor, shape (nsamples,), tf.int32, the true class labels. Each
element is in the range 0,..,nclasses-1.
"""
logits = inv_temp*tf.random.normal((nsamples, nclasses))
logits = tf.math.log_softmax(logits)
py = tfp.distributions.Categorical(logits=logits)
labels = py.sample()
return logits, labels
def _generate_random_calibration_logits(self, nsamples, nclasses):
"""Generate well distributed and poorly calibrated probabilities.
Args:
nsamples: int, >= 1, number of samples to generate.
nclasses: int, >= 2, number of classes.
Returns:
logits: Tensor, shape (nsamples, nclasses), tf.float32, unnormalized log
probabilities (logits) of the probabilistic predictions.
labels: Tensor, shape (nsamples,), tf.int32, the true class labels. Each
element is in the range 0,..,nclasses-1.
"""
logits = 2.0*tf.random.normal((nsamples, nclasses))
logits = tf.math.log_softmax(logits)
py = tfp.distributions.Categorical(logits=logits)
labels = py.sample()
logits_other = 2.0*tf.random.normal((nsamples, nclasses))
logits_other = tf.math.log_softmax(logits_other)
return logits_other, labels
@parameterized.parameters(
(5, 3, 50000), (10, 5, 50000)
)
def test_ece_calibrated(self, num_bins, nclasses, nsamples):
logits, labels = self._generate_perfect_calibration_logits(
nsamples, nclasses)
ece_stat = stats.ECE(num_bins)
ece_stat.reset()
ece_stat.update(logits, labels)
ece = float(ece_stat.result())
ece_tolerance = 0.01
self.assertLess(ece, ece_tolerance, msg="ECE %.5f > %.2f for perfectly "
"calibrated logits" % (ece, ece_tolerance))
@parameterized.parameters(
(True, 3, 50000), (True, 5, 50000), (True, 10, 50000),
(False, 3, 50000), (False, 5, 50000), (False, 10, 50000),
)
def test_brier_decomposition(self, well_calib, nclasses, nsamples):
"""Recompose the Brier decomposition and compare it to the Brier score."""
if well_calib:
logits, labels = self._generate_perfect_calibration_logits(
nsamples, nclasses, inv_temp=0.25)
else:
logits, labels = self._generate_random_calibration_logits(
nsamples, nclasses)
score = stats.BrierScore()
uncert = stats.BrierUncertainty()
resol = stats.BrierResolution()
reliab = stats.BrierReliability()
for stat in [score, uncert, resol, reliab]:
stat.reset()
stat.update(logits, labels)
score = float(tf.reduce_mean(score.result()))
uncert = float(uncert.result())
resol = float(resol.result())
reliab = float(reliab.result())
self.assertGreaterEqual(resol, 0.0, "Brier resolution is negative, this "
"should not happen.")
self.assertGreaterEqual(reliab, 0.0, "Brier reliability is negative, this "
"should not happen.")
score_from_decomposition = uncert - resol + reliab
if well_calib:
calib_str = "calibrated"
else:
calib_str = "uncalibrated"
logging.info("Brier decomposition (%s) (n=%d, K=%d), "
"%.5f = %.5f - %.5f + %.5f (%.5f, diff %.5f)",
calib_str, nsamples, nclasses, score, uncert, resol, reliab,
score_from_decomposition, score - score_from_decomposition)
self.assertAlmostEqual(score, score_from_decomposition, delta=0.025,
msg="Brier decomposition sums to %.5f which "
"deviates from Brier score %.5f" % (
score_from_decomposition, score))
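  # Note (added): the identity exercised above is the classical Brier
  # decomposition, score = uncertainty - resolution + reliability, with
  # resolution and reliability both non-negative; a small tolerance is needed
  # because all three terms are estimated from finite samples.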
@parameterized.parameters(
(3, 50000), (5, 50000)
)
def test_brierreliab_poorly_calibrated(self, nclasses, nsamples):
logits, labels = self._generate_random_calibration_logits(
nsamples, nclasses)
brierreliab_stat = stats.BrierReliability()
brierreliab_stat.reset()
brierreliab_stat.update(logits, labels)
reliab = float(brierreliab_stat.result())
reliab_lower = 0.2
self.assertGreater(reliab, reliab_lower,
msg="Brier reliability %.5f < %.2f for random "
"logits" % (reliab, reliab_lower))
@parameterized.parameters(
(3, 50000), (5, 50000)
)
def test_brierreliab_calibrated(self, nclasses, nsamples):
logits, labels = self._generate_perfect_calibration_logits(
nsamples, nclasses)
brierreliab_stat = stats.BrierReliability()
brierreliab_stat.reset()
brierreliab_stat.update(logits, labels)
reliab = float(brierreliab_stat.result())
reliab_tolerance = 0.1
self.assertLess(reliab, reliab_tolerance,
msg="Brier reliability %.5f > %.2f for perfectly "
"calibrated logits" % (reliab, reliab_tolerance))
@parameterized.parameters(
(5, 3, 50000), (10, 5, 50000)
)
def test_ece_poorly_calibrated(self, num_bins, nclasses, nsamples):
logits, labels = self._generate_random_calibration_logits(
nsamples, nclasses)
ece_stat = stats.ECE(num_bins)
ece_stat.reset()
ece_stat.update(logits, labels)
ece = float(ece_stat.result())
ece_lower = 0.2
self.assertGreater(ece, ece_lower, msg="ECE %.5f < %.2f for random "
"logits" % (ece, ece_lower))
def test_standarddeviation(self):
logits = tf.math.log([[0.3, 0.7], [0.3, 0.7]])
labels = tf.convert_to_tensor([0, 1], dtype=tf.int32)
caccuracy = stats.Accuracy()
caccuracy.reset()
caccuracy.update(logits, labels)
accuracy = caccuracy.result()
self.assertEqual(0.0, float(accuracy[0]))
self.assertEqual(1.0, float(accuracy[1]))
accstddev = stats.StandardDeviation(stats.Accuracy())
accstddev.reset()
accstddev.update(logits, labels)
stddev = accstddev.result()
self.assertAlmostEqual(0.5*math.sqrt(2.0), float(stddev), delta=TOL)
def test_standarderror(self):
logits = tf.math.log([[0.3, 0.7], [0.3, 0.7]])
labels = tf.convert_to_tensor([0, 1], dtype=tf.int32)
accsem = stats.StandardError(stats.Accuracy())
accsem.reset()
accsem.update(logits, labels)
sem = accsem.result()
self.assertAlmostEqual(0.5, float(sem), delta=TOL)
def test_classification_accuracy(self):
logits1 = tf.math.log([[0.3, 0.7], [0.3, 0.7]])
logits2 = tf.math.log([[0.2, 0.8], [0.6, 0.4]])
logits3 = tf.math.log([[0.4, 0.6], [0.4, 0.6]])
labels = tf.convert_to_tensor([0, 1], dtype=tf.int32)
caccuracy = stats.Accuracy()
caccuracy.reset()
caccuracy.update(logits1, labels)
caccuracy.update(logits2, labels)
caccuracy.update(logits3, labels)
accuracy = caccuracy.result()
self.assertEqual(0.0, float(accuracy[0]))
self.assertEqual(1.0, float(accuracy[1]))
gaccuracy = stats.GibbsAccuracy()
gaccuracy.reset()
gaccuracy.update(logits1, labels)
gaccuracy.update(logits2, labels)
gaccuracy.update(logits3, labels)
accuracy = gaccuracy.result()
self.assertEqual(0.0, float(accuracy[0]))
self.assertAlmostEqual(0.666666667, float(accuracy[1]), delta=TOL)
def test_classification_ce(self):
cce = stats.ClassificationCrossEntropy()
logits1 = tf.math.log([[0.3, 0.7], [0.6, 0.4]])
logits2 = tf.math.log([[0.2, 0.8], [0.5, 0.5]])
logits3 = tf.math.log([[0.4, 0.6], [0.4, 0.6]])
labels = tf.convert_to_tensor([1, 0], dtype=tf.int32)
cce.reset()
cce.update(logits1, labels)
cce.update(logits2, labels)
cce.update(logits3, labels)
ce = cce.result()
self.assertAlmostEqual(-math.log(0.7), float(ce[0]), delta=TOL)
self.assertAlmostEqual(-math.log(0.5), float(ce[1]), delta=TOL)
ces = []
gce = stats.ClassificationGibbsCrossEntropy()
gce.reset()
for logits in [logits1, logits2, logits3]:
cce.reset()
cce.update(logits, labels)
ces.append(cce.result())
gce.update(logits, labels)
self.assertAllClose(
tf.reduce_mean(tf.stack(ces, axis=0), axis=0),
gce.result(),
atol=TOL,
msg="Gibbs cross entropy does not match mean CE.")
REGRESSION_MODEL_OUTPUT_TYPES = ["tensors", "dists"]
def NewRegressionModelOutputs(tensor_model_outputs, model_output_type="tensors",
outputs_with_log_stddevs=False, stddev=1.0):
model_outputs = None
if model_output_type == "tensors":
model_outputs = tensor_model_outputs
elif model_output_type == "dists":
if outputs_with_log_stddevs:
n_targets = tensor_model_outputs.shape[-1] // 2
model_outputs = tfd.Normal(tensor_model_outputs[:, :, :n_targets],
tf.exp(tensor_model_outputs[:, :, n_targets:]))
else:
model_outputs = tfd.Normal(tensor_model_outputs, stddev)
else:
raise Exception("Unknown model_output_type: {}".format(model_output_type))
return model_outputs
class RegressionOutputsTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(itertools.product(REGRESSION_MODEL_OUTPUT_TYPES))
def test_regression_outputs_only_means_1d(self, model_output_type):
tensor_model_outputs = tf.constant([
[[0.3], [0.6]], # Member 0, Example 0 and 1
[[0.2], [0.5]], # Member 1, Example 0 and 1
[[0.4], [0.4]], # Member 2, Example 0 and 1
])
model_outputs = NewRegressionModelOutputs(tensor_model_outputs,
model_output_type)
ens_reg_outputs = stats.RegressionOutputs()
ens_reg_outputs.update(model_outputs[0])
ens_reg_outputs.update(model_outputs[1])
ens_reg_outputs.update(model_outputs[2])
means, variances = ens_reg_outputs.result()
self.assertAlmostEqual(0.3, float(means[0][0]), delta=TOL)
self.assertAlmostEqual(0.5, float(means[1][0]), delta=TOL)
expected_variance = self._get_mixture_variance(
probs=[1 / 3, 1 / 3, 1 / 3],
means=[0.3, 0.2, 0.4],
stddevs=[1.0, 1.0, 1.0])
self.assertAlmostEqual(
float(expected_variance), float(variances[0][0]), delta=1e-5)
expected_variance = self._get_mixture_variance(
probs=[1 / 3, 1 / 3, 1 / 3],
means=[0.6, 0.5, 0.4],
stddevs=[1.0, 1.0, 1.0])
self.assertAlmostEqual(
float(expected_variance), float(variances[1][0]), delta=1e-5)
@parameterized.parameters(itertools.product(REGRESSION_MODEL_OUTPUT_TYPES))
def test_regression_outputs_only_means_2d_diff_stddev(self,
model_output_type):
tensor_model_outputs = tf.constant([
[[0.3, 0.4], [1.6, 0.6]], # Member 0, Example 0 and 1
[[0.2, 0.2], [0.8, 0.5]], # Member 1, Example 0 and 1
[[0.4, 0.6], [2.4, 0.4]], # Member 2, Example 0 and 1
])
model_outputs = NewRegressionModelOutputs(tensor_model_outputs,
model_output_type,
stddev=0.1)
ens_reg_outputs = stats.RegressionOutputs(stddev=0.1)
ens_reg_outputs.update(model_outputs[0])
ens_reg_outputs.update(model_outputs[1])
ens_reg_outputs.update(model_outputs[2])
means, variances = ens_reg_outputs.result()
self.assertAlmostEqual(0.3, float(means[0][0]), delta=TOL)
self.assertAlmostEqual(0.4, float(means[0][1]), delta=TOL)
self.assertAlmostEqual(1.6, float(means[1][0]), delta=TOL)
self.assertAlmostEqual(0.5, float(means[1][1]), delta=TOL)
# Expected mixture, does not have to use normal distributions
expected_variance = self._get_mixture_variance(
probs=[1 / 3, 1 / 3, 1 / 3],
means=[0.3, 0.2, 0.4],
stddevs=[0.1, 0.1, 0.1])
self.assertAlmostEqual(
float(expected_variance), float(variances[0][0]), delta=1e-5)
expected_variance = self._get_mixture_variance(
probs=[1 / 3, 1 / 3, 1 / 3],
means=[0.4, 0.2, 0.6],
stddevs=[0.1, 0.1, 0.1])
self.assertAlmostEqual(
float(expected_variance), float(variances[0][1]), delta=1e-5)
expected_variance = self._get_mixture_variance(
probs=[1 / 3, 1 / 3, 1 / 3],
means=[1.6, 0.8, 2.4],
stddevs=[0.1, 0.1, 0.1])
self.assertAlmostEqual(
float(expected_variance), float(variances[1][0]), delta=1e-5)
expected_variance = self._get_mixture_variance(
probs=[1 / 3, 1 / 3, 1 / 3],
means=[0.6, 0.5, 0.4],
stddevs=[0.1, 0.1, 0.1])
self.assertAlmostEqual(
float(expected_variance), float(variances[1][1]), delta=1e-5)
@parameterized.parameters(itertools.product(REGRESSION_MODEL_OUTPUT_TYPES))
def test_regression_outputs_means_and_variances_2d(self, model_output_type):
tensor_model_outputs = tf.constant([
[ # member 0 tensor_model_outputs
[0.3, 0.4, np.log(0.01), np.log(0.02)], # Example 0
[1.6, 0.6, np.log(2.0), np.log(0.01)], # Example 1
],
[ # member 1 tensor_model_outputs
[0.2, 0.2, np.log(0.1), np.log(0.2)], # Example 0
[0.8, 0.5, np.log(0.5), np.log(0.2)], # Example 1
],
[ # member 2 tensor_model_outputs
[0.4, 0.6, np.log(1.0), np.log(1.5)], # Example 0
[2.4, 0.4, np.log(0.05), np.log(0.1)], # Example 1
]
])
model_outputs = NewRegressionModelOutputs(tensor_model_outputs,
model_output_type,
outputs_with_log_stddevs=True)
ens_reg_outputs = stats.RegressionOutputs(outputs_with_log_stddevs=True)
ens_reg_outputs.update(model_outputs[0]) # Member 0 outputs
ens_reg_outputs.update(model_outputs[1]) # Member 1 outputs
ens_reg_outputs.update(model_outputs[2]) # Member 2 outputs
means, variances = ens_reg_outputs.result()
self.assertAlmostEqual(0.3, float(means[0][0]), delta=TOL)
self.assertAlmostEqual(0.4, float(means[0][1]), delta=TOL)
self.assertAlmostEqual(1.6, float(means[1][0]), delta=TOL)
self.assertAlmostEqual(0.5, float(means[1][1]), delta=TOL)
# Expected mixture, does not have to use normal distributions
expected_variance = self._get_mixture_variance(
probs=[1 / 3, 1 / 3, 1 / 3],
means=[0.3, 0.2, 0.4],
stddevs=[0.01, 0.1, 1.0])
self.assertAlmostEqual(
float(expected_variance), float(variances[0][0]), delta=1e-5)
expected_variance = self._get_mixture_variance(
probs=[1 / 3, 1 / 3, 1 / 3],
means=[0.4, 0.2, 0.6],
stddevs=[0.02, 0.2, 1.5])
self.assertAlmostEqual(
float(expected_variance), float(variances[0][1]), delta=1e-5)
expected_variance = self._get_mixture_variance(
probs=[1 / 3, 1 / 3, 1 / 3],
means=[1.6, 0.8, 2.4],
stddevs=[2.0, 0.5, 0.05])
self.assertAlmostEqual(
float(expected_variance), float(variances[1][0]), delta=1e-5)
expected_variance = self._get_mixture_variance(
probs=[1 / 3, 1 / 3, 1 / 3],
means=[0.6, 0.5, 0.4],
stddevs=[0.01, 0.2, 0.1])
self.assertAlmostEqual(
float(expected_variance), float(variances[1][1]), delta=1e-5)
@staticmethod
def _get_mixture_variance(probs, means, stddevs):
assert len(probs) == len(means) == len(stddevs)
n = len(probs)
components = []
for i in range(n):
components.append(tfd.Normal(loc=means[i], scale=stddevs[i]))
mixture = tfd.Mixture(
cat=tfd.Categorical(probs=probs), components=components)
variance = mixture.variance()
return variance
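# Illustrative cross-check (an added sketch, not part of the original tests):
# the analytic variance of a normal mixture used above is
#   Var = sum_i p_i * (stddev_i**2 + mean_i**2) - (sum_i p_i * mean_i)**2,
# which is what tfd.Mixture.variance() evaluates inside _get_mixture_variance.
def _mixture_variance_numpy(probs, means, stddevs):
  probs, means, stddevs = (np.asarray(a, dtype=np.float64)
                           for a in (probs, means, stddevs))
  second_moment = np.sum(probs * (stddevs ** 2 + means ** 2))
  return second_moment - np.sum(probs * means) ** 2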
class RegressionNormalLogProbTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(itertools.product(REGRESSION_MODEL_OUTPUT_TYPES))
def test_regression_normal_log_prob_means_and_stddevs_2d(self,
model_output_type):
tensor_model_outputs = tf.constant([
[[0.3, 0.4, np.log(0.01), np.log(0.02)],
[1.6, 0.6, np.log(2.0), np.log(0.01)]],
[[0.2, 0.2, np.log(0.1), np.log(0.2)],
[0.8, 0.5, np.log(0.5), np.log(0.2)]],
[[0.4, 0.6, np.log(1.0), np.log(1.5)],
[2.4, 0.4, np.log(0.05), np.log(0.1)]],
])
labels = tf.constant([[0.2, 0.4], [1.4, 1.0]])
model_outputs = NewRegressionModelOutputs(tensor_model_outputs,
model_output_type,
outputs_with_log_stddevs=True)
ens_reg_outputs = stats.RegressionOutputs(outputs_with_log_stddevs=True)
ens_reg_outputs.update(model_outputs[0])
ens_reg_outputs.update(model_outputs[1])
ens_reg_outputs.update(model_outputs[2])
means, variances = ens_reg_outputs.result()
expected_nll = -tfd.Normal(means, variances**0.5).log_prob(labels)
rnlls = stats.RegressionNormalLogProb(outputs_with_log_stddevs=True)
rnlls.update(model_outputs[0], labels)
rnlls.update(model_outputs[1], labels)
rnlls.update(model_outputs[2], labels)
nlls = rnlls.result()
self.assertAllClose(expected_nll, nlls, atol=TOL)
if __name__ == "__main__":
tf.enable_eager_execution()
tf.test.main()
|
the-stack_0_7672 | import biom
import pandas as pd
import numpy as np
import tensorflow as tf
from skbio import OrdinationResults
from qiime2.plugin import Metadata
from mmvec.multimodal import MMvec
from mmvec.util import split_tables
from scipy.sparse import coo_matrix
from scipy.sparse.linalg import svds
def paired_omics(microbes: biom.Table,
metabolites: biom.Table,
metadata: Metadata = None,
training_column: str = None,
num_testing_examples: int = 5,
min_feature_count: int = 10,
epochs: int = 100,
batch_size: int = 50,
latent_dim: int = 3,
input_prior: float = 1,
output_prior: float = 1,
learning_rate: float = 1e-5,
summary_interval: int = 60) -> (
pd.DataFrame, OrdinationResults
):
if metadata is not None:
metadata = metadata.to_dataframe()
# Note: there are a couple of biom -> pandas conversions taking
# place here. This is currently done on purpose, since we
# haven't figured out how to handle sparse matrix multiplication
# in the context of this algorithm. That is a future consideration.
res = split_tables(
microbes, metabolites,
metadata=metadata, training_column=training_column,
num_test=num_testing_examples,
min_samples=min_feature_count)
(train_microbes_df, test_microbes_df,
train_metabolites_df, test_metabolites_df) = res
train_microbes_coo = coo_matrix(train_microbes_df.values)
test_microbes_coo = coo_matrix(test_microbes_df.values)
with tf.Graph().as_default(), tf.Session() as session:
model = MMvec(
latent_dim=latent_dim,
u_scale=input_prior, v_scale=output_prior,
learning_rate=learning_rate)
model(session,
train_microbes_coo, train_metabolites_df.values,
test_microbes_coo, test_metabolites_df.values)
loss, cv = model.fit(epoch=epochs, summary_interval=summary_interval)
ranks = pd.DataFrame(model.ranks(), index=train_microbes_df.columns,
columns=train_metabolites_df.columns)
u, s, v = svds(ranks - ranks.mean(axis=0), k=latent_dim)
ranks = ranks.T
ranks.index.name = 'featureid'
s = s[::-1]
u = u[:, ::-1]
v = v[::-1, :]
microbe_embed = u @ np.diag(s)
metabolite_embed = v.T
pc_ids = ['PC%d' % i for i in range(microbe_embed.shape[1])]
features = pd.DataFrame(
microbe_embed, columns=pc_ids,
index=train_microbes_df.columns)
samples = pd.DataFrame(
metabolite_embed, columns=pc_ids,
index=train_metabolites_df.columns)
short_method_name = 'mmvec biplot'
long_method_name = 'Multiomics mmvec biplot'
eigvals = pd.Series(s, index=pc_ids)
proportion_explained = pd.Series(s**2 / np.sum(s**2), index=pc_ids)
biplot = OrdinationResults(
short_method_name, long_method_name, eigvals,
samples=samples, features=features,
proportion_explained=proportion_explained)
return ranks, biplot
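# Note on the SVD step above (clarifying sketch): unlike numpy.linalg.svd,
# scipy.sparse.linalg.svds does not return singular values in descending
# order (with the default ARPACK solver they typically come back ascending),
# which is why s, u and v are reversed (s[::-1], u[:, ::-1], v[::-1, :])
# before the biplot is assembled with PC1 first. For example (illustrative):
#   u_, s_, v_ = svds(np.random.randn(20, 30), k=3)   # s_ sorted ascending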
|
the-stack_0_7674 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright 2020-2022 Francesco Di Lauro. All Rights Reserved.
See Licence file for details.
"""
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
import sys
sys.path.append('../')
from Likelihood import log_likelihood_models
from PDE_solver import SIR_PDEroutine
from Selkealgo import Sellke_algo
#This is a fully worked-out example that uses all the methods of this repo
#It is divided into three parts:
# 1) Generate some Data according to some parameters
# 2) Solve the PDE with the same parameters to compare it to data
# 3) Use the likelihood to infer the parameters from the data and plot them
#These three bits are independent, so if one is interested only in solving the
#PDE, they can copy and adapt the relevant bit in new code
if __name__ == "__main__":
np.random.seed(3) #Initialize random seed for reproducibility
#1) Generate some data. In this example, we choose gamma distribution for
#infectious period and exponential distribution for contact intervals
N=10000
T_f = 150
beta = 0.2 #infectiousness
mean = 9 #mean infectious period
variance =6 #variance of infectious period distribution
    scale = variance/mean  # inverse rate
a = mean**2/variance #shape
I_0 = 20 #Initial number of infected people
tau = beta/(N-1) #This is because there is a factor N in the Sellke construction
check_data=False
#First thing one needs is to generate the infectious periods for each node
#Not all the nodes will use that, as not everyone will get infected.
T_recover = stats.gamma.rvs(a=a,scale=scale, size=N)
while check_data==False:
time,I,S,R=Sellke_algo(tau,I_0,N,T_f,T_recover,showplot=False,return_full=False)
if len(time)>200: #Make sure this epidemic is not dying out
check_data=True
plt.figure()
plt.plot(time,I/N, color='black', label='data') #Plot Data
plt.xlim(0,150)
plt.ylim(0)
#If you want to save the data in a Dataframe format
#time, I, S
#data= np.c_[np.array(time),np.array(I),np.array(S)]
#name = "Example data,csv"
#np.savetxt(name,zipped, header='time,I,S')
#======================================================================
#2) solution of the PDE with the true parameters
#We need two quantities:
#1) infectious period/recovery time hazard function
#2) contact interval hazard functions
#Note, in general one can use the fact that the hazard function is
# pdf/survival function
def rec_haz(u, *recovDistParams):
a = float(recovDistParams[0])
scale = float(recovDistParams[1])
tol = 1e-10
#Basically: use de l'hopital when the ratio becomes 0/0
        #Otherwise go with the definition. This greatly regularises the numerics
x = np.where(stats.gamma.cdf(u,a=a,scale=scale)>1-tol,
1/scale - (a-1)/u,
stats.gamma.pdf(u,a=a,scale=scale)/(1- stats.gamma.cdf(u,a=a,scale=scale)))
return x
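    # Generic version of the note above (added illustrative sketch): for any
    # frozen scipy.stats distribution `dist`, the hazard is pdf/survival,
    #   h(u) = dist.pdf(u) / dist.sf(u),
    # without the right-tail regularisation that rec_haz adds for the gamma case.
    def generic_hazard(u, dist):
        return dist.pdf(u) / dist.sf(u)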
def inf_haz(u,*CIdistParms):
beta = float(CIdistParms[0])
return beta*np.ones_like(u)
    grids=T_f*20  # how fine the time solver grid is (finer -> more precise but more time)
rho = I_0/(N-I_0)
pde = SIR_PDEroutine(rho, CIdist=inf_haz, CIdistParms=[beta],\
recovDist=rec_haz, recovDistParms=[a, scale],\
nTgrid=grids, nUgrid=grids, T=T_f)
#Initial condition is a vector as long as the grid that contains the distribution
#of recovery times of initially infected individuals
#In this case should be a delta in 0.
initial_condition=np.zeros_like(pde.tgrids)
initial_condition[0]=1
#Solve the PDE
S_pde,I_pde=pde.finDiffUpdate(intiial_condition=initial_condition)
plt.plot(pde.tgrids,I_pde, color='b', label= 'PDE')
#======================================================================
#3) Maximise the likelihood with infection and recovery times
#We use infection and recovery times from the data generated
infection_times=time[np.where(np.diff(I)>0)]
recovery_times=time[np.where(np.diff(I)<0)]
#We need also the recovery distribution to run the likelihood
def rec_distr(u, *recovDistParams):
a = float(recovDistParams[0])
scale = float(recovDistParams[1])
return stats.gamma.pdf(u,a=a,scale=scale)
ll=log_likelihood_models(grids,hazard_inf=inf_haz,hazard_rec=rec_haz,
rec_distr = rec_distr,
T=T_f, infect_times=infection_times,recov_times=recovery_times,hazard_inf_par=1,rec_parms=2)
result = ll.minimize_likelihood(np.array([5e-4,0.01,1,0.1]), np.array([1e-2,2,20,1]))
parameters=result.x
#Plot the MLE
pde = SIR_PDEroutine(parameters[0], CIdist=inf_haz, CIdistParms=[parameters[1]],\
recovDist=rec_haz, recovDistParms=[parameters[2], parameters[3]],\
nTgrid=grids, nUgrid=grids, T=T_f)
#Initial condition in this case should be a delta in 0.
initial_condition=np.zeros_like(pde.tgrids)
initial_condition[0]=1
#Solve the PDE
S_mle,I_mle=pde.finDiffUpdate(intiial_condition=initial_condition)
plt.plot(pde.tgrids,I_mle, color='r', label= 'MLE')
plt.legend()
|
the-stack_0_7675 | import pickle
from datetime import date
from pytest import raises, fixture
from elasticsearch_dsl import response, Search, Document, Date, Object
from elasticsearch_dsl.aggs import Terms
from elasticsearch_dsl.response.aggs import AggResponse, BucketData, Bucket
@fixture
def agg_response(aggs_search, aggs_data):
return response.Response(aggs_search, aggs_data)
def test_agg_response_is_pickleable(agg_response):
agg_response.hits
r = pickle.loads(pickle.dumps(agg_response))
assert r == agg_response
assert r._search == agg_response._search
assert r.hits == agg_response.hits
def test_response_is_pickleable(dummy_response):
res = response.Response(Search(), dummy_response)
res.hits
r = pickle.loads(pickle.dumps(res))
assert r == res
assert r._search == res._search
assert r.hits == res.hits
def test_hit_is_pickleable(dummy_response):
res = response.Response(Search(), dummy_response)
hits = pickle.loads(pickle.dumps(res.hits))
assert hits == res.hits
assert hits[0].meta == res.hits[0].meta
def test_response_stores_search(dummy_response):
s = Search()
r = response.Response(s, dummy_response)
assert r._search is s
def test_attribute_error_in_hits_is_not_hidden(dummy_response):
def f(hit):
raise AttributeError()
s = Search().doc_type(employee=f)
r = response.Response(s, dummy_response)
with raises(TypeError):
r.hits
def test_interactive_helpers(dummy_response):
res = response.Response(Search(), dummy_response)
hits = res.hits
h = hits[0]
rhits = "[<Hit(test-index/company/elasticsearch): {}>, <Hit(test-index/employee/42): {}...}}>, <Hit(test-index/employee/47): {}...}}>, <Hit(test-index/employee/53): {{}}>]".format(
repr(dummy_response['hits']['hits'][0]['_source']),
repr(dummy_response['hits']['hits'][1]['_source'])[:60],
repr(dummy_response['hits']['hits'][2]['_source'])[:60],
)
assert res
assert '<Response: %s>' % rhits == repr(res)
assert rhits == repr(hits)
assert {'meta', 'city', 'name'} == set(dir(h))
assert "<Hit(test-index/company/elasticsearch): %r>" % dummy_response['hits']['hits'][0]['_source'] == repr(h)
def test_empty_response_is_false(dummy_response):
dummy_response['hits']['hits'] = []
res = response.Response(Search(), dummy_response)
assert not res
def test_len_response(dummy_response):
res = response.Response(Search(), dummy_response)
assert len(res) == 4
def test_iterating_over_response_gives_you_hits(dummy_response):
res = response.Response(Search(), dummy_response)
hits = list(h for h in res)
assert res.success()
assert 123 == res.took
assert 4 == len(hits)
assert all(isinstance(h, response.Hit) for h in hits)
h = hits[0]
assert 'test-index' == h.meta.index
assert 'company' == h.meta.doc_type
assert 'elasticsearch' == h.meta.id
assert 12 == h.meta.score
assert hits[1].meta.routing == 'elasticsearch'
def test_hits_get_wrapped_to_contain_additional_attrs(dummy_response):
res = response.Response(Search(), dummy_response)
hits = res.hits
assert 123 == hits.total
assert 12.0 == hits.max_score
def test_hits_provide_dot_and_bracket_access_to_attrs(dummy_response):
res = response.Response(Search(), dummy_response)
h = res.hits[0]
assert 'Elasticsearch' == h.name
assert 'Elasticsearch' == h['name']
assert 'Honza' == res.hits[2].name.first
with raises(KeyError):
h['not_there']
with raises(AttributeError):
h.not_there
def test_slicing_on_response_slices_on_hits(dummy_response):
res = response.Response(Search(), dummy_response)
assert res[0] is res.hits[0]
assert res[::-1] == res.hits[::-1]
def test_aggregation_base(agg_response):
assert agg_response.aggs is agg_response.aggregations
assert isinstance(agg_response.aggs, response.AggResponse)
def test_metric_agg_works(agg_response):
assert 25052.0 == agg_response.aggs.sum_lines.value
def test_aggregations_can_be_iterated_over(agg_response):
aggs = [a for a in agg_response.aggs]
assert len(aggs) == 3
assert all(map(lambda a: isinstance(a, AggResponse), aggs))
def test_aggregations_can_be_retrieved_by_name(agg_response, aggs_search):
a = agg_response.aggs['popular_files']
assert isinstance(a, BucketData)
assert isinstance(a._meta['aggs'], Terms)
assert a._meta['aggs'] is aggs_search.aggs.aggs['popular_files']
def test_bucket_response_can_be_iterated_over(agg_response):
popular_files = agg_response.aggregations.popular_files
buckets = [b for b in popular_files]
assert all(isinstance(b, Bucket) for b in buckets)
assert buckets == popular_files.buckets
def test_bucket_keys_get_deserialized(aggs_data, aggs_search):
class Commit(Document):
info = Object(properties={'committed_date': Date()})
class Index:
name = 'test-commit'
aggs_search = aggs_search.doc_type(Commit)
agg_response = response.Response(aggs_search, aggs_data)
per_month = agg_response.aggregations.per_month
for b in per_month:
assert isinstance(b.key, date)
|
the-stack_0_7676 | SITEURL = ""
SITENAME = "pelican-jupyter-test"
PATH = "content"
LOAD_CONTENT_CACHE = False
TIMEZONE = "UTC"
DEFAULT_LANG = "en"
THEME = "notmyidea"
# Plugin config
MARKUP = ("md", "ipynb")
from pelican_jupyter import markup as nb_markup # noqa
PLUGINS = [nb_markup]
IPYNB_MARKUP_USE_FIRST_CELL = True
IGNORE_FILES = [".ipynb_checkpoints"]
|
the-stack_0_7677 | from app import create_app
from flask_script import Manager,Server
# Creating app instance
app = create_app('development')
manager = Manager(app)
manager.add_command('server',Server)
if __name__ == '__main__':
manager.run()
|
the-stack_0_7678 | import ctypes
import struct
# 3p
import bson
from bson.codec_options import CodecOptions
from bson.son import SON
# project
from ...ext import net as netx
from ...internal.compat import to_unicode
from ...internal.logger import get_logger
log = get_logger(__name__)
# MongoDB wire protocol commands
# http://docs.mongodb.com/manual/reference/mongodb-wire-protocol
OP_CODES = {
1: "reply",
1000: "msg", # DEV: 1000 was deprecated at some point, use 2013 instead
2001: "update",
2002: "insert",
2003: "reserved",
2004: "query",
2005: "get_more",
2006: "delete",
2007: "kill_cursors",
2010: "command",
2011: "command_reply",
2013: "msg",
}
# The maximum message length we'll try to parse
MAX_MSG_PARSE_LEN = 1024 * 1024
header_struct = struct.Struct("<iiii")
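# The 16-byte standard header packed/unpacked by header_struct is, in order:
# messageLength, requestID, responseTo, opCode (all little-endian int32).
# Illustrative example (not used by the tracer itself):
#   header_struct.pack(40, 1, 0, 2004)  # header of a 40-byte OP_QUERY message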
class Command(object):
"""Command stores information about a pymongo network command,"""
__slots__ = ["name", "coll", "db", "tags", "metrics", "query"]
def __init__(self, name, db, coll):
self.name = name
self.coll = coll
self.db = db
self.tags = {}
self.metrics = {}
self.query = None
def __repr__(self):
return ("Command(" "name=%s," "db=%s," "coll=%s)") % (self.name, self.db, self.coll)
def parse_msg(msg_bytes):
"""Return a command from a binary mongo db message or None if we shouldn't
trace it. The protocol is documented here:
http://docs.mongodb.com/manual/reference/mongodb-wire-protocol
"""
# NOTE[matt] this is used for queries in pymongo <= 3.0.0 and for inserts
# in up to date versions.
msg_len = len(msg_bytes)
if msg_len <= 0:
return None
header = header_struct.unpack_from(msg_bytes, 0)
(length, req_id, response_to, op_code) = header
op = OP_CODES.get(op_code)
if not op:
log.debug("unknown op code: %s", op_code)
return None
db = None
coll = None
offset = header_struct.size
cmd = None
if op == "query":
# NOTE[matt] inserts, updates and queries can all use this opcode
offset += 4 # skip flags
ns = _cstring(msg_bytes[offset:])
offset += len(ns) + 1 # include null terminator
# note: here coll could be '$cmd' because it can be overridden in the
# query itself (like {'insert':'songs'})
db, coll = _split_namespace(ns)
offset += 8 # skip numberToSkip & numberToReturn
if msg_len <= MAX_MSG_PARSE_LEN:
# FIXME[matt] don't try to parse large messages for performance
# reasons. ideally we'd just peek at the first bytes to get
# the critical info (op type, collection, query, # of docs)
# rather than parse the whole thing. i suspect only massive
# inserts will be affected.
codec = CodecOptions(SON)
spec = next(bson.decode_iter(msg_bytes[offset:], codec_options=codec))
cmd = parse_spec(spec, db)
else:
# let's still note that a command happened.
cmd = Command("command", db, "untraced_message_too_large")
# If the command didn't contain namespace info, set it here.
if not cmd.coll:
cmd.coll = coll
elif op == "msg":
# Skip header and flag bits
offset += 4
# Parse the msg kind
kind = ord(msg_bytes[offset : offset + 1])
offset += 1
# Kinds: https://docs.mongodb.com/manual/reference/mongodb-wire-protocol/#sections
# - 0: BSON Object
# - 1: Document Sequence
if kind == 0:
if msg_len <= MAX_MSG_PARSE_LEN:
codec = CodecOptions(SON)
spec = next(bson.decode_iter(msg_bytes[offset:], codec_options=codec))
cmd = parse_spec(spec, db)
else:
# let's still note that a command happened.
cmd = Command("command", db, "untraced_message_too_large")
else:
# let's still note that a command happened.
cmd = Command("command", db, "unsupported_msg_kind")
if cmd:
cmd.metrics[netx.BYTES_OUT] = msg_len
return cmd
def parse_query(query):
"""Return a command parsed from the given mongo db query."""
db, coll = None, None
ns = getattr(query, "ns", None)
if ns:
# version < 3.1 stores the full namespace
db, coll = _split_namespace(ns)
else:
# version >= 3.1 stores the db and coll separately
coll = getattr(query, "coll", None)
db = getattr(query, "db", None)
# pymongo < 3.1 _Query does not have a name field, so default to 'query'
cmd = Command(getattr(query, "name", "query"), db, coll)
cmd.query = query.spec
return cmd
def parse_spec(spec, db=None):
"""Return a Command that has parsed the relevant detail for the given
pymongo SON spec.
"""
# the first element is the command and collection
items = list(spec.items())
if not items:
return None
name, coll = items[0]
cmd = Command(name, db or spec.get("$db"), coll)
if "ordered" in spec: # in insert and update
cmd.tags["mongodb.ordered"] = spec["ordered"]
if cmd.name == "insert":
if "documents" in spec:
cmd.metrics["mongodb.documents"] = len(spec["documents"])
elif cmd.name == "update":
updates = spec.get("updates")
if updates:
# FIXME[matt] is there ever more than one here?
cmd.query = updates[0].get("q")
elif cmd.name == "delete":
dels = spec.get("deletes")
if dels:
# FIXME[matt] is there ever more than one here?
cmd.query = dels[0].get("q")
return cmd
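# Illustrative usage of parse_spec (assumed example, not part of the traced path):
#   spec = SON([("insert", "songs"), ("ordered", True),
#               ("documents", [{"title": "a"}, {"title": "b"}])])
#   cmd = parse_spec(spec, db="music")
# gives cmd.name == "insert", cmd.coll == "songs", cmd.db == "music",
# cmd.tags == {"mongodb.ordered": True} and cmd.metrics == {"mongodb.documents": 2}.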
def _cstring(raw):
"""Return the first null terminated cstring from the buffer."""
return ctypes.create_string_buffer(raw).value
def _split_namespace(ns):
"""Return a tuple of (db, collection) from the 'db.coll' string."""
if ns:
# NOTE[matt] ns is unicode or bytes depending on the client version
# so force cast to unicode
split = to_unicode(ns).split(".", 1)
if len(split) == 1:
raise Exception("namespace doesn't contain period: %s" % ns)
return split
return (None, None)
|
the-stack_0_7679 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import encode, cstr, cint, flt, comma_or
import openpyxl
import re
from openpyxl.styles import Font
from openpyxl import load_workbook
from six import StringIO, BytesIO, string_types
ILLEGAL_CHARACTERS_RE = re.compile(r'[\000-\010]|[\013-\014]|[\016-\037]')
# return xlsx file object
def make_xlsx(data, sheet_name, wb=None):
if wb is None:
wb = openpyxl.Workbook(write_only=True)
ws = wb.create_sheet(sheet_name, 0)
row1 = ws.row_dimensions[1]
row1.font = Font(name='Calibri',bold=True)
for row in data:
clean_row = []
for item in row:
if isinstance(item, string_types) and (sheet_name not in ['Data Import Template', 'Data Export']):
value = handle_html(item)
else:
value = item
if isinstance(item, string_types) and next(ILLEGAL_CHARACTERS_RE.finditer(value), None):
# Remove illegal characters from the string
value = re.sub(ILLEGAL_CHARACTERS_RE, '', value)
clean_row.append(value)
ws.append(clean_row)
xlsx_file = BytesIO()
wb.save(xlsx_file)
return xlsx_file
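# Illustrative usage (assumed call pattern, not part of the original module):
#   xlsx_file = make_xlsx([["Name", "Amount"], ["Alice", 10]], "Data Export")
#   with open("/tmp/export.xlsx", "wb") as f:
#       f.write(xlsx_file.getvalue())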
def handle_html(data):
# return if no html tags found
data = frappe.as_unicode(data)
if '<' not in data:
return data
if '>' not in data:
return data
from html2text import unescape, HTML2Text
h = HTML2Text()
h.unicode_snob = True
h = h.unescape(data or "")
obj = HTML2Text()
obj.ignore_links = True
obj.body_width = 0
value = obj.handle(h)
value = ", ".join(value.split(' \n'))
value = " ".join(value.split('\n'))
value = ", ".join(value.split('# '))
return value
def read_xlsx_file_from_attached_file(file_id=None, fcontent=None, filepath=None):
if file_id:
from frappe.utils.file_manager import get_file_path
filename = get_file_path(file_id)
elif fcontent:
from io import BytesIO
filename = BytesIO(fcontent)
elif filepath:
filename = filepath
else:
return
rows = []
wb1 = load_workbook(filename=filename, read_only=True, data_only=True)
ws1 = wb1.active
for row in ws1.iter_rows():
tmp_list = []
for cell in row:
tmp_list.append(cell.value)
rows.append(tmp_list)
return rows
|
the-stack_0_7680 |
# coding: utf-8
# # Nengo Example: A Single Neuron
# This demo shows you how to construct and manipulate a single leaky integrate-and-fire (LIF) neuron. The LIF neuron is a simple, standard neuron model, and here it resides inside a neural population, even though there is only one neuron.
# In[ ]:
import numpy as np
import matplotlib.pyplot as plt
import nengo
# ##Step 1: Create the Neuron
# In[ ]:
from nengo.dists import Uniform
model = nengo.Network(label='A Single Neuron')
with model:
neuron = nengo.Ensemble(1, dimensions=1, # Represent a scalar
intercepts=Uniform(-.5, -.5), # Set intercept to 0.5
max_rates=Uniform(100, 100), # Set the maximum firing rate of the neuron to 100hz
encoders=[[1]]) # Sets the neurons firing rate to increase for positive input
# ## Step 2: Provide Input to the Model
# Create an input node (here a constant input; the original demo used a cosine wave).
# In[ ]:
with model:
    cos = nengo.Node(240)  # constant input; the original demo used: lambda t: np.cos(8 * t)
# ##Step 3: Connect the Network Elements
# In[ ]:
with model:
# Connect the input signal to the neuron
nengo.Connection(cos, neuron)
# ##Step 4: Add Probes
# Anything that is probed will collect the data it produces over time, allowing us to analyze and visualize it later.
# In[ ]:
with model:
cos_probe = nengo.Probe(cos) # The original input
spikes = nengo.Probe(neuron.neurons) # The raw spikes from the neuron
voltage = nengo.Probe(neuron.neurons, 'voltage') # Subthreshold soma voltage of the neuron
filtered = nengo.Probe(neuron, synapse=0.01) # Spikes filtered by a 10ms post-synaptic filter
# ## Step 5: Run the Model
# In[ ]:
sim = nengo.Simulator(model) # Create the simulator
sim.run(1) # Run it for 1 seconds
# ##Step 6: Plot the Results
# In[ ]:
# Plot the decoded output of the ensemble
plt.plot(sim.trange(), sim.data[filtered])
plt.plot(sim.trange(), sim.data[cos_probe])
plt.xlim(0, 1)
# Plot the spiking output of the ensemble
from nengo.utils.matplotlib import rasterplot
plt.figure(figsize=(10, 8))
plt.subplot(221)
rasterplot(sim.trange(), sim.data[spikes])
plt.ylabel("Neuron")
plt.xlim(0, 1)
# Plot the soma voltages of the neurons
plt.subplot(222)
plt.plot(sim.trange(), sim.data[voltage][:,0], 'r')
plt.xlim(0, 1);
# The top graph shows that the input signal in green and the filtered output spikes from the single neuron population in blue. The spikes (that are filtered) from the neuron are shown in the bottom graph on the left. On the right is the subthreshold voltages for the neuron.
import pylab
pylab.show() |
the-stack_0_7682 | # Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""Shared front-end analyzer specific presubmit script.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import imp
import os.path
import subprocess
def runSmokeTest(input_api, output_api):
hasChangedFiles = False
for git_file in input_api.AffectedTextFiles():
filename = git_file.AbsoluteLocalPath()
if filename.endswith(".dart"):
hasChangedFiles = True
break
if hasChangedFiles:
local_root = input_api.change.RepositoryRoot()
utils = imp.load_source('utils',
os.path.join(local_root, 'tools', 'utils.py'))
dart = os.path.join(utils.CheckedInSdkPath(), 'bin', 'dart')
smoke_test = os.path.join(local_root, 'pkg', '_fe_analyzer_shared',
'tool', 'smoke_test_quick.dart')
windows = utils.GuessOS() == 'win32'
if windows:
dart += '.exe'
if not os.path.isfile(dart):
print('WARNING: dart not found: %s' % dart)
return []
if not os.path.isfile(smoke_test):
print('WARNING: _fe_analyzer_shared smoke test not found: %s' %
smoke_test)
return []
args = [dart, smoke_test]
process = subprocess.Popen(
args, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
outs, _ = process.communicate()
if process.returncode != 0:
return [
output_api.PresubmitError(
'_fe_analyzer_shared smoke test failure(s):',
long_text=outs)
]
return []
def CheckChangeOnCommit(input_api, output_api):
return runSmokeTest(input_api, output_api)
def CheckChangeOnUpload(input_api, output_api):
return runSmokeTest(input_api, output_api)
|
the-stack_0_7683 | """
This file is part of the accompanying code to our manuscript:
Kratzert, F., Klotz, D., Shalev, G., Klambauer, G., Hochreiter, S., Nearing, G., "Benchmarking
a Catchment-Aware Long Short-Term Memory Network (LSTM) for Large-Scale Hydrological Modeling".
submitted to Hydrol. Earth Syst. Sci. Discussions (2019)
You should have received a copy of the Apache-2.0 license along with the code. If not,
see <https://opensource.org/licenses/Apache-2.0>
"""
import json
import pickle
from collections import defaultdict
from pathlib import PosixPath
from typing import Callable, Dict, List, Tuple
import numpy as np
import tqdm
import xarray
from scipy.stats import wilcoxon
def get_run_dirs(root_dir: PosixPath, model: str, loss: str) -> List:
"""Get all folders that are trained for a specific model configuration
Parameters
----------
root_dir : PosixPath
Path to the folder containing all model runs.
model : str
One of ['ealstm', 'lstm', 'lstm_no_static'], defining the model type to find.
loss : str
One of ['NSELoss', 'MSELoss'], defining the loss function that the model was trained for.
Returns
-------
List
List of PosixPaths, where each path points to the folder of one model run.
Raises
------
ValueError
If an invalid model type was passed.
ValueError
If an invalid loss type was passed.
RuntimeError
If root directory contains no subfolder.
"""
valid_models = ["ealstm", "lstm", "lstm_no_static"]
if not model in valid_models:
raise ValueError(f"`model` must be one of {valid_models}")
valid_loss = ['MSELoss', 'NSELoss']
if not loss in valid_loss:
raise ValueError(f"`loss` must be one of {valid_loss}")
folders = list(root_dir.glob('*/'))
if len(folders) == 0:
raise RuntimeError(f"No subfolders found in {root_dir}")
run_dirs = []
for folder in folders:
if folder.is_dir():
with open(folder / "cfg.json", "r") as fp:
cfg = json.load(fp)
if (model == "ealstm") and (not cfg["concat_static"]) and (not cfg["no_static"]):
if (loss == "NSELoss") and (not cfg["use_mse"]):
run_dirs.append(folder)
elif (loss == "MSELoss") and (cfg["use_mse"]):
run_dirs.append(folder)
else:
pass
if (model == "lstm") and (cfg["concat_static"]) and (not cfg["no_static"]):
if (loss == "NSELoss") and (not cfg["use_mse"]):
run_dirs.append(folder)
elif (loss == "MSELoss") and (cfg["use_mse"]):
run_dirs.append(folder)
else:
pass
if (model == "lstm_no_static") and (cfg["no_static"]):
if (loss == "NSELoss") and (not cfg["use_mse"]):
run_dirs.append(folder)
elif (loss == "MSELoss") and (cfg["use_mse"]):
run_dirs.append(folder)
else:
pass
return run_dirs
def eval_benchmark_models(netcdf_folder: PosixPath, func: Callable) -> dict:
"""Evaluate benchmark models on specific metric function.
Parameters
----------
netcdf_folder : PosixPath
Directory, containing basin-wise netcdf files, which contain the benchmark model simulations
func : Callable
The metric function to evaluate. Must satisfy the func(obs, sim) convention.
Returns
-------
dict
Dictionary, containing the metric values of each basin and each benchmark model.
"""
nc_files = list(netcdf_folder.glob('*.nc'))
benchmark_models = defaultdict(dict)
for nc_file in tqdm.tqdm(nc_files):
basin = nc_file.name[:8]
xr = xarray.open_dataset(nc_file)
for key in xr.keys():
if key != 'QObs':
obs = xr['QObs'].values
sim = xr[key].values
sim = sim[obs >= 0]
obs = obs[obs >= 0]
value = func(obs, sim)
if np.isnan(value):
print(f"{key}: {nc_file}")
else:
benchmark_models[key][basin] = value
return benchmark_models
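def example_nse(obs, sim):
    """Illustrative metric following the func(obs, sim) convention used above.
    Nash-Sutcliffe efficiency, shown only as an example of a valid `func`
    argument for eval_benchmark_models/eval_lstm_models; it is not part of the
    original benchmarking code.
    """
    obs = np.asarray(obs, dtype=float)
    sim = np.asarray(sim, dtype=float)
    return 1 - np.sum((sim - obs) ** 2) / np.sum((obs - obs.mean()) ** 2)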
def eval_lstm_models(run_dirs: List, func: Callable) -> dict:
"""Evaluate LSTM outputs on specific metric function.
Returns the metric for each basin in each seed, as well as the results of the ensemble mean.
Parameters
----------
run_dirs : List
List of PosixPaths pointing to the different model directories.
func : Callable
The metric function to evaluate. Must satisfy the func(obs, sim) convention.
Returns
-------
dict
Dictionary, containing the metric value for each basin of each random seed, as well as the
ensemble mean.
"""
single_models = {}
model_ensemble = defaultdict(dict)
for run_dir in tqdm.tqdm(run_dirs):
check_eval_file = list(run_dir.glob("*.p"))
if check_eval_file:
eval_file = check_eval_file[0]
parts = eval_file.name.split('_')
seed = parts[-1][:-2]
single_models[seed] = {}
with eval_file.open("rb") as fp:
data = pickle.load(fp)
for basin, df in data.items():
obs = df["qobs"].values
sim = df["qsim"].values
sim = sim[obs >= 0]
obs = obs[obs >= 0]
single_models[seed][basin] = func(obs, sim)
if basin not in model_ensemble.keys():
model_ensemble[basin]["df"] = df
else:
model_ensemble[basin]["df"]["qsim"] += df["qsim"]
ensemble_nse = {}
for basin, data in model_ensemble.items():
obs = data["df"]["qobs"].values
sim = data["df"]["qsim"].values / len(single_models.keys())
sim = sim[obs >= 0]
obs = obs[obs >= 0]
ensemble_nse[basin] = func(obs, sim)
single_models["ensemble"] = ensemble_nse
return single_models
def get_pvals(metrics: dict, model1: str, model2: str) -> Tuple[List, float]:
"""[summary]
Parameters
----------
metrics : dict
Dictionary, containing the metric values of both models for all basins.
model1 : str
String, defining the first model to take. Must be a key in `metrics`
model2 : str
String, defining the second model to take. Must be a key in `metrics`
Returns
-------
    p_val_single : float
        P-value of the Wilcoxon test on the per-basin mean performance of both models.
    p_val_ensemble : float
        P-value of the Wilcoxon test between the ensemble means of both models.
"""
# p-values between mean performance per basin of both models
metric_model1 = get_mean_basin_performance(metrics, model1)
metric_model2 = get_mean_basin_performance(metrics, model2)
_, p_val_single = wilcoxon(list(metric_model1.values()), list(metric_model2.values()))
# p-value between ensemble means
_, p_val_ensemble = wilcoxon(list(metrics[model1]["ensemble"].values()),
list(metrics[model2]["ensemble"].values()))
return p_val_single, p_val_ensemble
def get_mean_basin_performance(metrics: dict, model: str) -> Dict:
"""Get the mean performance per basin for a given model
Parameters
----------
metrics : dict
Dictionary containing all evaluation metrics
model : str
Model identifier string
Returns
-------
Dict
Dictionary containing for each basin a key and the value is the mean performance.
"""
seeds = [k for k in metrics[model].keys() if k != "ensemble"]
metric = defaultdict(list)
for seed in seeds:
for basin, nse in metrics[model][seed].items():
metric[basin].append(nse)
return {basin: np.mean(values) for basin, values in metric.items()}
def get_cohens_d(values1: List, values2: List) -> float:
"""Calculate Cohen's Effect size
Parameters
----------
values1 : List
List of model performances of model 1
values2 : List
List of model performances of model 2
Returns
-------
float
Cohen's d
"""
s = np.sqrt(((len(values1) - 1) * np.var(values1) + (len(values2) - 1) * np.var(values2)) /
(len(values1) + len(values2) - 2))
d = (np.abs(np.mean(values1) - np.mean(values2))) / s
return d
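# Illustrative usage (hypothetical per-basin NSE values, not real results):
#   d = get_cohens_d([0.71, 0.68, 0.70], [0.62, 0.60, 0.65])
# i.e. the absolute difference of the two sample means divided by the pooled
# standard deviation s computed above.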
|
the-stack_0_7685 | # Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
import numpy
import numba
import time
from MiniFramework.ConvWeightsBias import *
from MiniFramework.ConvLayer import *
from MiniFramework.HyperParameters_4_2 import *
def calculate_output_size(input_h, input_w, filter_h, filter_w, padding, stride=1):
output_h = (input_h - filter_h + 2 * padding) // stride + 1
output_w = (input_w - filter_w + 2 * padding) // stride + 1
return (output_h, output_w)
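# Quick check of the formula above (illustrative): a 28x28 input with a 3x3
# filter, padding 1 and stride 1 keeps its spatial size, since
# (28 - 3 + 2*1) // 1 + 1 == 28.
assert calculate_output_size(28, 28, 3, 3, padding=1, stride=1) == (28, 28)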
def test_performance():
batch_size = 64
params = HyperParameters_4_2(
0.1, 1, batch_size,
net_type=NetType.MultipleClassifier,
init_method=InitialMethod.Xavier)
stride = 1
padding = 1
fh = 3
fw = 3
input_channel = 3
output_channel = 4
iw = 28
ih = 28
    # 64 input images of shape 3 x 28 x 28 (mimicking MNIST)
x = np.random.randn(batch_size, input_channel, iw, ih)
c1 = ConvLayer((input_channel,iw,ih), (output_channel,fh,fw), (stride, padding), params)
c1.initialize("test", "test", False)
# dry run
for i in range(5):
f1 = c1.forward_numba(x)
delta_in = np.ones((f1.shape))
b1, dw1, db1 = c1.backward_numba(delta_in, 1)
# run
s1 = time.time()
for i in range(100):
f1 = c1.forward_numba(x)
b1, dw1, db1 = c1.backward_numba(delta_in, 1)
e1 = time.time()
print("method numba:", e1-s1)
# dry run
for i in range(5):
f2 = c1.forward_img2col(x)
b2, dw2, db2 = c1.backward_col2img(delta_in, 1)
# run
s2 = time.time()
for i in range(100):
f2 = c1.forward_img2col(x)
b2, dw2, db2 = c1.backward_col2img(delta_in, 1)
e2 = time.time()
print("method img2col:", e2-s2)
print("compare correctness of method 1 and method 2:")
print("forward:", np.allclose(f1, f2, atol=1e-7))
print("backward:", np.allclose(b1, b2, atol=1e-7))
print("dW:", np.allclose(dw1, dw2, atol=1e-7))
print("dB:", np.allclose(db1, db2, atol=1e-7))
if __name__ == '__main__':
test_performance()
|
the-stack_0_7686 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import json
import random
import re
import time
import cv2
import numpy as np
from PIL import Image
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver import ChromeOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from app import get_random_float
from app.slider import base64_to_image
"""
Slider handling for the JD registration page.
"""
class JD_Register(object):
def __init__(self, url, username, pwd=''):
super(JD_Register, self).__init__()
        # actual URL
self.url = url
options = ChromeOptions()
options.add_experimental_option('excludeSwitches', ['enable-automation'])
self.driver = webdriver.Chrome(options=options)
self.wait = WebDriverWait(self.driver, 10)
        # account information
self.username = username
self.password = pwd
        # temporary paths for the downloaded images
self.target_path = "./static/temp/target_reg.png"
self.template_path = "./static/temp/template_reg.png"
        # zoom ratio of the page images
self.zoom = 1
def open(self, url=None):
self.driver.get(url if url else self.url)
def close(self):
self.driver.close()
def refresh(self):
self.driver.refresh()
def main(self):
"""
        Program entry point.
:return:
"""
print('是否打开页面?y:是;其它:跳过;')
is_open = input()
if is_open and is_open.lower() == 'y':
self.open()
print('是否开始程序?y:是;其它:退出;')
is_star = input()
if is_star and is_star.lower() == 'y':
self._init()
self._crack_slider()
def _init(self):
"""
        Log in (fill in the account and click).
:return:
"""
print("填写账号")
input_ele = self.driver.find_element_by_id('form-phone')
input_ele.clear()
# username
time.sleep(random.uniform(0.1, 0.5))
input_ele.send_keys(self.username[0:3])
time.sleep(random.uniform(0.5, 0.8))
input_ele.send_keys(self.username[3:])
print("点击登录")
        time.sleep(random.uniform(0.2, 0.8))
login_ele = self.driver.find_element_by_xpath('//*[@id="step1-wrap"]/div[2]/div[1]')
ActionChains(self.driver).move_to_element(login_ele).perform()
ActionChains(self.driver).move_by_offset(12, 5).perform()
login_ele.click()
    # slider
def _crack_slider(self):
"""
        Crack the slider captcha.
:return:
"""
        # fetch the captcha images
pic_success = self._get_pic()
if pic_success:
            # template matching
target = cv2.imread(self.target_path)
template = cv2.imread(self.template_path)
distance = self._match_templet(target, template)
print("位移距离 distance = %d" % distance)
            # movement track
tracks = self._get_tracks3(distance * self.zoom)
            # move the slider
self._slider_action(tracks)
            # check whether login succeeded
print('是否继续测试?y:是;其它:退出')
is_go_on = input()
if is_go_on and is_go_on.lower() == 'y':
print("开始下一次尝试")
return self._crack_slider()
else:
return False
def _get_pic(self):
"""
        Download the captcha images locally.
:return:
"""
print("查找缺口图片")
time.sleep(1)
target = self.wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id="slideAuthCode"]/div/div[1]/div[2]/div[1]/img')))
template = self.wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id="slideAuthCode"]/div/div[1]/div[2]/div[2]/img')))
if target and template:
print("开始下载图片")
target_base64 = target.get_attribute('src')
template_base64 = template.get_attribute('src')
target_base64_str = re.sub(r'data:[a-z]*/[a-z]*;base64,', '', target_base64)
template_base64_str = re.sub(r'data:[a-z]*/[a-z]*;base64,', '', template_base64)
# save
base64_to_image(target_base64_str, self.target_path)
base64_to_image(template_base64_str, self.template_path)
time.sleep(1)
# zoom
local_img = Image.open(self.target_path)
size_loc = local_img.size
self.zoom = 364 / int(size_loc[0])
print("计算缩放比例 zoom = %f" % round(self.zoom, 4))
return True
else:
print("未找到缺口图片")
return False
def _slider_action(self, tracks):
"""
        Move the slider along the given tracks.
:return:
"""
print("开始移动滑块")
        # press and hold the slider
slider = self.wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="slideAuthCode"]/div/div[2]/div[3]')))
if slider:
ActionChains(self.driver).click_and_hold(slider).perform()
            # forward movement
for track in tracks['forward_tracks']:
yoffset_random = random.uniform(-2, 4)
ActionChains(self.driver).move_by_offset(xoffset=track, yoffset=yoffset_random).perform()
time.sleep(random.uniform(0.06, 0.5))
            # backward movement
for back_tracks in tracks['back_tracks']:
yoffset_random = random.uniform(-2, 2)
ActionChains(self.driver).move_by_offset(xoffset=back_tracks, yoffset=yoffset_random).perform()
            # final jitter
ActionChains(self.driver).move_by_offset(
xoffset=get_random_float(0, -1.67),
yoffset=get_random_float(-1, 1)
).perform()
ActionChains(self.driver).move_by_offset(
xoffset=get_random_float(0, 1.67),
yoffset=get_random_float(-1, 1)
).perform()
time.sleep(get_random_float(0.2, 0.6))
ActionChains(self.driver).release().perform()
print("滑块移动成功")
return True
else:
print("未找到滑块")
return False
    # test / verification helpers
def _match_profile(self, image_path):
"""
        Find the gap position via contour detection.
        :param image_path: image containing the gap
:return:
"""
image = cv2.imread(image_path)
blurred = cv2.GaussianBlur(image, (5, 5), 0)
# canny = cv2.Canny(blurred, 200, 400)
canny = cv2.Canny(blurred, 50, 370)
cv2.imshow('image2', blurred)
cv2.imshow('image3', canny)
cv2.imshow('image4', image)
"""
        It returns the processed image, the contour point sets and the hierarchy of each contour level.
"""
binary, contours, hierarchy = cv2.findContours(canny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# binary, contours, hierarchy = cv2.findContours(canny, 3, cv2.CHAIN_APPROX_SIMPLE)
for i, contour in enumerate(contours):
M = cv2.moments(contour)
if M['m00'] == 0:
cx = cy = 0
else:
cx, cy = M['m10'] / M['m00'], M['m01'] / M['m00']
            # contour filtering
if 20 < cv2.contourArea(contour) < 2000 and 50 < cv2.arcLength(contour, True) < 350:
# if cx < 400:
# continue
                x, y, w, h = cv2.boundingRect(contour)  # bounding rectangle
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
cv2.imshow('image1', image)
print("选择的值 :area = {}, length = {}, cx = {}, cy = {}".format(
cv2.contourArea(contour),
cv2.arcLength(contour, True),
cx,
cy
))
print("选择的值 :x = {}, y = {}, w = {}, h = {}".format(x, y, w, h))
cv2.imshow('image1-1', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
return 0
def _match_templet(self, img_target, img_template):
"""
        Template matching (used to locate the gap).
        :param img_target: background image containing the gap
        :param img_template: slider image of the gap piece
        :return: x-axis distance of the gap position
"""
print("图片缺口模板匹配")
        # preprocess the slider image
        tpl = self.__handle_slider_img(img_template)  # the main error source is the slider's white background
# cv2.imshow("template", tpl)
        # Gaussian blur
blurred = cv2.GaussianBlur(img_target, (3, 3), 0)
# cv2.imshow("blurred2", blurred)
        # convert to grayscale
gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
# cv2.imshow("gray2", gray)
width, height = tpl.shape[:2]
        # binarization (for JD the binarized result is not great)
# ret, target = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
# ret, target = cv2.threshold(gray, 110, 255, cv2.THRESH_BINARY)
# cv2.imshow("target", target)
        # template matching on the binarized image
        # result = cv2.matchTemplate(target, tpl, cv2.TM_CCOEFF_NORMED)  # using the binarized image
        # template matching on the grayscale image
        result = cv2.matchTemplate(gray, tpl, cv2.TM_CCOEFF_NORMED)  # using the grayscale image
print("result = {}".format(len(np.where(result >= 0.5)[0])))
        # find the location of the best match in the result array
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
left_up = max_loc
right_down = (left_up[0] + height, left_up[1] + width)
cv2.rectangle(img_target, left_up, right_down, (7, 279, 151), 2)
print('匹配结果区域起点x坐标为:%d' % max_loc[0])
# cv2.imshow('dectected', img_target)
return left_up[0]
def __handle_slider_img(self, image):
"""
        Binarize the slider image.
        :param image: OpenCV image object
:return:
"""
        kernel = np.ones((8, 8), np.uint8)  # kernel for removing the slider's foreground noise
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # convert to grayscale
        # gray out the background
width, heigth = gray.shape
for h in range(heigth):
for w in range(width):
if gray[w, h] == 0:
gray[w, h] = 96
# cv2.imshow('gray', gray)
        # exclude the background
binary = cv2.inRange(gray, 96, 96)
        res = cv2.morphologyEx(binary, cv2.MORPH_OPEN, kernel)  # opening operation to remove white noise
# cv2.imshow('res1', res)
return res
def _get_gap(self, img1, img2):
"""
        Get the gap offset.
        :param img1: image without the gap
        :param img2: image with the gap
:return:
"""
left = 68
for i in range(left, img1.size[0]):
for j in range(img1.size[1]):
if not self.__is_pixel_equal(img1, img2, i, j):
left = i
return left
return left
def __is_pixel_equal(self, img1, img2, x, y):
"""
        Check whether two pixels are approximately equal.
:param img1:
:param img2:
:param x:
:param y:
:return:
"""
        # take the pixel from each image
pix1 = img1.load()[x, y]
pix2 = img2.load()[x, y]
threshold = 60
if (abs(pix1[0] - pix2[0] < threshold) and abs(pix1[1] - pix2[1] < threshold) and abs(
pix1[2] - pix2[2] < threshold)):
return True
else:
return False
def _get_cookie(self):
cookie_items = self.driver.get_cookies()
ck_dict = {}
for cookie in cookie_items:
ck_dict[cookie['name']] = cookie['value']
print("cookie = %s" % ck_dict)
self._save_to_file(json.dumps(ck_dict, separators=(',', ':'), ensure_ascii=False))
# self.driver.quit()
def _save_to_file(self, str_data):
file = None
try:
file = open("../static/temp/cookie.txt", "w")
file.write(str_data)
except:
print("保存cookie异常")
finally:
if file:
file.close()
    # ---- drag track calculation: start ----
def _get_tracks0(self, distance):
"""
        Get movement track 1 from the offset distance.
        :param distance: offset distance
        :return: movement track
"""
trace = []
mid = distance * 3 / 5
        # set initial position, initial velocity and time step
current, v, t = 0, 0, 0.2
distance += 20
while current < distance:
if current < mid:
a = 2
else:
a = -3
s = v * t + 0.5 * a * (t ** 2)
v = v + a * t
current += s
trace.append(round(s))
back_tracks = [-3, -3, -2, -2, -2, -2, -2, -1, -1, -1]
return {'forward_tracks': trace, 'back_tracks': back_tracks}
def _get_tracks1(self, distance):
"""
        Get movement track 1 from the offset distance.
        :param distance: offset distance
        :return: movement track
"""
trace = []
mid = distance * round(random.uniform(3, 4), 4) / 5
# 设置初始位置、初始速度、时间间隔
current, v, t = 0, 500, 0.005
distance += 20
while current < distance:
if current < mid:
a = random.uniform(2.4, 2.8)
else:
a = random.uniform(-3, -2)
s = v * t + 0.5 * a * (t ** 2)
v = v + a * t
current += s
trace.append(round(s))
back_tracks = [-3, -3, -3, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1]
return {'forward_tracks': trace, 'back_tracks': back_tracks}
def _get_tracks3(self, distance):
"""
        Get movement track 3 from the offset distance.
        :param distance: offset distance
        :return: movement track
"""
track = []
mid1 = round(distance * random.uniform(0.1, 0.2))
mid2 = round(distance * random.uniform(0.65, 0.76))
mid3 = round(distance * random.uniform(0.84, 0.88))
        # set initial position, initial velocity and time step
current, v, t = 0, 0, 0.2
distance = round(distance)
while current < distance:
            # four acceleration phases
if current < mid1:
a = random.randint(10, 15)
elif current < mid2:
a = random.randint(30, 40)
elif current < mid3:
a = -70
else:
a = random.randint(-25, -18)
            # initial velocity v0
v0 = v
            # current velocity v = v0 + a*t
v = v0 + a * t
v = v if v >= 0 else 0
move = v0 * t + 1 / 2 * a * (t ** 2)
move = round(move if move >= 0 else 1)
            # current displacement
current += move
            # append to the track
track.append(move)
print("current={}, distance={}".format(current, distance))
        # overshoot correction
back_tracks = []
out_range = distance - current
if out_range < -8:
sub = int(out_range + 8)
back_tracks = [-1, sub, -3, -1, -1, -1, -1]
elif out_range < -2:
sub = int(out_range + 3)
back_tracks = [-1, -1, sub]
print("forward_tracks={}, back_tracks={}".format(track, back_tracks))
return {'forward_tracks': track, 'back_tracks': back_tracks}
def _get_tracks4(self, distance):
"""
        Compute the movement track from the offset, simulating manual dragging.
        :param distance: offset distance
        :return: movement track
"""
        # movement track
tracks = []
        # current displacement
current = 0
        # deceleration threshold
mid = distance * 4 / 5
        # time step
t = 0.2
        # initial velocity
v = 0
while current < distance:
if current < mid:
a = random.uniform(2, 5)
else:
a = -(random.uniform(12.5, 13.5))
v0 = v
v = v0 + a * t
x = v0 * t + 1 / 2 * a * t * t
current += x
if 0.6 < current - distance < 1:
x = x - 0.53
tracks.append(round(x, 2))
elif 1 < current - distance < 1.5:
x = x - 1.4
tracks.append(round(x, 2))
elif 1.5 < current - distance < 3:
x = x - 1.8
tracks.append(round(x, 2))
else:
tracks.append(round(x, 2))
print(sum(tracks))
return {'forward_tracks': tracks, 'back_tracks': []}
    # ---- drag track calculation: end ----
if __name__ == '__main__':
c = JD_Register(url='https://reg.jd.com/p/regPage', username='15812344455')
c.main()
|
the-stack_0_7687 | # -*- coding: utf-8 -*-
# -------------------------------------------------------------------------
# This scaffolding model makes your app work on Google App Engine too
# File is released under public domain and you can use without limitations
# -------------------------------------------------------------------------
db = DAL('sqlite://storage.sqlite', migrate=True)
from gluon.tools import *
auth = Auth(db)
auth.define_tables()
crud = Crud(db)
db.define_table('page',
Field('title'),
Field('body', 'text'),
Field('created_on', 'datetime', default=request.now),
Field('created_by', 'reference auth_user', default=auth.user_id),
format='%(title)s')
db.define_table('post',
Field('page_id', 'reference page'),
Field('body', 'text'),
Field('created_on', 'datetime', default=request.now),
Field('created_by', 'reference auth_user', default=auth.user_id))
db.define_table('document',
Field('page_id', 'reference page'),
Field('name'),
Field('file', 'upload'),
Field('created_on', 'datetime', default=request.now),
Field('created_by', 'reference auth_user', default=auth.user_id),
format='%(name)s')
db.page.title.requires = IS_NOT_IN_DB(db, 'page.title')
db.page.body.requires = IS_NOT_EMPTY()
db.page.created_by.readable = db.page.created_by.writable = False
db.page.created_on.readable = db.page.created_on.writable = False
db.post.body.requires = IS_NOT_EMPTY()
db.post.page_id.readable = db.post.page_id.writable = False
db.post.created_by.readable = db.post.created_by.writable = False
db.post.created_on.readable = db.post.created_on.writable = False
db.document.name.requires = IS_NOT_IN_DB(db, 'document.name')
db.document.page_id.readable = db.document.page_id.writable = False
db.document.created_by.readable = db.document.created_by.writable = False
db.document.created_on.readable = db.document.created_on.writable = False
if request.global_settings.web2py_version < "2.14.1":
raise HTTP(500, "Requires web2py 2.13.3 or newer")
# -------------------------------------------------------------------------
# if SSL/HTTPS is properly configured and you want all HTTP requests to
# be redirected to HTTPS, uncomment the line below:
# -------------------------------------------------------------------------
# request.requires_https()
# -------------------------------------------------------------------------
# app configuration made easy. Look inside private/appconfig.ini
# -------------------------------------------------------------------------
from gluon.contrib.appconfig import AppConfig
# -------------------------------------------------------------------------
# once in production, remove reload=True to gain full speed
# -------------------------------------------------------------------------
myconf = AppConfig(reload=True)
if not request.env.web2py_runtime_gae:
# ---------------------------------------------------------------------
# if NOT running on Google App Engine use SQLite or other DB
# ---------------------------------------------------------------------
db = DAL(myconf.get('db.uri'),
pool_size=myconf.get('db.pool_size'),
migrate_enabled=myconf.get('db.migrate'),
check_reserved=['all'])
else:
# ---------------------------------------------------------------------
# connect to Google BigTable (optional 'google:datastore://namespace')
# ---------------------------------------------------------------------
db = DAL('google:datastore+ndb')
# ---------------------------------------------------------------------
# store sessions and tickets there
# ---------------------------------------------------------------------
session.connect(request, response, db=db)
# ---------------------------------------------------------------------
# or store session in Memcache, Redis, etc.
# from gluon.contrib.memdb import MEMDB
# from google.appengine.api.memcache import Client
# session.connect(request, response, db = MEMDB(Client()))
# ---------------------------------------------------------------------
# -------------------------------------------------------------------------
# by default give a view/generic.extension to all actions from localhost
# none otherwise. a pattern can be 'controller/function.extension'
# -------------------------------------------------------------------------
response.generic_patterns = ['*'] if request.is_local else []
# -------------------------------------------------------------------------
# choose a style for forms
# -------------------------------------------------------------------------
response.formstyle = myconf.get('forms.formstyle') # or 'bootstrap3_stacked' or 'bootstrap2' or other
response.form_label_separator = myconf.get('forms.separator') or ''
# -------------------------------------------------------------------------
# (optional) optimize handling of static files
# -------------------------------------------------------------------------
# response.optimize_css = 'concat,minify,inline'
# response.optimize_js = 'concat,minify,inline'
# -------------------------------------------------------------------------
# (optional) static assets folder versioning
# -------------------------------------------------------------------------
# response.static_version = '0.0.0'
# -------------------------------------------------------------------------
# Here is sample code if you need for
# - email capabilities
# - authentication (registration, login, logout, ... )
# - authorization (role based authorization)
# - services (xml, csv, json, xmlrpc, jsonrpc, amf, rss)
# - old style crud actions
# (more options discussed in gluon/tools.py)
# -------------------------------------------------------------------------
from gluon.tools import Auth, Service, PluginManager
# host names must be a list of allowed host names (glob syntax allowed)
auth = Auth(db, host_names=myconf.get('host.names'))
service = Service()
plugins = PluginManager()
# -------------------------------------------------------------------------
# create all tables needed by auth if not custom tables
# -------------------------------------------------------------------------
auth.define_tables(username=False, signature=False)
# -------------------------------------------------------------------------
# configure email
# -------------------------------------------------------------------------
mail = auth.settings.mailer
mail.settings.server = 'logging' if request.is_local else myconf.get('smtp.server')
mail.settings.sender = myconf.get('smtp.sender')
mail.settings.login = myconf.get('smtp.login')
mail.settings.tls = myconf.get('smtp.tls') or False
mail.settings.ssl = myconf.get('smtp.ssl') or False
# -------------------------------------------------------------------------
# configure auth policy
# -------------------------------------------------------------------------
auth.settings.registration_requires_verification = False
auth.settings.registration_requires_approval = False
auth.settings.reset_password_requires_verification = True
# -------------------------------------------------------------------------
# Define your tables below (or better in another model file) for example
#
# >>> db.define_table('mytable', Field('myfield', 'string'))
#
# Fields can be 'string','text','password','integer','double','boolean'
# 'date','time','datetime','blob','upload', 'reference TABLENAME'
# There is an implicit 'id integer autoincrement' field
# Consult manual for more options, validators, etc.
#
# More API examples for controllers:
#
# >>> db.mytable.insert(myfield='value')
# >>> rows = db(db.mytable.myfield == 'value').select(db.mytable.ALL)
# >>> for row in rows: print row.id, row.myfield
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# after defining tables, uncomment below to enable auditing
# -------------------------------------------------------------------------
# auth.enable_record_versioning(db)
|
the-stack_0_7688 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numbers
from abc import ABCMeta, abstractmethod
from typing import Any, TYPE_CHECKING, Union
from pandas.api.types import CategoricalDtype
from pyspark.sql.types import (
ArrayType,
BinaryType,
BooleanType,
DataType,
DateType,
FractionalType,
IntegralType,
MapType,
NumericType,
StringType,
StructType,
TimestampType,
)
import pyspark.sql.types as types
from pyspark.pandas.base import IndexOpsMixin
from pyspark.pandas.typedef import Dtype
if TYPE_CHECKING:
from pyspark.pandas.indexes import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
def is_valid_operand_for_numeric_arithmetic(
operand: Any,
*,
allow_bool: bool = True
) -> bool:
"""Check whether the operand is valid for arithmetic operations against numerics."""
if isinstance(operand, numbers.Number) and not isinstance(operand, bool):
return True
elif isinstance(operand, IndexOpsMixin):
if isinstance(operand.dtype, CategoricalDtype):
return False
else:
return isinstance(operand.spark.data_type, NumericType) or (
allow_bool and isinstance(operand.spark.data_type, BooleanType))
else:
return False
def transform_boolean_operand_to_numeric(operand: Any, spark_type: types.DataType) -> Any:
"""Transform boolean operand to the given numeric spark_type.
Return the transformed operand if the operand is a boolean IndexOpsMixin,
otherwise return the original operand.
"""
if isinstance(operand, IndexOpsMixin) and isinstance(operand.spark.data_type, BooleanType):
return operand.spark.transform(lambda scol: scol.cast(spark_type))
else:
return operand
class DataTypeOps(object, metaclass=ABCMeta):
"""The base class for binary operations of pandas-on-Spark objects (of different data types)."""
def __new__(cls, dtype: Dtype, spark_type: DataType):
from pyspark.pandas.data_type_ops.binary_ops import BinaryOps
from pyspark.pandas.data_type_ops.boolean_ops import BooleanOps
from pyspark.pandas.data_type_ops.categorical_ops import CategoricalOps
from pyspark.pandas.data_type_ops.complex_ops import ArrayOps, MapOps, StructOps
from pyspark.pandas.data_type_ops.date_ops import DateOps
from pyspark.pandas.data_type_ops.datetime_ops import DatetimeOps
from pyspark.pandas.data_type_ops.num_ops import (
IntegralOps,
FractionalOps,
)
from pyspark.pandas.data_type_ops.string_ops import StringOps
if isinstance(dtype, CategoricalDtype):
return object.__new__(CategoricalOps)
elif isinstance(spark_type, FractionalType):
return object.__new__(FractionalOps)
elif isinstance(spark_type, IntegralType):
return object.__new__(IntegralOps)
elif isinstance(spark_type, StringType):
return object.__new__(StringOps)
elif isinstance(spark_type, BooleanType):
return object.__new__(BooleanOps)
elif isinstance(spark_type, TimestampType):
return object.__new__(DatetimeOps)
elif isinstance(spark_type, DateType):
return object.__new__(DateOps)
elif isinstance(spark_type, BinaryType):
return object.__new__(BinaryOps)
elif isinstance(spark_type, ArrayType):
return object.__new__(ArrayOps)
elif isinstance(spark_type, MapType):
return object.__new__(MapOps)
elif isinstance(spark_type, StructType):
return object.__new__(StructOps)
else:
raise TypeError("Type %s was not understood." % dtype)
def __init__(self, dtype: Dtype, spark_type: DataType):
self.dtype = dtype
self.spark_type = spark_type
@property
@abstractmethod
def pretty_name(self) -> str:
raise NotImplementedError()
def add(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Addition can not be applied to %s." % self.pretty_name)
def sub(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Subtraction can not be applied to %s." % self.pretty_name)
def mul(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Multiplication can not be applied to %s." % self.pretty_name)
def truediv(self, left, right) -> Union["Series", "Index"]:
raise TypeError("True division can not be applied to %s." % self.pretty_name)
def floordiv(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Floor division can not be applied to %s." % self.pretty_name)
def mod(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Modulo can not be applied to %s." % self.pretty_name)
def pow(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Exponentiation can not be applied to %s." % self.pretty_name)
def radd(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Addition can not be applied to %s." % self.pretty_name)
def rsub(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Subtraction can not be applied to %s." % self.pretty_name)
def rmul(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Multiplication can not be applied to %s." % self.pretty_name)
def rtruediv(self, left, right) -> Union["Series", "Index"]:
raise TypeError("True division can not be applied to %s." % self.pretty_name)
def rfloordiv(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Floor division can not be applied to %s." % self.pretty_name)
def rmod(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Modulo can not be applied to %s." % self.pretty_name)
def rpow(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Exponentiation can not be applied to %s." % self.pretty_name)
|
the-stack_0_7689 | from model import MusicTransformer
from custom.layers import *
from custom import callback
import params as par
from tensorflow.python.keras.optimizer_v2.adam import Adam
from data import Data
import utils
import argparse
import datetime
import sys
tf.executing_eagerly()
parser = argparse.ArgumentParser()
parser.add_argument('--l_r', default=None, help='learning rate', type=float)
parser.add_argument('--batch_size', default=2, help='batch size', type=int)
parser.add_argument('--pickle_dir', default='music', help='dataset path')
parser.add_argument('--max_seq', default=2048, help='maximum sequence length', type=int)
parser.add_argument('--epochs', default=100, help='number of epochs', type=int)
parser.add_argument('--load_path', default=None, help='model load path', type=str)
parser.add_argument('--save_path', default="result/0722", help='model save path')
parser.add_argument('--is_reuse', default=False)
parser.add_argument('--multi_gpu', default=True)
args = parser.parse_args()
# set arguments
l_r = args.l_r
batch_size = args.batch_size
pickle_dir = args.pickle_dir
max_seq = args.max_seq
epochs = args.epochs
is_reuse = args.is_reuse
load_path = args.load_path
save_path = args.save_path
multi_gpu = args.multi_gpu
# load data
dataset = Data('dataset/processed')
print(dataset)
# load model
learning_rate = callback.CustomSchedule(par.embedding_dim) if l_r is None else l_r
opt = Adam(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9)
# define model
mt = MusicTransformer(
embedding_dim=256,
vocab_size=par.vocab_size,
num_layer=6,
max_seq=max_seq,
dropout=0.2,
debug=False, loader_path=load_path)
mt.compile(optimizer=opt, loss=callback.transformer_dist_train_loss)
# define tensorboard writer
current_time = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
train_log_dir = 'logs/gradient_tape/'+current_time+'/train'
eval_log_dir = 'logs/gradient_tape/'+current_time+'/eval'
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
eval_summary_writer = tf.summary.create_file_writer(eval_log_dir)
# Train Start
idx = 0
for e in range(epochs):
mt.reset_metrics()
for b in range(len(dataset.files) // batch_size):
try:
batch_x, batch_y = dataset.seq2seq_batch(batch_size, max_seq)
except:
continue
result_metrics = mt.train_on_batch(batch_x, batch_y)
if b % 100 == 0:
eval_x, eval_y = dataset.seq2seq_batch(batch_size, max_seq, 'eval')
eval_result_metrics, weights = mt.evaluate(eval_x, eval_y)
mt.save(save_path)
with train_summary_writer.as_default():
tf.summary.scalar('loss', result_metrics[0], step=idx)
tf.summary.scalar('accuracy', result_metrics[1], step=idx)
for i, weight in enumerate(weights):
with tf.name_scope("layer_%d" % i):
with tf.name_scope("_w0"):
utils.attention_image_summary(weight[0])
with tf.name_scope("_w1"):
utils.attention_image_summary(weight[1])
with eval_summary_writer.as_default():
tf.summary.scalar('loss', eval_result_metrics[0], step=idx)
tf.summary.scalar('accuracy', eval_result_metrics[1], step=idx)
idx += 1
print('\n====================================================')
print('Epoch/Batch: {}/{}'.format(e, b))
print('Train >>>> Loss: {:6.6}, Accuracy: {}'.format(result_metrics[0], result_metrics[1]))
print('Eval >>>> Loss: {:6.6}, Accuracy: {}'.format(eval_result_metrics[0], eval_result_metrics[1]))
|
the-stack_0_7691 | import datetime
from discord import utils
from discord.ext import commands
class VocalSalonSystem(commands.Cog):
""" VocalSalonSystem() -> Represent the creation of vocal custom with anyone ! """
def __init__(self,bot):
self.bot = bot
async def create_vocal(self,database,guild,member):
""" create_vocal() -> Create a channel when the member as joined "Crée un salon" """
category = utils.get(guild.categories,id=int(database["category_id"]))
# Create and get the new vocal channel
new_channel = await guild.create_voice_channel(f"{member.name}'s Channel.",bitrate=64000,category=category)
# Log
print(f"[{datetime.datetime.today().date()}] L'utilisateur {member.name} à crée un salon dans {guild.name} !")
# Move the member to the vocal channel created
await new_channel.edit(position=len(category.voice_channels)+1)
await member.move_to(new_channel)
async def delete_vocal(self,before,member):
""" delete_vocal() -> Delete a channel when the member as leave your channel """
# If 0 as in channel
if before.channel is not None:
if len(before.channel.members) == 0:
# Log
print(f"[{datetime.datetime.today().date()}] Le salon de {member.name} à été supprimé dans {member.guild.name} !")
return await before.channel.delete()
@commands.Cog.listener()
async def on_voice_state_update(self,member,before,after):
for database in self.bot.guilds_data[str(member.guild.id)]["channels"]:
if database["function"].count("create_private_vocal") == 1:
if after.channel is not None:
if int(after.channel.id) == int(database["channel_id"]):
return await self.create_vocal(database,member.guild,member)
if after.channel is None:
if int(before.channel.id) != int(database["channel_id"]):
return await self.delete_vocal(before,member)
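# Editor's addition (hedged): discord.py cogs are normally loaded as extensions.
# Assuming discord.py 1.x (synchronous extension API), the conventional entry
# point looks like this; for discord.py 2.x, setup must be async and add_cog awaited.
def setup(bot):
    bot.add_cog(VocalSalonSystem(bot))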
|
the-stack_0_7692 | from django.shortcuts import render, get_object_or_404, redirect
from django.core.paginator import Paginator
from .choices import gender_choices, age_choices, size_choices
from .logic.pets_logic import delete_pet
from .models import Pet
def index(request):
queryset_list = Pet.objects.order_by(
'-list_date').filter(is_published=True)
# Keywords
if 'keywords' in request.GET:
keywords = request.GET['keywords']
if keywords:
queryset_list = queryset_list.filter(
description__icontains=keywords)
# City
if 'city' in request.GET:
city = request.GET['city']
if city:
queryset_list = queryset_list.filter(city__iexact=city)
# Gender
if 'gender' in request.GET:
gender = request.GET['gender']
if gender:
queryset_list = queryset_list.filter(gender__iexact=gender)
# Age
if 'age' in request.GET:
age = request.GET['age']
if age:
queryset_list = queryset_list.filter(age__lte=age)
# Size
if 'size' in request.GET:
size = request.GET['size']
if size:
queryset_list = queryset_list.filter(size__iexact=size)
paginator = Paginator(queryset_list, 6)
page = request.GET.get('page')
paged_pets = paginator.get_page(page)
context = {
'pets': paged_pets,
'gender_choices': gender_choices,
'age_choices': age_choices,
'size_choices': size_choices,
'values': request.GET
}
return render(request, 'pets/pets.html', context)
def pet(request, pet_id):
if request.GET.get('borrarpet_'+str(pet_id)):
delete_pet(pet_id)
return redirect("/")
else:
pet = get_object_or_404(Pet, pk=pet_id)
context = {
'pet': pet
}
return render(request, 'pets/pet.html', context)
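# Editor's sketch: a minimal urls.py wiring for these two views, kept as a
# comment so this module stays a plain views file. The URL names and project
# layout are assumptions.
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('', views.index, name='index'),
#       path('<int:pet_id>', views.pet, name='pet'),
#   ]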
|
the-stack_0_7693 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Services for exploration-related statistics."""
__author__ = 'Sean Lip'
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import stats_domain
from core.domain import stats_jobs
from core.platform import models
(stats_models,) = models.Registry.import_models([models.NAMES.statistics])
import feconf
IMPROVE_TYPE_DEFAULT = 'default'
IMPROVE_TYPE_INCOMPLETE = 'incomplete'
def get_top_unresolved_answers_for_default_rule(exploration_id, state_name):
return {
answer: count for (answer, count) in
stats_domain.StateRuleAnswerLog.get(
exploration_id, state_name, exp_domain.DEFAULT_RULESPEC_STR
).get_top_answers(3)
}
def get_state_rules_stats(exploration_id, state_name):
"""Gets statistics for the answer groups and rules of this state.
Returns:
A dict, keyed by the string '{HANDLER_NAME}.{RULE_STR}', whose
values are the corresponding stats_domain.StateRuleAnswerLog
instances.
"""
exploration = exp_services.get_exploration_by_id(exploration_id)
state = exploration.states[state_name]
# TODO(bhenning): Everything is handler name submit; therefore, it is
# pointless and should be removed.
_OLD_SUBMIT_HANDLER_NAME = 'submit'
rule_keys = []
for group in state.interaction.answer_groups:
for rule in group.rule_specs:
rule_keys.append((
_OLD_SUBMIT_HANDLER_NAME, rule.stringify_classified_rule()))
if state.interaction.default_outcome:
rule_keys.append((
_OLD_SUBMIT_HANDLER_NAME, exp_domain.DEFAULT_RULESPEC_STR))
answer_logs = stats_domain.StateRuleAnswerLog.get_multi(
exploration_id, [{
'state_name': state_name,
'rule_str': rule_key[1]
} for rule_key in rule_keys])
results = {}
for ind, answer_log in enumerate(answer_logs):
results['.'.join(rule_keys[ind])] = {
'answers': answer_log.get_top_answers(5),
'rule_hits': answer_log.total_answer_count
}
return results
def get_state_improvements(exploration_id, exploration_version):
"""Returns a list of dicts, each representing a suggestion for improvement
to a particular state.
"""
ranked_states = []
exploration = exp_services.get_exploration_by_id(exploration_id)
state_names = exploration.states.keys()
default_rule_answer_logs = stats_domain.StateRuleAnswerLog.get_multi(
exploration_id, [{
'state_name': state_name,
'rule_str': exp_domain.DEFAULT_RULESPEC_STR
} for state_name in state_names])
statistics = stats_jobs.StatisticsAggregator.get_statistics(
exploration_id, exploration_version)
state_hit_counts = statistics['state_hit_counts']
for ind, state_name in enumerate(state_names):
total_entry_count = 0
no_answer_submitted_count = 0
if state_name in state_hit_counts:
total_entry_count = (
state_hit_counts[state_name]['total_entry_count'])
no_answer_submitted_count = state_hit_counts[state_name].get(
'no_answer_count', 0)
if total_entry_count == 0:
continue
threshold = 0.2 * total_entry_count
default_rule_answer_log = default_rule_answer_logs[ind]
default_count = default_rule_answer_log.total_answer_count
eligible_flags = []
state = exploration.states[state_name]
if (default_count > threshold and
state.interaction.default_outcome is not None and
state.interaction.default_outcome.dest == state_name):
eligible_flags.append({
'rank': default_count,
'improve_type': IMPROVE_TYPE_DEFAULT})
if no_answer_submitted_count > threshold:
eligible_flags.append({
'rank': no_answer_submitted_count,
'improve_type': IMPROVE_TYPE_INCOMPLETE})
if eligible_flags:
eligible_flags = sorted(
eligible_flags, key=lambda flag: flag['rank'], reverse=True)
ranked_states.append({
'rank': eligible_flags[0]['rank'],
'state_name': state_name,
'type': eligible_flags[0]['improve_type'],
})
return sorted(
[state for state in ranked_states if state['rank'] != 0],
key=lambda x: -x['rank'])
def get_versions_for_exploration_stats(exploration_id):
"""Returns list of versions for this exploration."""
return stats_models.ExplorationAnnotationsModel.get_versions(
exploration_id)
def get_exploration_stats(exploration_id, exploration_version):
"""Returns a dict with state statistics for the given exploration id.
Note that exploration_version should be a string.
"""
exploration = exp_services.get_exploration_by_id(exploration_id)
exp_stats = stats_jobs.StatisticsAggregator.get_statistics(
exploration_id, exploration_version)
last_updated = exp_stats['last_updated']
state_hit_counts = exp_stats['state_hit_counts']
return {
'improvements': get_state_improvements(
exploration_id, exploration_version),
'last_updated': last_updated,
'num_completions': exp_stats['complete_exploration_count'],
'num_starts': exp_stats['start_exploration_count'],
'state_stats': {
state_name: {
'name': state_name,
'firstEntryCount': (
state_hit_counts[state_name]['first_entry_count']
if state_name in state_hit_counts else 0),
'totalEntryCount': (
state_hit_counts[state_name]['total_entry_count']
if state_name in state_hit_counts else 0),
} for state_name in exploration.states
},
}
|
the-stack_0_7694 | import json
import uuid
from pathlib import Path
def load_json_template(name):
script_dir = Path(__file__).parent
json_path = (script_dir / "json" / f"{name}.json").resolve()
with open(json_path, 'r') as json_file:
template = json_file.read()
return json.loads(template)
def create_minio_connection(address, key, secret_key):
obj = load_json_template("minio_connection")
obj["url"] = address
obj["key"] = key
obj["secretKey"] = secret_key
return obj
def create_minio_input(object_name, bucket_name, minio_connection):
obj = load_json_template("minio_input")
obj["objectName"] = object_name
obj["bucketName"] = bucket_name
obj["minIOConnection"] = minio_connection
return obj
def create_binder_execution(input_list, memory=1000):
obj = load_json_template("binder_execution")
obj["executionIdentifier"] = str(uuid.uuid4())
for requirement in obj["requirements"]:
if requirement["type"] == "ConfigurationRequirementRelationalInput":
requirement["settings"] = input_list
obj["memory"] = str(memory)
return obj
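def _example_binder_execution():
    """Editor's sketch: how the builders above compose into one execution payload.
    The endpoint, credentials, bucket and object names are placeholder
    assumptions, not values from the original project."""
    connection = create_minio_connection("http://localhost:9000", "minio-key", "minio-secret")
    flights_input = create_minio_input("flights.csv", "demo-bucket", connection)
    return create_binder_execution([flights_input], memory=2000)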
def convert_minio_input_to_execution(obj_minio_input):
exec_minio_input = load_json_template("execution_minio_input")
exec_minio_connection = load_json_template("execution_minio_connection")
obj_minio_connection = obj_minio_input["minIOConnection"]
exec_minio_connection["url"] = obj_minio_connection["url"]
exec_minio_connection["key"] = obj_minio_connection["key"]
exec_minio_connection["secretKey"] = obj_minio_connection["secretKey"]
exec_minio_input["object"] = obj_minio_input["objectName"]
exec_minio_input["bucket"] = obj_minio_input["bucketName"]
exec_minio_input["minIOConnection"] = exec_minio_connection
    return exec_minio_input
|
the-stack_0_7695 | import re
import sys
from ..specfile.helpers import detect_specfile, get_source_urls, detect_github_tag_prefix, get_current_version, get_url
from urllib.parse import urlparse
from typing import Optional
import requests
RE_GITHUB_PATH_REPO = re.compile('^/([^/]+/[^/]+)/?')
RE_GIT_COMMIT = re.compile('^[a-f0-9]{40}$')
def detect_previous_version(changes):
for line in changes:
previous_version_match = re.match('^- +(?:version )?update(?: to)?(?: version)? ([0-9.]+)', line, re.IGNORECASE)
if previous_version_match:
previous_version = previous_version_match[1]
break
else:
sys.exit("Could not determine the last mentioned version from the changes file.")
return previous_version
def get_changelog_from_github(previous_version: str, current_version: Optional[str] = None) -> dict:
"""
First, get the GitHub URL by interpreting the Source tags and the URL tag.
Then, detect the tag-prefix.
At the end, download the diff.
"""
specfilename = detect_specfile()
if not current_version:
current_version = get_current_version(specfilename=specfilename)
urls = get_source_urls(specfilename=specfilename)
for url in urls:
parsed = urlparse(url)
if parsed.hostname == 'github.com' and 'archive' in parsed.path:
repo_path = RE_GITHUB_PATH_REPO.match(parsed.path).group(1)
tag_prefix = detect_github_tag_prefix(specfilename=specfilename)
break
else:
url = get_url(specfilename=specfilename)
parsed = urlparse(url)
if parsed.hostname == 'github.com':
repo_path = RE_GITHUB_PATH_REPO.match(parsed.path).group(1)
tags = requests.get(f'https://api.github.com/repos/{repo_path}/tags')
tags.raise_for_status()
if tags.json()[0]['name'].startswith('v'):
tag_prefix = 'v'
else:
tag_prefix = ''
else:
            sys.exit('Found no usable Source URL or URL entry pointing to GitHub.')
if not RE_GIT_COMMIT.match(current_version):
current_version = tag_prefix + current_version
url = f'https://api.github.com/repos/{repo_path}/compare/{tag_prefix}{previous_version}...{current_version}'
print(f'Downloading from: {url}', file=sys.stderr)
compare = requests.get(url)
compare.raise_for_status()
return compare.json()
|
the-stack_0_7696 | # Created by Kelvin_Clark on 2/1/2022, 1:43 PM
from typing import List, Optional
from fastapi import APIRouter, Depends
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import Session
from starlette.status import HTTP_201_CREATED
from app.api.dependencies.oauth import get_admin_system_user, get_current_user
from app.data import get_sync_session, get_async_session
from app.data.schema.pydantic.project import ProjectOut, ProjectIn, ProjectSummary, ProjectStat
from app.data.schema.pydantic.user import UserOut
from app.data.usecases.getters.get_project import (get_latest_projects, get_projects_summary, get_projects,
get_project_stats)
from app.data.usecases.insert.insert_project import insert_project
router = APIRouter(prefix="/project", tags=["Projects"])
@router.post("/add", response_model=ProjectOut, status_code=HTTP_201_CREATED)
async def create_project(project: ProjectIn, session: Session = Depends(get_sync_session),
_: UserOut = Depends(get_admin_system_user)):
project = insert_project(session=session, project=project)
return project
@router.get("/", response_model=List[ProjectOut])
async def get_projects__(page_number: Optional[int] = 0, session: AsyncSession = Depends(get_async_session),
_: UserOut = Depends(get_current_user)):
projects = await get_projects(session=session, page_number=page_number)
return projects
@router.get("/latest", response_model=List[ProjectOut])
async def _get_latest_projects(count: Optional[int] = 5, _: UserOut = Depends(get_current_user),
session: AsyncSession = Depends(get_async_session)):
projects = await get_latest_projects(session=session, count=count)
return projects
@router.get("/stats", response_model=ProjectStat)
async def _get_project_stat(project_id: int, session: AsyncSession = Depends(get_async_session),
_: UserOut = Depends(get_current_user)):
project_stat = await get_project_stats(session=session, project_id=project_id)
return project_stat
@router.get("/summary", response_model=ProjectSummary)
async def _get_project_summary(_: UserOut = Depends(get_current_user),
session: AsyncSession = Depends(get_async_session)):
summary = await get_projects_summary(session=session)
return summary
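# Editor's sketch: this router is assumed to be mounted on the application
# elsewhere, e.g.:
#
#   from fastapi import FastAPI
#   app = FastAPI()
#   app.include_router(router)
#
# The exact application module is an assumption based on this file's layout.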
|
the-stack_0_7697 | class Solution:
"""
@param A : a list of integers
@param target : an integer to be inserted
@return : an integer
"""
def searchInsert(self, A, target):
if not A:
return 0
lo, hi = 0, len(A)-1
while lo <= hi:
mid = lo + (hi-lo)//2
val = A[mid]
if val == target:
return mid
elif val < target:
lo = mid + 1
else:
hi = mid - 1
        return lo
|
the-stack_0_7698 | import os
import bpy
from .pbr_utils import PbrSettings
from . import pman
from . import operators
class PandaButtonsPanel:
bl_space_type = "PROPERTIES"
bl_region_type = "WINDOW"
COMPAT_ENGINES = {'PANDA'}
@classmethod
def poll(cls, context):
return context.scene.render.engine in cls.COMPAT_ENGINES
class PandaRender_PT_project(PandaButtonsPanel, bpy.types.Panel):
bl_label = "Project Settings"
bl_context = "render"
def draw_with_config(self, context, _config):
layout = self.layout
project_settings = context.scene.panda_project
layout.prop(project_settings, 'project_name')
layout.prop(project_settings, 'renderer')
layout.prop(project_settings, 'pbr_materials')
layout.prop(project_settings, 'python_binary')
layout.operator(operators.UpdateProject.bl_idname)
def draw_no_config(self, _context):
layout = self.layout
layout.label(text="No config file detected")
def draw(self, context):
confdir = os.path.dirname(bpy.data.filepath) if bpy.data.filepath else None
if pman.config_exists(confdir):
self.draw_with_config(context, pman.get_config(confdir))
else:
self.draw_no_config(context)
layout = self.layout
layout.operator(operators.CreateProject.bl_idname)
layout.operator(operators.SwitchProject.bl_idname)
class PandaRender_PT_build(PandaButtonsPanel, bpy.types.Panel):
bl_label = "Build Settings"
bl_context = "render"
@classmethod
def poll(cls, context):
confdir = os.path.dirname(bpy.data.filepath) if bpy.data.filepath else None
return PandaButtonsPanel.poll(context) and pman.config_exists(confdir)
def draw(self, context):
layout = self.layout
project_settings = context.scene.panda_project
layout.prop(project_settings, 'asset_dir')
layout.prop(project_settings, 'export_dir')
layout.operator(operators.BuildProject.bl_idname)
class PandaRender_PT_run(PandaButtonsPanel, bpy.types.Panel):
bl_label = "Run Settings"
bl_context = "render"
@classmethod
def poll(cls, context):
confdir = os.path.dirname(bpy.data.filepath) if bpy.data.filepath else None
return PandaButtonsPanel.poll(context) and pman.config_exists(confdir)
def draw(self, context):
layout = self.layout
project_settings = context.scene.panda_project
layout.prop(project_settings, 'auto_save')
layout.prop(project_settings, 'auto_build')
layout.operator(operators.RunProject.bl_idname)
class Panda_PT_context_material(PandaButtonsPanel, bpy.types.Panel):
bl_label = ""
bl_context = "material"
bl_options = {'HIDE_HEADER'}
@classmethod
def poll(cls, context):
return (context.material or context.object) and PandaButtonsPanel.poll(context)
def draw(self, context):
layout = self.layout
mat = context.material
ob = context.object
slot = context.material_slot
space = context.space_data
is_sortable = len(ob.material_slots) > 1
if ob:
rows = 1
if is_sortable:
rows = 4
row = layout.row()
row.template_list("MATERIAL_UL_matslots", "", ob, "material_slots", ob, "active_material_index", rows=rows)
col = row.column(align=True)
col.operator("object.material_slot_add", icon='ZOOMIN', text="")
col.operator("object.material_slot_remove", icon='ZOOMOUT', text="")
col.menu("MATERIAL_MT_specials", icon='DOWNARROW_HLT', text="")
if is_sortable:
col.separator()
col.operator("object.material_slot_move", icon='TRIA_UP', text="").direction = 'UP'
col.operator("object.material_slot_move", icon='TRIA_DOWN', text="").direction = 'DOWN'
if ob.mode == 'EDIT':
row = layout.row(align=True)
row.operator("object.material_slot_assign", text="Assign")
row.operator("object.material_slot_select", text="Select")
row.operator("object.material_slot_deselect", text="Deselect")
split = layout.split(percentage=0.65)
if ob:
split.template_ID(ob, "active_material", new="material.new")
row = split.row()
if slot:
row.prop(slot, "link", text="")
else:
row.label()
elif mat:
split.template_ID(space, "pin_id")
split.separator()
class PandaMaterial_PT_basic(PandaButtonsPanel, bpy.types.Panel):
bl_label = "Basic Material"
bl_context = "material"
@classmethod
def poll(cls, context):
return context.material and PandaButtonsPanel.poll(context)
def draw(self, context):
layout = self.layout
mat = context.material
layout.label(text="Diffuse:")
split = layout.split()
col = split.column()
col.prop(mat, "diffuse_color", text="")
col = split.column()
col.prop(mat, "diffuse_intensity", text="Intensity")
layout.label(text="Specular:")
split = layout.split()
col = split.column()
col.prop(mat, "specular_color", text="")
col = split.column()
col.prop(mat, "specular_intensity", text="Intensity")
layout.prop(mat, "specular_hardness")
layout.prop(mat, "emit", text="Emit")
layout.prop(mat, "ambient", text="Ambient")
class PandaCamera_PT_lens(PandaButtonsPanel, bpy.types.Panel):
bl_label = "Lens"
bl_context = "data"
@classmethod
def poll(cls, context):
return context.camera and PandaButtonsPanel.poll(context)
def draw(self, context):
layout = self.layout
camera = context.camera
layout.prop(camera, "type", text="")
if camera.type == "PERSP":
split = layout.split()
col = split.column()
col.prop(camera, "lens")
col = split.column()
col.prop(camera, "lens_unit", text="")
elif camera.type == "ORTHO":
layout.prop(camera, "ortho_scale")
else:
layout.label("Not supported")
class PandaPhysics_PT_add(PandaButtonsPanel, bpy.types.Panel):
bl_label = ""
bl_options = {'HIDE_HEADER'}
bl_context = "physics"
@classmethod
def poll(cls, context):
return PandaButtonsPanel.poll(context) and context.object
def draw(self, context):
layout = self.layout
obj = context.object
if obj.rigid_body:
layout.operator('rigidbody.object_remove', text="Remove Rigid Body Physics")
else:
layout.operator('rigidbody.object_add', text="Add Rigid Body Physics")
def get_panels():
panels = [
"DATA_PT_camera_display",
"DATA_PT_camera_safe_areas",
"DATA_PT_context_lamp",
"DATA_PT_lamp",
"DATA_PT_context_mesh",
"DATA_PT_normals",
"DATA_PT_texture_space",
"DATA_PT_vertex_groups",
"DATA_PT_shape_keys",
"DATA_PT_uv_texture",
"DATA_PT_vertex_colors",
"DATA_PT_customdata",
"WORLD_PT_preview",
"WORLD_PT_world",
"TEXTURE_PT_context_texture",
"TEXTURE_PT_preview",
"TEXTURE_PT_colors",
"TEXTURE_PT_image",
"TEXTURE_PT_image_sampling",
"TEXTURE_PT_image_mapping",
"TEXTURE_PT_mapping",
"TEXTURE_PT_influence",
"PHYSICS_PT_rigid_body",
"PHYSICS_PT_rigid_body_collisions",
]
return [getattr(bpy.types, p) for p in panels if hasattr(bpy.types, p)]
def register():
for panel in get_panels():
panel.COMPAT_ENGINES.add('PANDA')
if not hasattr(bpy.types.Material, 'pbr_export_settings'):
bpy.types.Material.pbr_export_settings = bpy.props.PointerProperty(type=PbrSettings)
def unregister():
for panel in get_panels():
if 'PANDA' in panel.COMPAT_ENGINES:
panel.COMPAT_ENGINES.remove('PANDA')
if hasattr(bpy.types.Material, 'pbr_export_settings'):
del bpy.types.Material.pbr_export_settings
|
the-stack_0_7700 | from django.shortcuts import render
from apps.utils.functions import parse_formatting
# Create your views here.
from .models import About, ThirdPartyLicenses
def about_abstract(request, model, template, navbar_selected=False):
'''Abstract function for the pages'''
try:
query = model.objects.all()
content = None
for c in query:
content = c
print(type(content))
content.content = parse_formatting(content.content,
html=content.html,
markdown=content.markdown)
# Only display the page if it has been set to 'visible'.
if not content.visible:
content = None
except model.DoesNotExist:
content = None
context = {
'content': content,
'navbar': {
'selected': "about" if navbar_selected else None,
},
}
return render(request, template, context)
def about(request):
'''About page'''
return about_abstract(request, About, 'about.html', True)
def third_party_licenses(request):
'''Third party licenses page'''
return about_abstract(request, ThirdPartyLicenses,
'third_party_licenses.html', False)
|
the-stack_0_7701 | # Copyright 2015-2016 HyperBit developers
import os
from hyperbit import crypto
def do_pow(payload, trials, extra, ttl):
length = len(payload) + 8 + extra
target = int(2**64 / (trials * (length + max(ttl, 0) * length / (2**16))))
value = target + 1
initial = crypto.sha512(payload)
# Make it harder for attackers to determine how many numbers we have tried
nonce = int.from_bytes(os.urandom(8), 'big')
while value > target:
nonce = (nonce + 1) % (2**64)
a = nonce.to_bytes(8, 'big')
c = crypto.sha512d(a + initial)
value = int.from_bytes(c[:8], 'big')
return nonce
def check(payload, trials, extra, ttl, nonce):
length = len(payload) + 8 + extra
target = int(2**64 / (trials * (length + ttl * length / (2**16))))
initial = crypto.sha512(payload)
a = nonce.to_bytes(8, 'big')
c = crypto.sha512d(a + initial)
value = int.from_bytes(c[:8], 'big')
return value <= target
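if __name__ == '__main__':
    # Editor's sketch: a quick self-check of do_pow/check. The difficulty
    # parameters are small assumed values so the loop finishes quickly; they
    # are not Bitmessage network defaults.
    demo_payload = b'hyperbit pow demo'
    nonce = do_pow(demo_payload, trials=10, extra=10, ttl=0)
    assert check(demo_payload, 10, 10, 0, nonce)
    print('proof of work verified, nonce =', nonce)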
|
the-stack_0_7702 | # -*- coding: utf-8 -*-
'''
:codeauthor: `Anthony Shaw <[email protected]>`
tests.unit.cloud.clouds.dimensiondata_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
try:
import libcloud.security
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
# Import Salt Libs
from salt.cloud.clouds import dimensiondata
from salt.exceptions import SaltCloudSystemExit
from salt.utils.versions import LooseVersion
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import MagicMock, NO_MOCK, NO_MOCK_REASON, patch, __version__ as mock_version
from tests.unit.cloud.clouds import _preferred_ip
VM_NAME = 'winterfell'
# Use certifi if installed
try:
if HAS_LIBCLOUD:
# This work-around for Issue #32743 is no longer needed for libcloud >= 1.4.0.
# However, older versions of libcloud must still be supported with this work-around.
# This work-around can be removed when the required minimum version of libcloud is
# 2.0.0 (See PR #40837 - which is implemented in Salt Oxygen).
if LooseVersion(libcloud.__version__) < LooseVersion('1.4.0'):
import certifi
libcloud.security.CA_CERTS_PATH.append(certifi.where())
except (ImportError, NameError):
pass
class ExtendedTestCase(TestCase):
'''
Extended TestCase class containing additional helper methods.
'''
def assertRaisesWithMessage(self, exc_type, exc_msg, func, *args, **kwargs):
        try:
            func(*args, **kwargs)
        except Exception as exc:
            self.assertEqual(type(exc), exc_type)
            self.assertEqual(exc.message, exc_msg)
        else:
            self.fail('{0} was not raised'.format(exc_type))
@skipIf(NO_MOCK, NO_MOCK_REASON)
class DimensionDataTestCase(ExtendedTestCase, LoaderModuleMockMixin):
'''
Unit TestCase for salt.cloud.clouds.dimensiondata module.
'''
def setup_loader_modules(self):
return {
dimensiondata: {
'__virtual__': MagicMock(return_value='dimensiondata'),
'__active_provider_name__': '',
'__opts__': {
'providers': {
'my-dimensiondata-cloud': {
'dimensiondata': {
'driver': 'dimensiondata',
'region': 'dd-au',
'user_id': 'jon_snow',
'key': 'IKnowNothing'
}
}
}
}
}
}
def test_avail_images_call(self):
'''
Tests that a SaltCloudSystemExit is raised when trying to call avail_images
with --action or -a.
'''
self.assertRaises(
SaltCloudSystemExit,
dimensiondata.avail_images,
call='action'
)
def test_avail_locations_call(self):
'''
Tests that a SaltCloudSystemExit is raised when trying to call avail_locations
with --action or -a.
'''
self.assertRaises(
SaltCloudSystemExit,
dimensiondata.avail_locations,
call='action'
)
def test_avail_sizes_call(self):
'''
Tests that a SaltCloudSystemExit is raised when trying to call avail_sizes
with --action or -a.
'''
self.assertRaises(
SaltCloudSystemExit,
dimensiondata.avail_sizes,
call='action'
)
def test_list_nodes_call(self):
'''
Tests that a SaltCloudSystemExit is raised when trying to call list_nodes
with --action or -a.
'''
self.assertRaises(
SaltCloudSystemExit,
dimensiondata.list_nodes,
call='action'
)
def test_destroy_call(self):
'''
Tests that a SaltCloudSystemExit is raised when trying to call destroy
with --function or -f.
'''
self.assertRaises(
SaltCloudSystemExit,
dimensiondata.destroy,
name=VM_NAME,
call='function'
)
@skipIf(HAS_LIBCLOUD is False, "Install 'libcloud' to be able to run this unit test.")
def test_avail_sizes(self):
'''
        Tests that avail_sizes returns the single default size.
'''
sizes = dimensiondata.avail_sizes(call='foo')
self.assertEqual(
len(sizes),
1
)
self.assertEqual(
sizes['default']['name'],
'default'
)
def test_import(self):
"""
Test that the module picks up installed deps
"""
with patch('salt.config.check_driver_dependencies', return_value=True) as p:
get_deps = dimensiondata.get_dependencies()
self.assertEqual(get_deps, True)
if LooseVersion(mock_version) >= LooseVersion('2.0.0'):
self.assertTrue(p.call_count >= 1)
def test_provider_matches(self):
"""
Test that the first configured instance of a dimensiondata driver is matched
"""
p = dimensiondata.get_configured_provider()
self.assertNotEqual(p, None)
def test_query_node_data_filter_preferred_ip_addresses(self):
'''
Test if query node data is filtering out unpreferred IP addresses.
'''
zero_ip = '0.0.0.0'
private_ips = [zero_ip, '1.1.1.1', '2.2.2.2']
vm = {'name': None}
data = MagicMock()
data.public_ips = []
dimensiondata.NodeState = MagicMock() # pylint: disable=blacklisted-unmocked-patching
dimensiondata.NodeState.RUNNING = True
with patch('salt.cloud.clouds.dimensiondata.show_instance',
MagicMock(return_value={'state': True,
'name': 'foo',
'public_ips': [],
'private_ips': private_ips})):
with patch('salt.cloud.clouds.dimensiondata.preferred_ip',
_preferred_ip(private_ips, [zero_ip])):
with patch('salt.cloud.clouds.dimensiondata.ssh_interface',
MagicMock(return_value='private_ips')):
self.assertEqual(dimensiondata._query_node_data(vm, data).public_ips, [zero_ip])
|
the-stack_0_7704 | #!/usr/bin/python
# Copyright (c) 2018 Warren Usui, MIT License
# pylint: disable=W0223
# pylint: disable=E1111
"""
Get the scores and W-L-T records of all matches for a player
"""
from html.parser import HTMLParser
from llama_slobber.ll_local_io import get_session
from llama_slobber.ll_local_io import get_page_data
from llama_slobber.ll_local_io import USER_DATA
from llama_slobber.handle_conn_err import handle_conn_err
class GetUserData(HTMLParser):
"""
Scan the Match Day table for completed matches
"""
def __init__(self):
HTMLParser.__init__(self)
self.right_table = False
self.result = [{}, {}]
self.season = 0
def handle_data(self, data):
if data.startswith("LL"):
snumb = data.split(' ')[0]
self.season = int(snumb[2:])
self.result[0][self.season] = []
self.result[1][self.season] = []
else:
if data.find(')-') > 0:
nscore = []
sparts = data.split('-')
for part in sparts:
chr2 = part[2]
if chr2 == 'F':
chr2 = '-1'
sval = [int(part[0]), int(chr2)]
nscore.append(sval)
self.result[0][self.season].append(nscore)
else:
parts = data.split('-')
if len(parts) == 3:
wlrecs = []
for wlpart in parts:
wlrecs.append(int(wlpart))
self.result[1][self.season].append(wlrecs)
@handle_conn_err
def get_user_data(player, session=None):
"""
Return information about a user:
Tuple of two dicts:
first Dict -- indexed by season, list of scores as 2 x 2 tuples.
second Dict -- indexed by season, list of W-L-T records as tuples
All values are integers
Input:
player -- player name
session request
"""
if session is None:
session = get_session()
return get_page_data(USER_DATA % player.lower(), GetUserData(),
session=session)
if __name__ == '__main__':
print(get_user_data('UsuiW')[1][78])
print(get_user_data('ConryM_Illuminati=REAL?')[1][79])
|
the-stack_0_7708 | # pylint: disable=W0223,W0221
from tornado.web import HTTPError
from codebase.web import APIRequestHandler
from codebase.models import (
User,
Role
)
class _Base(APIRequestHandler):
def get_user(self, _id):
user = self.db.query(User).filter_by(uuid=_id).first()
if user:
return user
raise HTTPError(400, reason="not-found")
def get_roles(self, role_ids):
"""通过给定的角色ID列表,查询对应的角色对象
返回:
1. `roles` : 找到的角色对象列表
2. `notexist` : 没有找到的角色ID列表
"""
        notexist = []
roles = []
for role_id in role_ids:
role = self.db.query(Role).filter_by(uuid=role_id).first()
if role:
roles.append(role)
else:
                notexist.append(role_id)
        return roles, notexist
class UserRoleHandler(_Base):
def get(self, _id):
"""获取指定用户的角色列表
"""
user = self.get_user(_id)
self.success(data=[role.isimple for role in user.roles])
class UserRoleAppendHandler(_Base):
def post(self, _id):
"""增加指定用户的角色
"""
body = self.get_body_json()
        # First check that all of the given role IDs exist
roles, notexist = self.get_roles(body["roles"])
if notexist:
self.fail(error="have-not-exist", data=notexist)
return
        # If the user does not exist it has never been recorded, so create a new User here
user = self.db.query(User).filter_by(uuid=_id).first()
if not user:
user = User(uuid=_id)
self.db.add(user)
self.db.commit()
# append roles
user.roles.extend(roles)
self.db.commit()
self.success()
class UserRoleRemoveHandler(_Base):
def post(self, _id):
"""删除指定用户的角色
"""
user = self.get_user(_id)
body = self.get_body_json()
roles, notexist = self.get_roles(body["roles"])
if notexist:
self.fail(error="have-not-exist", data=notexist)
return
# remove roles
for role in roles:
user.roles.remove(role)
self.db.commit()
self.success()
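# Editor's sketch: these handlers are assumed to be registered in the Tornado
# application's route table along these lines (the URL patterns are
# illustrative assumptions; the real routing lives elsewhere in the package):
#
#   handlers = [
#       (r"/user/(.*)/roles", UserRoleHandler),
#       (r"/user/(.*)/roles/append", UserRoleAppendHandler),
#       (r"/user/(.*)/roles/remove", UserRoleRemoveHandler),
#   ]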
|
the-stack_0_7709 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for Explain
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1_PredictionService_Explain_sync]
from google.cloud import aiplatform_v1
def sample_explain():
"""Snippet for explain"""
# Create a client
client = aiplatform_v1.PredictionServiceClient()
# Initialize request argument(s)
instances = aiplatform_v1.Value()
instances.null_value = "NULL_VALUE"
request = aiplatform_v1.ExplainRequest(
endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}",
instances=instances,
)
# Make the request
response = client.explain(request=request)
# Handle response
print(response)
# [END aiplatform_generated_aiplatform_v1_PredictionService_Explain_sync]
|
the-stack_0_7710 | import json
import pandas as pd
import gzip
from io import BytesIO
import requests
import time
import warnings
class UnityDataImporter:
'''
Class for creating and reading raw data exports from the
Unity API. Manual: https://docs.unity3d.com/Manual/UnityAnalyticsRawDataExport.html
Can be initialised with a project_id and api_key (strings).
'''
def __init__(self, project_id=None, api_key=None):
self.pid = project_id
self.key = api_key
self.export_id = None
if project_id != None:
self.base_url = f'https://analytics.cloud.unity3d.com/api/v2/projects/{project_id}/rawdataexports'
def set_keys(self, project_id=None, api_key=None):
'''
Update the project_id, api_key and base url used to make queries.
Paramaters:
project_id: str id of the project
api_key: str api key required to download data
'''
if api_key:
self.key = api_key
if project_id:
self.pid = project_id
self.base_url = f'https://analytics.cloud.unity3d.com/api/v2/projects/{project_id}/rawdataexports'
def check_setup(self):
'''
Checks that project_id, api_key and base_url have all been set up.
'''
if not self.pid or not self.key or not self.base_url:
raise ValueError('Project id, api_key and/or base_url are not defined.'
+ '\n' + 'Please run set_keys first.')
def create_export(self, params, return_value=False):
'''
Creates a new data export. Requires project_id, api_key and base_url
to be defined (see set_keys to do this). Executing this function will
also set a value for the export_id parameter.
Note that single exports cannot be longer than 31 days by default.
Parameters:
params: dict dictionary of the arguments for the request.
Arguments are:
startDate: str, required unless continueFrom is specified.
Inclusive start data of the export in YYYY-MM-DD format.
endDate: str, required. Exclusive end date of export in YYYY-MM-DD format.
format: str, optional. Default is JSON, alternative is tsv.
There is no reason to edit this, given that this only produces metadata.
dataset: str, required. One of the following event types:
appStart, appRunning, deviceInfo, custom or transaction
continueFrom: str, optional. Raw data export ID of a previously created data
export. Incompatible with startDate.
return_value: bool, optional, default False. Option to return the request.
If False, then the response status code is printed.
'''
self.check_setup()
if not 'format' in list(params.keys()):
params['format'] = 'json'
r = requests.post(self.base_url, json=params, auth=(self.pid, self.key))
try: self.export_id = r.json()['id']
except KeyError:
raise requests.HTTPError('Request failure:', r.content)
if return_value:
return r
def list_data_exports(self):
'''
Lists all available raw data export metadata.
Returns: json of all available data export metadata.
'''
self.check_setup()
return requests.get(self.base_url, auth=(self.pid, self.key)).json()
def get_data_export(self, export_id=None, output='data'):
'''
Get an existing data_export data/metadata with a specific id.
Parameters:
export_id: str, optional, if not specified, the id used is the export_id.
If the export_id has not been set, then this will return an error.
If this is used, then the export_id attribute will be updated on execution.
output: str, options are 'data', 'metadata' or 'both' with 'data as the default.
Determines what values to produce on the function return.
Returns: dict of metadata/list of dicts of data for each day according to the output argument.
If output is 'both', then output is a tuple of the data and metadata.
'''
self.check_setup()
if not output in ['data','metadata','both']:
raise ValueError(f'Invalid output argument {output}')
if export_id == None:
if self.export_id == None:
raise ValueError('Export id was not provided and it has not been set.')
else:
self.export_id = export_id
md = requests.get(self.base_url + f'/{self.export_id}', auth=(self.pid, self.key)).json()
if output == 'metadata':
return md
else:
if md['status'] == 'running': raise KeyError('Export has been created, but is not ready yet.')
out = []
try: md['result']['fileList']
except KeyError:
if output == 'data':
warnings.warn('No data found, return value is None')
return None
else:
warnings.warn('No data found, only metadata will be returned')
return md
for f in md['result']['fileList']:
data_url = f['url']
data_req = requests.get(data_url)
data_string = gzip.open(BytesIO(data_req.content)).read().decode('utf-8')
data_string = str(data_string).split('\n')
data = []
for d in data_string:
if d == '': pass
else:
data.append(json.loads(d))
out.append(data)
if output == 'data':
return out
else:
return out, md
def create_and_get_export(self, params):
'''
Performs the create_export and get_data_export functions in one go.
Note that single exports cannot be longer than 31 days by default.
You can use make_long_df to make larger exports in one go.
Parameters:
params: dict dictionary of the arguments for the request.
Arguments are:
startDate: str, required unless continueFrom is specified.
Inclusive start data of the export in YYYY-MM-DD format.
endDate: str, required. Exclusive end date of export in YYYY-MM-DD format.
dataset: str, required. One of the following event types:
appStart, appRunning, deviceInfo, custom or transaction
continueFrom: str, optional. Raw data export ID of a previously created data
export. Incompatible with startDate.
returns:
A json of the data from the request
'''
self.create_export(params)
# Need to wait until export is ready
counter = 0
while True:
status = self.get_data_export(output='metadata')['status']
if status == 'completed':
break
dot = '.' * (counter % 4)
counter += 1
print(f'Creating {params["dataset"]} export from {params["startDate"]} to {params["endDate"]}{dot} ', end='\r')
time.sleep(0.5)
print()
print('Data export ready')
data = self.get_data_export(output='data')
return data
def make_long_df(self, params):
'''
        Same as create_and_get_export converted to a pandas DataFrame, but can be used to create data exports longer than 31 days.
Works by creating multiple data exports and aggregating them together.
Parameters:
params: dict dictionary of the arguments for the request.
Arguments are:
startDate: str, required. continueFrom will not work here
Inclusive start data of the export in YYYY-MM-DD format.
endDate: str, required. Exclusive end date of export in YYYY-MM-DD format.
dataset: str, required. One of the following event types:
appStart, appRunning, deviceInfo, custom or transaction
returns:
A pandas dataframe of the data from the request
'''
start_date, end_date = params['startDate'], params['endDate']
start_year, start_month = int(start_date[:4]), int(start_date[5:7])
end_year, end_month = int(end_date[:4]), int(end_date[5:7])
months = end_month - start_month + 12 * (end_year - start_year)
if months < 0:
raise ValueError(f'The given start date {start_date} is later than the end date {end_date}')
if months == 0:
df = convert_to_pandas(self.create_and_get_export(params))
else:
for month in range(months + 1):
if month == 0:
m = ((start_month + month - 1) % 12) + 1
y = (start_month + month - 1) // 12
if m == 12:
ed = f'{start_year + y + 1}-01-01'
else:
ed = f'{start_year + y}-{(m+1):02}-01'
params['endDate'] = ed
df = convert_to_pandas(self.create_and_get_export(params))
# Prevent an error if the end-date is on the first of a month
if ed == end_date: break
continue
elif month == months:
sd = f'{end_year}-{end_month:02}-01'
params['startDate'] = sd
params['endDate'] = end_date
df_ = convert_to_pandas(self.create_and_get_export(params))
else:
m = ((start_month + month - 1) % 12) + 1
y = (start_month + month - 1) // 12
sd = f'{start_year + y}-{m:02}-01'
if m == 12:
ed = f'{start_year + y + 1}-01-01'
else:
ed = f'{start_year + y}-{(m+1):02}-01'
params['startDate'] = sd
params['endDate'] = ed
df_ = convert_to_pandas(self.create_and_get_export(params))
# Prevent an error if the end-date is on the first of a month
if ed == end_date: break
if isinstance(df_, pd.DataFrame):
if isinstance(df, pd.DataFrame):
df = df.append(df_, ignore_index=True, sort=False)
else:
df = df_
return df
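# Editor's sketch of typical usage, kept as a comment so the module has no
# import-time side effects; the project id, API key and dates below are
# placeholders:
#
#   udi = UnityDataImporter('my-project-id', 'my-api-key')
#   events = udi.make_long_df({'startDate': '2020-01-01',
#                              'endDate': '2020-03-15',
#                              'dataset': 'custom'})
#   print(events.head())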
def convert_to_pandas(data):
'''
Converts the json file provided by Unity into a pandas DataFrame.
Parameters:
data: dict, the return value of UnityDataImporter.create_export
Returns:
A pandas dataframe version of the json file.
'''
if data == None:
return None
df = pd.DataFrame(data[0])
if len(data) > 1:
for day in data[1:]:
df = df.append(pd.DataFrame(day), ignore_index=True, sort=False)
        return df
|
the-stack_0_7711 | # Tencent is pleased to support the open source community by making ncnn available.
#
# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.pool_0 = nn.AdaptiveAvgPool2d(output_size=(7,6))
self.pool_1 = nn.AdaptiveAvgPool2d(output_size=1)
def forward(self, x):
x = self.pool_0(x)
x = self.pool_1(x)
return x
def test():
net = Model()
net.eval()
torch.manual_seed(0)
x = torch.rand(1, 128, 13, 13)
a = net(x)
# export torchscript
mod = torch.jit.trace(net, x)
mod.save("test_nn_AdaptiveAvgPool2d.pt")
# torchscript to pnnx
import os
os.system("../../src/pnnx test_nn_AdaptiveAvgPool2d.pt inputshape=[1,128,13,13]")
# ncnn inference
import test_nn_AdaptiveAvgPool2d_ncnn
b = test_nn_AdaptiveAvgPool2d_ncnn.test_inference()
b = b.reshape_as(a)
return torch.allclose(a, b, 1e-4, 1e-4)
if __name__ == "__main__":
if test():
exit(0)
else:
exit(1)
|
the-stack_0_7715 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import openpyxl
########################################################## Size ###################################################################
size = pd.read_excel(r'C:\Users\Jhona\OneDrive - Grupo Marista\Projetos\Factor Investing\Factor-Investing\Factor investing\Size\Size.xlsx',
parse_dates=True, index_col=0).dropna()
def columns(df):
df.columns = df.columns.str[-6:]
return df
columns(size)
size = size.T
size.replace('-', np.nan, inplace=True)
size.columns = size.columns.astype(str)
size.rename(columns={'2019-12-31': 'Market Cap'}, inplace=True)
size.sort_values('Market Cap', inplace=True)
assets = size.iloc[4:14]
assets.index
stocks = ['ATOM3.SA', 'IGBR3.SA', 'FHER3.SA', 'PDGR3.SA', 'SLED4.SA', 'BTTL3.SA',
'VIVR3.SA', 'RCSL4.SA', 'ETER3.SA', 'RSID3.SA']
df = pd.DataFrame()
from pandas_datareader import data
for i in stocks:
df[i] = data.DataReader(i, data_source='yahoo', start='2020-01-02', end = '2020-12-31')['Adj Close']
weights = np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
returns = df.pct_change().dropna()
df['Portfolio'] = (1+returns.dot(weights)).cumprod().dropna()
norm = pd.DataFrame()
for i in df:
norm[i] = df[i].div(df[i].iloc[1]).mul(100)
norm
plt.style.use('ggplot')
norm.plot()
plt.legend(loc='lower left')
plt.show()
### Portfolio vs IBOV
ibov = data.DataReader('^BVSP', data_source='yahoo', start='2020-01-01', end = '2020-12-31')
ibov.rename(columns = {'Adj Close':'IBOV'}, inplace=True)
ibov.drop(ibov.columns[[0,1,2,3,4]], axis=1, inplace=True)
ibov['Ibov'] = ibov['IBOV'].div(ibov['IBOV'].iloc[0]).mul(100)
ibov
plt.plot(norm['Portfolio'])
plt.plot(ibov['Ibov'])
plt.legend(['Portfolio - Size', 'Ibov'])
plt.show()
final = pd.concat([norm['Portfolio'], ibov['Ibov']], axis=1)
final.to_excel('teste.xlsx', sheet_name = 'Size')
writer = pd.ExcelWriter('final.xlsx')
final.to_excel(writer)
writer.save()
|
the-stack_0_7719 | import logging
logger = logging.getLogger(__name__)
import datetime
from ..config import Config
from pyrogram import Client, filters
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup
from pyrogram.errors import UserNotParticipant, ChatAdminRequired, UsernameNotOccupied
@Client.on_message(filters.private & filters.incoming)
async def force_sub(c, m):
if Config.FORCE_SUB:
try:
chat = await c.get_chat_member(Config.FORCE_SUB, m.from_user.id)
if chat.status=='kicked':
                return await m.reply_text('Hi, you are banned from my updates channel, so you are not able to use me.', quote=True)
except UserNotParticipant:
button = [[InlineKeyboardButton('join Updates channel', url=f'https://t.me/{Config.FORCE_SUB}')]]
markup = InlineKeyboardMarkup(button)
return await m.reply_text(text="Hey join in my updates channel to use me.", parse_mode='markdown', reply_markup=markup, quote=True)
except ChatAdminRequired:
logger.warning(f"Make me admin in @{Config.FORCE_SUB}")
if m.from_user.id in Config.AUTH_USERS:
return await m.reply_text(f"Make me admin in @{Config.FORCE_SUB}")
except UsernameNotOccupied:
logger.warning("The forcesub username was Incorrect. Please give the correct username.")
if m.from_user.id in Config.AUTH_USERS:
return await m.reply_text("The forcesub username was Incorrect. Please give the correct username.")
except Exception as e:
if "belongs to a user" in str(e):
logger.warning("Forcesub username must be a channel username Not yours or any other users username")
if m.from_user.id in Config.AUTH_USERS:
return await m.reply_text("Forcesub username must be a channel username Not yours or any other users username")
logger.error(e)
return await m.reply_text("Some thing went wrong. Try again and if same issue occur contact [our group](https://t.me/dk_botz)", disable_web_page_preview=True, quote=True)
await m.continue_propagation()
|
the-stack_0_7721 | #! /usr/bin/env python
#=========================================================================
# makefile_syntax.py
#=========================================================================
# Helper functions to generate Makefile syntax
#
# Author : Christopher Torng
# Date : June 11, 2019
#
import os
import textwrap
from .utils import stamp, get_top_dir
#-------------------------------------------------------------------------
# Writer class
#-------------------------------------------------------------------------
class Writer( object ):
def __init__( s, output, width=78 ):
s.output = output
s.width = width
def newline( s ):
s.output.write( '\n' )
def comment( s, text ):
lines = textwrap.wrap(
text, s.width-2, break_long_words=False, break_on_hyphens=False
)
for line in lines:
s.output.write( '# ' + line + '\n' )
def write( s, text ):
s.output.write( text )
def default( s, default_target ):
s.output.write( 'default: ' + default_target + '\n' )
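#-------------------------------------------------------------------------
# Example (illustrative only, not part of the original module)
#-------------------------------------------------------------------------
# A minimal sketch of how Writer is typically driven, assuming `fd` is an
# open file handle for the Makefile being generated:
#
#   w = Writer( fd )
#   w.comment( 'Auto-generated -- do not edit' )
#   w.newline()
#   w.default( 'all' )
#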
#-------------------------------------------------------------------------
# Helper functions
#-------------------------------------------------------------------------
# make_cpdir
#
# Copies a directory and handles stamping
#
# - w : instance of Writer
# - dst : path to copied directory
# - src : path to source directory
# - deps : list, additional dependencies
# - sandbox : bool, True (copies src dir), False (symlinks src contents)
#
def make_cpdir( w, dst, src, deps=None, sandbox=True ):
if deps:
assert type( deps ) == list, 'Expecting deps to be of type list'
# $1 -- dst
# $2 -- src
# $3 -- stamp
if sandbox:
rule = 'cpdir-and-parameterize'
else:
rule = 'mkdir-and-symlink'
target = dst + '/.stamp'
# There may be many deps, so generate them on separate lines
if deps:
#deps = [ src ] + deps
deps = [ d for d in deps if ':' not in d ] # ignore colon files
if deps == None:
deps = ''
template_str = '{target}: {dep}\n'
for dep in deps:
w.write( template_str.format( target=target, dep=dep ) )
# Generate the build rule
template_str = '{target}:\n'
template_str += ' $(call {rule},{dst},{src},{stamp})\n'
w.write(
template_str.format(
target = target,
rule = rule,
dst = dst,
src = src,
stamp = target,
)
)
return target
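# Illustrative output (not part of the original module): with hypothetical
# arguments dst='4-synth', src='.mflowgen/4-synth', deps=[] and sandbox=True,
# the call above emits roughly:
#
#   4-synth/.stamp:
#     $(call cpdir-and-parameterize,4-synth,.mflowgen/4-synth,4-synth/.stamp)
#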
# make_symlink
#
# Symlinks src to dst while handling stamping
#
# - w : instance of Writer
# - dst : path to linked file/directory
# - src : path to source file/directory
# - deps : additional dependencies
# - src_is_symlink : boolean, flag if source is a symlink (and has stamp)
# - ignore_src_dep : boolean, does not include src in deps if True
#
def make_symlink( w, dst, src, deps=None, src_is_symlink=False,
ignore_src_dep=False ):
if deps:
assert type( deps ) == list, 'Expecting deps to be of type list'
# $1 -- dst
# $2 -- src
# $3 -- stamp
template_str = '{target}: {deps}\n'
template_str += ' $(call {rule},{dst_dir},{dst},{src},{stamp})\n'
# Stamp files
dst_dir = os.path.dirname( dst )
dst_base = os.path.basename( dst )
dst_stamp = stamp( dst )
# Relative paths for symlinking after changing directories
src_relative = os.path.relpath( src, dst_dir )
dst_relative = dst_base
dst_stamp_relative = os.path.basename( dst_stamp )
# Depend on src stamp if src is also a symlink
if src_is_symlink:
src_stamp = stamp( src )
inputs = src_stamp
else:
inputs = src
# Make
target = dst_stamp
if not ignore_src_dep:
deps.append( inputs )
deps = ' '.join( deps )
else:
deps = ' '.join( deps )
if deps == None:
deps = ''
w.write(
template_str.format(
target = target,
deps = deps,
rule = 'symlink',
dst_dir = dst_dir,
dst = dst_relative,
src = src_relative,
stamp = dst_stamp_relative,
)
)
return target
# make_execute
#
# Runs the execute rule
#
# - w : instance of Writer
# - outputs : outputs of the execute rule
# - rule : name of the execute rule
# - command : string, command for the rule
# - deps : additional dependencies
# - touch_target : should we touch the target?
#
def make_execute( w, outputs, rule, command, deps=None,
touch_target=True ):
if deps:
assert type( deps ) == list, 'Expecting deps to be of type list'
# $1 -- rule name
# #2 -- rule command
rule_def = 'define {rule}\n'.format( rule=rule )
rule_def += ' {command}\n'.format( command=command )
rule_def += 'endef\n'
w.write( rule_def )
w.newline()
template_str = '{output}: {deps}\n'
template_str += ' $(call {rule})\n'
if touch_target:
template_str += ' touch $@\n'
if deps:
deps = ' '.join( deps )
if deps == None:
deps = ''
w.write(
template_str.format(
output = outputs[0],
deps = deps,
rule = rule,
)
)
w.newline()
# Make all other outputs just depend on the first output
template_str = '{output}: {deps}\n' + ' touch $@\n'
if len( outputs ) > 1:
for output in outputs[1:]:
w.write( template_str.format( output = output, deps = outputs[0] ) )
w.newline()
return outputs
# make_stamp
#
# Stamps the given file with a '.stamp.' prefix
#
# - w : instance of Writer
# - f : file to stamp
# - deps : additional dependencies
# - f_is_dep : should the file to be stamped also be a dependency?
#
def make_stamp( w, f, deps=None, f_is_dep=True ):
if deps:
assert type( deps ) == list, 'Expecting deps to be of type list'
f_stamp = stamp( f )
# $1 -- stamp
template_str = '{target}: {deps}\n'
template_str += ' $(call {rule},{stamp})\n'
if deps and f_is_dep:
deps = ' '.join( [ f ] + deps )
else:
deps = ' '.join( deps )
if deps == None:
deps = ''
w.write(
template_str.format(
target = f_stamp,
deps = deps,
rule = 'stamp',
stamp = f_stamp,
)
)
w.newline()
return f_stamp
# make_alias
#
# Create an alias for the given dependencies
#
# - w : instance of Writer
# - alias : alias name(s)
# - deps : dependencies
#
def make_alias( w, alias, deps ):
if deps:
assert type( deps ) == list, 'Expecting deps to be of type list'
# $1 -- stamp
template_str = '.PHONY: {alias}\n'
template_str += '\n'
template_str += '{alias}: {deps}\n'
if deps:
deps = ' '.join( deps )
if deps == None:
deps = ''
w.write( template_str.format( alias=alias, deps=deps ) )
w.newline()
return deps
# make_common_rules
#
# Write out the common rules
#
# - w : instance of Writer
#
def make_common_rules( w ):
w.write(
'''
# $1 -- $dst
# $2 -- $src
# $3 -- $stamp
define cpdir
rm -rf ./$1
cp -aL $2 $1 || true
chmod -R +w $1
touch $3
endef
# $1 -- $dst
# $2 -- $src
# $3 -- $stamp
define cpdir-and-parameterize
rm -rf ./$1
cp -aL $2 $1 || true
chmod -R +w $1
cp .mflowgen/$1/configure.yml $1
touch $3
endef
# $1 -- $dst
# $2 -- $src
# $3 -- $stamp
define mkdir-and-symlink
rm -rf ./$1
mkdir -p $1
cd $1 && ln -sf ../$2/* . && cd ..
rm $1/configure.yml && cp .mflowgen/$1/configure.yml $1
touch $3
endef
# $1 -- $dst_dir
# $2 -- $dst
# $3 -- $src
# $4 -- $stamp
define symlink
mkdir -p $1
cd $1 && ln -sf $3 $2 && touch $4
endef
# $1 -- $stamp
define stamp
touch $1
endef
''')
# make_clean
#
# Write out rules for cleaning
#
# - w : instance of Writer
#
def make_clean( w, name, command ):
template_str = '.PHONY: ' + name + '\n'
template_str += '\n'
template_str += name + ':\n'
template_str += ' {command}\n'
w.write( template_str.format( command=command ) )
w.newline()
# make_diff
#
# Write out rules for diffs
#
# - w : instance of Writer
#
def make_diff( w, name, src, dst ):
exclude_files = [
'configure.yml',
'.time_end',
'.time_start',
'mflowgen-run.*',
'mflowgen-debug.*',
'.stamp',
'inputs',
'outputs',
]
command = ' '.join( [
# Newline
'@echo &&',
# Diff the src and dst
'diff -r -u --minimal',
# Exclude build-system specific files
'--exclude={' + ','.join( exclude_files ) + '}',
src,
dst,
'|',
# Try to portably colorize the outputs with grep
"grep --color=always -e '^-.*' -e '$$' -e 'Only in " + src + ".*'",
'|',
"GREP_COLOR='01;32' grep --color=always -e '^+.*' -e '$$' -e 'Only in " + dst + ".*'",
# Newline
'&& echo',
# Ignore any issues
'|| true',
] )
template_str = '.PHONY: ' + name + '\n'
template_str += '\n'
template_str += name + ':\n'
template_str += ' {command}\n'
w.write( template_str.format( command=command ) )
w.newline()
# make_runtimes
#
# Write out rules for calculating runtimes from timestamps
#
# - w : instance of Writer
#
def make_runtimes( w ):
template_str = '.PHONY: runtimes\n'
template_str += '\n'
template_str += 'runtimes:\n'
template_str += ' {command}\n'
command = '@python ' + get_top_dir() + '/utils/runtimes.py'
w.write( template_str.format( command=command ) )
w.newline()
# make_list
#
# Write out rule to list all steps
#
# - w : instance of Writer
# - order : list of steps in order
# - debug_targets : dict of debug targets with key (id) and value (target)
#
def make_list( w, order, debug_targets ):
steps_str = \
[ '"{: >2} : {}"'.format(i,x) for i, x in enumerate( order ) ]
generic = [
'"list -- List all steps"',
'"status -- Print build status for each step"',
'"runtimes -- Print runtimes for each step"',
'"graph -- Generate a PDF of the step dependency graph"',
'"clean-all -- Remove all build directories"',
'"clean-N -- Clean target N"',
'"diff-N -- Diff target N"',
]
debug_str = \
[ '"debug-{: <2} : {}"'.format(i,tup) \
for i, tup in sorted( debug_targets.items(), key=lambda x:int(x[0]) ) ]
template_str = '.PHONY: list\n'
template_str += '\n'
template_str += 'list:\n'
template_str += ' {command}\n'
commands = [
'echo',
'echo Generic Targets\: && echo && ' + \
'printf " - %s\\n" ' + ' '.join( generic ),
'echo',
'echo Targets\: && echo && ' + \
'printf " - %s\\n" ' + ' '.join( steps_str ),
'echo',
'echo Debug Targets\: && echo && ' + \
'printf " - %s\\n" ' + ' '.join( debug_str ),
'echo',
]
command = '@' + ' && '.join( commands )
w.write( template_str.format( command=command ) )
w.newline()
# make_graph
#
# Write out rule to generate a PDF of the user-defined graph
#
# - w : instance of Writer
#
def make_graph( w ):
command = 'dot -Tpdf .mflowgen/graph.dot > graph.pdf'
template_str = '.PHONY: graph\n'
template_str += '\n'
template_str += 'graph:\n'
template_str += ' {command}\n'
w.write( template_str.format( command=command ) )
w.newline()
# make_status
#
# Write out rules for printing build status
#
# - w : instance of Writer
# - steps : list of step names to print status for
#
def make_status( w, steps ):
steps_comma_separated = ','.join( steps )
template_str = '.PHONY: status\n'
template_str += '\n'
template_str += 'status:\n'
template_str += ' {command}\n'
command = '@python ' + get_top_dir() + '/utils/status.py -s ' \
+ steps_comma_separated
w.write( template_str.format( command=command ) )
w.newline()
|
the-stack_0_7723 | from deidentify.methods.tagging_utils import (ParsedDoc, _bio_to_biluo,
_group_sentences,
fix_dangling_entities)
def test_group_sentences():
tags = [['O', 'O'], ['B', 'B'], ['B', 'I']]
docs = [
ParsedDoc(spacy_doc=None, name='doc_a', text=''),
ParsedDoc(spacy_doc=None, name='doc_a', text=''),
ParsedDoc(spacy_doc=None, name='doc_b', text='')]
output = _group_sentences(tags, docs)
assert output == [
(ParsedDoc(spacy_doc=None, name='doc_a', text=''), ['O', 'O', 'B', 'B']),
(ParsedDoc(spacy_doc=None, name='doc_b', text=''), ['B', 'I'])
]
def test_bio_to_biluo():
bio_tags = ['B-a', 'B-b', 'O', 'B-b', 'I-b', 'I-b', 'O', 'O', 'O', 'B-a', 'I-a']
assert _bio_to_biluo(bio_tags) == [
'U-a', 'U-b', 'O', 'B-b', 'I-b', 'L-b', 'O', 'O', 'O', 'B-a', 'L-a']
bio_tags = ['B-a']
assert _bio_to_biluo(bio_tags) == ['U-a']
bio_tags = ['B-a', 'B-a']
assert _bio_to_biluo(bio_tags) == ['U-a', 'U-a']
bio_tags = ['B-a', 'B-a', 'I-a']
assert _bio_to_biluo(bio_tags) == ['U-a', 'B-a', 'L-a']
bio_tags = ['B-a', 'O']
assert _bio_to_biluo(bio_tags) == ['U-a', 'O']
bio_tags = ['B-a', 'I-a']
assert _bio_to_biluo(bio_tags) == ['B-a', 'L-a']
bio_tags = ['O', 'O', 'O', 'B-a']
assert _bio_to_biluo(bio_tags) == ['O', 'O', 'O', 'U-a']
def test_fix_dangling_entities():
assert fix_dangling_entities(['I-a']) == ['B-a']
assert fix_dangling_entities(['O', 'I-a']) == ['O', 'B-a']
assert fix_dangling_entities(['I-a', 'O']) == ['B-a', 'O']
assert fix_dangling_entities(['I-a', 'I-b']) == ['B-a', 'B-b']
assert fix_dangling_entities(['B-a', 'I-b']) == ['B-a', 'B-b']
assert fix_dangling_entities(['I-b', 'B-b', 'I-b']) == ['B-b', 'B-b', 'I-b']
bio_tags = ['O', 'I-a', 'I-a', 'O', 'I-b', 'O', 'O', 'B-a', 'I-a', 'I-b']
fixed = ['O', 'B-a', 'I-a', 'O', 'B-b', 'O', 'O', 'B-a', 'I-a', 'B-b']
assert fix_dangling_entities(bio_tags) == fixed
|
the-stack_0_7725 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import oslo_serialization.jsonutils as jsonutils
from pecan.hooks import HookController
from pecan.hooks import PecanHook
from pecan import rest
import zlib
LOG = logging.getLogger(__name__)
class ZipperHook(PecanHook):
def before(self, state):
if state.request.method.upper() != 'GET':
try:
zippedBody = state.request.body
body = zlib.decompress(zippedBody)
body = jsonutils.loads(body)
state.request.json_body = body
state.request.content_type = "application/json"
except Exception as e:
                msg = ("Failed to process data, reason: %s" % (e))
LOG.error(msg)
def after(self, state):
data = state.response.body
state.response.body = zlib.compress(data)
state.response.content_type = "application/octet-stream"
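# Note (illustrative, not part of the original module): with this hook in
# place, clients are expected to zlib-compress a JSON body on the way in and
# to zlib-decompress the response, e.g. (assuming the `requests` library and
# a suitable `url`):
#
#   import json, zlib, requests
#   payload = zlib.compress(json.dumps({'key': 'value'}).encode('utf-8'))
#   resp = requests.post(url, data=payload)
#   result = zlib.decompress(resp.content)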
class BaseController(rest.RestController, HookController):
"""This is root controller that forward the request to __init__.py
file inside controller folder inside v1
"""
__hooks__ = [ZipperHook()]
|
the-stack_0_7726 | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .abstract_command_descriptor import AbstractCommandDescriptor
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ClusterCommandDescriptor(AbstractCommandDescriptor):
"""
Command descriptor for querylanguage CLUSTER command.
"""
def __init__(self, **kwargs):
"""
Initializes a new ClusterCommandDescriptor object with values from keyword arguments. The default value of the :py:attr:`~oci.log_analytics.models.ClusterCommandDescriptor.name` attribute
of this class is ``CLUSTER`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param name:
The value to assign to the name property of this ClusterCommandDescriptor.
Allowed values for this property are: "COMMAND", "SEARCH", "STATS", "GEO_STATS", "TIME_STATS", "SORT", "FIELDS", "ADD_FIELDS", "LINK", "LINK_DETAILS", "CLUSTER", "CLUSTER_DETAILS", "CLUSTER_SPLIT", "EVAL", "EXTRACT", "JSON_EXTRACT", "XML_EXTRACT", "EVENT_STATS", "BUCKET", "CLASSIFY", "TOP", "BOTTOM", "HEAD", "TAIL", "FIELD_SUMMARY", "REGEX", "RENAME", "TIME_COMPARE", "WHERE", "CLUSTER_COMPARE", "DELETE", "DELTA", "DISTINCT", "SEARCH_LOOKUP", "LOOKUP", "DEMO_MODE", "MACRO", "MULTI_SEARCH", "HIGHLIGHT", "HIGHLIGHT_ROWS", "HIGHLIGHT_GROUPS", "CREATE_VIEW", "MAP", "NLP", "COMPARE"
:type name: str
:param display_query_string:
The value to assign to the display_query_string property of this ClusterCommandDescriptor.
:type display_query_string: str
:param internal_query_string:
The value to assign to the internal_query_string property of this ClusterCommandDescriptor.
:type internal_query_string: str
:param category:
The value to assign to the category property of this ClusterCommandDescriptor.
:type category: str
:param referenced_fields:
The value to assign to the referenced_fields property of this ClusterCommandDescriptor.
:type referenced_fields: list[oci.log_analytics.models.AbstractField]
:param declared_fields:
The value to assign to the declared_fields property of this ClusterCommandDescriptor.
:type declared_fields: list[oci.log_analytics.models.AbstractField]
"""
self.swagger_types = {
'name': 'str',
'display_query_string': 'str',
'internal_query_string': 'str',
'category': 'str',
'referenced_fields': 'list[AbstractField]',
'declared_fields': 'list[AbstractField]'
}
self.attribute_map = {
'name': 'name',
'display_query_string': 'displayQueryString',
'internal_query_string': 'internalQueryString',
'category': 'category',
'referenced_fields': 'referencedFields',
'declared_fields': 'declaredFields'
}
self._name = None
self._display_query_string = None
self._internal_query_string = None
self._category = None
self._referenced_fields = None
self._declared_fields = None
self._name = 'CLUSTER'
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
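# Example (illustrative only, not part of the generated SDK): the keyword
# arguments mirror the attribute names documented above, e.g.
#
#   descriptor = ClusterCommandDescriptor(
#       display_query_string='... | cluster',
#       internal_query_string='... | cluster',
#       category='...')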
|
the-stack_0_7727 | __copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from functools import wraps
from typing import Callable
from .. import Response
from ..enums import CallbackOnType
from ..excepts import BadClientCallback
from ..helper import colored
from ..importer import ImportExtensions
from ..proto import jina_pb2
def pprint_routes(resp: 'Response', stack_limit: int = 3):
"""Pretty print routes with :mod:`prettytable`, fallback to :func:`print`
:param resp: the :class:`Response` object
:param stack_limit: traceback limit
:return:
"""
from textwrap import fill
routes = resp.routes
header = [colored(v, attrs=['bold']) for v in ('Pod', 'Time', 'Exception')]
# poor-man solution
table = []
def add_row(x):
for h, y in zip(header, x):
table.append(f'{h}\n{y}\n{"-" * 10}')
def visualize(x):
print('\n'.join(x))
with ImportExtensions(required=False):
from prettytable import PrettyTable, ALL
table = PrettyTable(field_names=header, align='l', hrules=ALL)
add_row = table.add_row
visualize = print
for route in routes:
status_icon = '🟢'
if route.status.code == jina_pb2.StatusProto.ERROR:
status_icon = '🔴'
elif route.status.code == jina_pb2.StatusProto.ERROR_CHAINED:
status_icon = '⚪'
add_row([f'{status_icon} {route.pod}',
f'{route.start_time.ToMilliseconds() - routes[0].start_time.ToMilliseconds()}ms',
fill(''.join(route.status.exception.stacks[-stack_limit:]), width=50,
break_long_words=False, replace_whitespace=False)])
visualize(table)
def extract_field(resp, callback_on: 'CallbackOnType'):
resp_body = getattr(resp, resp.WhichOneof('body'))
if callback_on == CallbackOnType.BODY:
return resp_body
elif callback_on == CallbackOnType.DOCS:
return resp_body.docs
elif callback_on == CallbackOnType.GROUNDTRUTHS:
return resp_body.groundtruths
elif callback_on == CallbackOnType.REQUEST:
return resp
else:
raise ValueError(f'callback_on={callback_on} is not supported, '
f'must be one of {list(CallbackOnType)}')
def _safe_callback(func: Callable, continue_on_error: bool, logger) -> Callable:
@wraps(func)
def arg_wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as ex:
err_msg = f'uncaught exception in callback {func.__name__}(): {repr(ex)}'
if continue_on_error:
logger.error(err_msg)
else:
raise BadClientCallback(err_msg) from ex
return arg_wrapper
def callback_exec(response, on_done, on_error, on_always, continue_on_error, logger):
if on_error and response.status.code >= jina_pb2.StatusProto.ERROR:
_safe_callback(on_error, continue_on_error, logger)(response)
elif on_done:
_safe_callback(on_done, continue_on_error, logger)(response)
if on_always:
_safe_callback(on_always, continue_on_error, logger)(response)
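# Example (illustrative only, not part of the original module): a typical call
# site passes the user callbacks straight through, e.g.
#
#   callback_exec(response, on_done=my_done_fn, on_error=my_error_fn,
#                 on_always=None, continue_on_error=False, logger=logger)
#
# on_error fires when the response status code is ERROR or worse, otherwise
# on_done fires; on_always (if given) fires in either case.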
|
the-stack_0_7728 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# This file is included in the final Docker image and SHOULD be overridden when
# deploying the image to prod. Settings configured here are intended for use in local
# development environments. Also note that superset_config_docker.py is imported
# as a final step as a means to override "defaults" configured here
#
import logging
import os
from datetime import timedelta
from typing import Optional
from cachelib.file import FileSystemCache
from celery.schedules import crontab
logger = logging.getLogger()
def get_env_variable(var_name: str, default: Optional[str] = None) -> str:
"""Get the environment variable or raise exception."""
try:
return os.environ[var_name]
except KeyError:
if default is not None:
return default
else:
error_msg = "The environment variable {} was missing, abort...".format(
var_name
)
raise EnvironmentError(error_msg)
DATABASE_DIALECT = get_env_variable("DATABASE_DIALECT")
DATABASE_USER = get_env_variable("DATABASE_USER")
DATABASE_PASSWORD = get_env_variable("DATABASE_PASSWORD")
DATABASE_HOST = get_env_variable("DATABASE_HOST")
DATABASE_PORT = get_env_variable("DATABASE_PORT")
DATABASE_DB = get_env_variable("DATABASE_DB")
# The SQLAlchemy connection string.
SQLALCHEMY_DATABASE_URI = "%s://%s:%s@%s:%s/%s" % (
DATABASE_DIALECT,
DATABASE_USER,
DATABASE_PASSWORD,
DATABASE_HOST,
DATABASE_PORT,
DATABASE_DB,
)
REDIS_HOST = get_env_variable("REDIS_HOST")
REDIS_PORT = get_env_variable("REDIS_PORT")
REDIS_CELERY_DB = get_env_variable("REDIS_CELERY_DB", "0")
REDIS_RESULTS_DB = get_env_variable("REDIS_RESULTS_DB", "1")
RESULTS_BACKEND = FileSystemCache("/app/superset_home/sqllab")
class CeleryConfig(object):
BROKER_URL = f"redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_CELERY_DB}"
CELERY_IMPORTS = ("superset.sql_lab", "superset.tasks")
CELERY_RESULT_BACKEND = f"redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_RESULTS_DB}"
CELERYD_LOG_LEVEL = "DEBUG"
CELERYD_PREFETCH_MULTIPLIER = 1
CELERY_ACKS_LATE = False
CELERYBEAT_SCHEDULE = {
"reports.scheduler": {
"task": "reports.scheduler",
"schedule": crontab(minute="*", hour="*"),
},
"reports.prune_log": {
"task": "reports.prune_log",
"schedule": crontab(minute=10, hour=0),
},
}
CELERY_CONFIG = CeleryConfig
# ===========================================
# ================ IANIC ===================
# ===========================================
from security import OIDCSecurityManager
from flask_appbuilder.security.manager import AUTH_OID
AUTH_TYPE = AUTH_OID
OIDC_CLIENT_SECRETS = "/app/docker/pythonpath_dev/client_secret.json"
OIDC_REQUIRE_VERIFIED_EMAIL = False
CUSTOM_SECURITY_MANAGER = OIDCSecurityManager
OIDC_USER_INFO_ENABLED = True
AUTH_USER_REGISTRATION = True
AUTH_USER_REGISTRATION_ROLE = 'Gamma'
ENABLE_PROXY_FIX = True
SESSION_COOKIE_SAMESITE = 'None' # One of [None, 'None', 'Lax', 'Strict']
OIDC_ID_TOKEN_COOKIE_SECURE = True
ENABLE_CORS = True
APP_NAME = "Ianic Viz"
#APP_ICON = "/static/assets/images/ianic-logo-horiz.png"
WTF_CSRF_ENABLED = False
BABEL_DEFAULT_LOCALE = "el"
LANGUAGES = {
"en": {"flag": "us", "name": "English"},
"el": {"flag": "el", "name": "Greek"},
"es": {"flag": "es", "name": "Spanish"},
}
PUBLIC_ROLE_LIKE = "Gamma"
# ===========================================
# ================ IANIC ===================
# ===========================================
FEATURE_FLAGS = {"ALERT_REPORTS": True}
ALERT_REPORTS_NOTIFICATION_DRY_RUN = True
#WEBDRIVER_BASEURL = "https://viz.ianic.gr/"
WEBDRIVER_BASEURL = "http://superset:8088/"
# The base URL for the email report hyperlinks.
WEBDRIVER_BASEURL_USER_FRIENDLY = WEBDRIVER_BASEURL
SQLLAB_CTAS_NO_LIMIT = True
#
# Optionally import superset_config_docker.py (which will have been included on
# the PYTHONPATH) in order to allow for local settings to be overridden
#
try:
import superset_config_docker
from superset_config_docker import * # noqa
logger.info(
f"Loaded your Docker configuration at " f"[{superset_config_docker.__file__}]"
)
except ImportError:
logger.info("Using default Docker config...")
|
the-stack_0_7729 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 5 09:23:24 2020
Evaluate whether something is a good idea.
@author: Randy Zhu
"""
# Ask for the user's idea.
idea = input("Give me your idea: ")
# Make a list of ideas.
# ikea brand hashmap
# nested lists > dictionary lol
idea_questions = [
# Make an array of arrays, and the arrays inside this array are like key -
# value pairs, with index 0 being the questions, and index 1 being the
# weight.
["Out of 100, how much is this idea worth to you?: ", 5],
["Out of 100, how much will this idea matter in 2 days?: ", 1],
["Out of 100, how much will this idea matter in 2 weeks?: ", 2],
["Out of 100, how much will this idea matter in 2 years?: ", 4],
["Out of 100, how much will this idea matter in 20 years?: ", 5],
["How large will the impact of your idea be (big/medium/small)?: ", 5]
]
# Initialize the goodness score.
idea_goodness_score = 0
# Ask the question of how much it's worth.
worth = input(idea_questions[0][0])
# The goodness score is the worth, multiplied by the weight of the worth.
idea_goodness_score += int(worth) * idea_questions[0][1]
# For each question in the slice of indexes one to four,
# ask it, and then calculate with its average.
# We take the slice 1 - 4, as there are questions
# which must be evaluated differently.
for question in idea_questions[1:4]:
idea_goodness_score += int(input(question[0])) * question[1]
# Get the impact of the questions, by taking the 5th index, and asking it, then,
# if it's a big impact, give it 100, medium, 50, and small, 25.
impact = input(idea_questions[5][0])
if impact.lower().strip("!?. ") == "big":
impact = 100
elif impact.lower().strip("!?. ") == "medium":
impact = 50
elif impact.lower().strip("!?. ") == "small":
impact = 25
# Add the impact to the score.
idea_goodness_score += impact
# If the user answers all 100s, and they think it is a big impact, tell them to
# go for it.
if idea_goodness_score == 1300:
    print("Go for it, it's the best idea ever!")
# If the user answers mostly good, then tell them it is good.
elif idea_goodness_score >= 650:
print(idea + " is good, go for it!")
# If the user gives a short-term, medium-to-low-impact, low-value idea, tell
# them to reevaluate.
elif idea_goodness_score >= 300:
print(idea + " is decent, but you may want to reevaluate some parts")
# If the goodness score is low, then unfortunately, it is not a good idea.
elif idea_goodness_score < 300:
print("Hate to say it, but " + idea + " does not seem good to me.")
# Invalid case.
else:
print("I dunno about " + idea + ".")
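# Worked example of the scoring arithmetic (added for clarity): answering 100
# everywhere gives 100*5 (worth) + 100*1 + 100*2 + 100*4 (the three horizon
# questions asked by the loop over indexes 1-3) = 1200, plus 100 for a "big"
# impact, i.e. the maximum of 1300 checked in the first branch. Note that the
# 20-year question (index 4) is defined but never asked by the loop above.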
|
the-stack_0_7731 | from cvm.constants import ID_PERMS
from cvm.models import VirtualMachineInterfaceModel
def test_to_vnc(vmi_model, project, security_group):
vmi_model.parent = project
vmi_model.security_group = security_group
vnc_vmi = vmi_model.vnc_vmi
assert vnc_vmi.name == vmi_model.uuid
assert vnc_vmi.parent_name == project.name
assert vnc_vmi.display_name == vmi_model.display_name
assert vnc_vmi.uuid == vmi_model.uuid
vnc_mac_address = vnc_vmi.virtual_machine_interface_mac_addresses.mac_address
assert vnc_mac_address == [vmi_model.vcenter_port.mac_address]
assert vnc_vmi.get_id_perms() == ID_PERMS
def test_construct_instance_ip(vmi_model, project, security_group):
vmi_model.parent = project
vmi_model.security_group = security_group
vmi_model.vn_model.vnc_vn.external_ipam = None
vmi_model.construct_instance_ip()
instance_ip = vmi_model.vnc_instance_ip
assert instance_ip.instance_ip_address is None
assert instance_ip.virtual_machine_interface_refs[0]['uuid'] == vmi_model.uuid
expected_uuid = VirtualMachineInterfaceModel.create_uuid(instance_ip.display_name)
assert instance_ip.uuid == expected_uuid
def test_update_ip_address(vmi_model):
check1 = vmi_model.update_ip_address('192.168.100.5')
vmi_model.update_ip_address('192.168.100.5')
check2 = vmi_model.update_ip_address('192.168.100.5')
assert check1 is True
assert check2 is False
assert vmi_model.ip_address == '192.168.100.5'
def test_create_uuid():
mac_str = 'mac-address'
mac_unicode = u'mac-address'
uuid_str = VirtualMachineInterfaceModel.create_uuid(mac_str)
uuid_unicode = VirtualMachineInterfaceModel.create_uuid(mac_unicode)
assert uuid_str == '2f269404-b466-3cc7-8817-d9ee99f63187'
assert uuid_unicode == '2f269404-b466-3cc7-8817-d9ee99f63187'
|
the-stack_0_7733 | # -*- coding: utf-8 -*-
'''
Module for sending messages to hipchat
:configuration: This module can be used by either passing an api key and version
directly or by specifying both in a configuration profile in the salt
master/minion config.
For example:
.. code-block:: yaml
hipchat:
api_key: peWcBiMOS9HrZG15peWcBiMOS9HrZG15
api_version: v1
'''
# Import Python Libs
from __future__ import absolute_import
import json
import logging
# Import 3rd-party Libs
# pylint: disable=import-error,no-name-in-module
from salt.ext.six.moves.urllib.parse import urljoin as _urljoin
from salt.ext.six.moves import range
# pylint: enable=import-error,no-name-in-module
try:
import requests
from requests.exceptions import ConnectionError
ENABLED = True
except ImportError:
ENABLED = False
log = logging.getLogger(__name__)
__virtualname__ = 'hipchat'
def __virtual__():
'''
Return virtual name of the module.
:return: The virtual name of the module.
'''
if not ENABLED:
return False
return __virtualname__
def _query(function, api_key=None, api_version=None, method='GET', data=None):
'''
HipChat object method function to construct and execute on the API URL.
:param api_key: The HipChat api key.
:param function: The HipChat api function to perform.
:param api_version: The HipChat api version (v1 or v2).
:param method: The HTTP method, e.g. GET or POST.
:param data: The data to be sent for POST method.
:return: The json response from the API call or False.
'''
headers = {}
query_params = {}
if data is None:
data = {}
if data.get('room_id'):
room_id = str(data.get('room_id'))
else:
room_id = '0'
hipchat_functions = {
'v1': {
'rooms': {
'request': 'rooms/list',
'response': 'rooms',
},
'users': {
'request': 'users/list',
'response': 'users',
},
'message': {
'request': 'rooms/message',
'response': 'status',
},
},
'v2': {
'rooms': {
'request': 'room',
'response': 'items',
},
'users': {
'request': 'user',
'response': 'items',
},
'message': {
'request': 'room/' + room_id + '/notification',
'response': None,
},
},
}
if not api_key or not api_version:
try:
options = __salt__['config.option']('hipchat')
if not api_key:
api_key = options.get('api_key')
if not api_version:
api_version = options.get('api_version')
except (NameError, KeyError, AttributeError):
log.error("No HipChat api key or version found.")
return False
api_url = 'https://api.hipchat.com'
base_url = _urljoin(api_url, api_version + '/')
path = hipchat_functions.get(api_version).get(function).get('request')
url = _urljoin(base_url, path, False)
if api_version == 'v1':
query_params['format'] = 'json'
query_params['auth_token'] = api_key
if method == 'POST':
headers['Content-Type'] = 'application/x-www-form-urlencoded'
if data.get('notify'):
data['notify'] = 1
else:
data['notify'] = 0
elif api_version == 'v2':
headers['Authorization'] = 'Bearer {0}'.format(api_key)
data = json.dumps(data)
else:
log.error('Unsupported HipChat API version')
return False
try:
result = requests.request(
method=method,
url=url,
headers=headers,
params=query_params,
data=data,
verify=True,
)
except ConnectionError as e:
log.error(e)
return False
if result.status_code == 200:
result = result.json()
response = hipchat_functions.get(api_version).get(function).get('response')
return result.get(response)
elif result.status_code == 204:
return True
else:
log.debug(url)
log.debug(query_params)
log.debug(data)
log.debug(result)
if result.json().get('error'):
log.error(result.json())
return False
def list_rooms(api_key=None, api_version=None):
'''
List all HipChat rooms.
:param api_key: The HipChat admin api key.
:param api_version: The HipChat api version, if not specified in the configuration.
:return: The room list.
CLI Example:
.. code-block:: bash
salt '*' hipchat.list_rooms
salt '*' hipchat.list_rooms api_key=peWcBiMOS9HrZG15peWcBiMOS9HrZG15 api_version=v1
'''
return _query(function='rooms', api_key=api_key, api_version=api_version)
def list_users(api_key=None, api_version=None):
'''
List all HipChat users.
:param api_key: The HipChat admin api key.
:param api_version: The HipChat api version, if not specified in the configuration.
:return: The user list.
CLI Example:
.. code-block:: bash
salt '*' hipchat.list_users
salt '*' hipchat.list_users api_key=peWcBiMOS9HrZG15peWcBiMOS9HrZG15 api_version=v1
'''
return _query(function='users', api_key=api_key, api_version=api_version)
def find_room(name, api_key=None, api_version=None):
'''
Find a room by name and return it.
:param name: The room name.
:param api_key: The HipChat admin api key.
:param api_version: The HipChat api version, if not specified in the configuration.
:return: The room object.
CLI Example:
.. code-block:: bash
salt '*' hipchat.find_room name="Development Room"
salt '*' hipchat.find_room name="Development Room" api_key=peWcBiMOS9HrZG15peWcBiMOS9HrZG15 api_version=v1
'''
rooms = list_rooms(api_key=api_key, api_version=api_version)
if rooms:
for x in range(0, len(rooms)):
if rooms[x]['name'] == name:
return rooms[x]
return False
def find_user(name, api_key=None, api_version=None):
'''
Find a user by name and return it.
:param name: The user name.
:param api_key: The HipChat admin api key.
:param api_version: The HipChat api version, if not specified in the configuration.
:return: The user object.
CLI Example:
.. code-block:: bash
salt '*' hipchat.find_user name="Thomas Hatch"
salt '*' hipchat.find_user name="Thomas Hatch" api_key=peWcBiMOS9HrZG15peWcBiMOS9HrZG15 api_version=v1
'''
users = list_users(api_key=api_key, api_version=api_version)
if users:
for x in range(0, len(users)):
if users[x]['name'] == name:
return users[x]
return False
def send_message(room_id,
message,
from_name,
api_key=None,
api_version=None,
color='yellow',
notify=False):
'''
Send a message to a HipChat room.
:param room_id: The room id or room name, either will work.
:param message: The message to send to the HipChat room.
:param from_name: Specify who the message is from.
:param api_key: The HipChat api key, if not specified in the configuration.
:param api_version: The HipChat api version, if not specified in the configuration.
:param color: The color for the message, default: yellow.
:param notify: Whether to notify the room, default: False.
:return: Boolean if message was sent successfully.
CLI Example:
.. code-block:: bash
salt '*' hipchat.send_message room_id="Development Room" message="Build is done" from_name="Build Server"
salt '*' hipchat.send_message room_id="Development Room" message="Build failed" from_name="Build Server" color="red" notify=True
'''
parameters = dict()
parameters['room_id'] = room_id
parameters['from'] = from_name[:15]
parameters['message'] = message[:10000]
parameters['message_format'] = 'text'
parameters['color'] = color
parameters['notify'] = notify
result = _query(function='message',
api_key=api_key,
api_version=api_version,
method='POST',
data=parameters)
if result:
return True
else:
return False
|
the-stack_0_7735 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import os
import System
import Eto.Drawing as drawing
import Eto.Forms as forms
import Rhino
import compas
class BrowserForm(forms.Form):
def __init__(self, url=None, width=800, height=400):
self.Owner = Rhino.UI.RhinoEtoApp.MainWindow
self.Title = 'RhinoVault2'
self.Padding = drawing.Padding(0)
self.Resizable = False
self.m_webview = forms.WebView()
self.m_webview.Size = drawing.Size(width, height)
if not url:
url = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'src', 'index.html')
self.m_webview.Url = System.Uri(url)
self.m_webview.BrowserContextMenuEnabled = True
layout = forms.DynamicLayout()
layout.Spacing = drawing.Size(5, 5)
layout.BeginVertical()
layout.AddRow(self.m_webview)
layout.EndVertical()
self.Content = layout
self.WindowStyle = getattr(forms.WindowStyle, 'None')
self.m_webview.DocumentLoading += self.action
if compas.WINDOWS:
self.Location = drawing.Point(self.Owner.Location.X + self.Owner.Size.Width / 2 - 400, self.Owner.Location.Y + self.Owner.Size.Height / 2 - 200)
def action(self, sender, e):
if e.Uri.Scheme == "action" and e.Uri.Host == "close":
self.Close()
def Browser():
browserForm = BrowserForm()
browserForm.Show()
|
the-stack_0_7737 | # ------------------------------------------------------------
# lex.py
#
# tokenizer for the language
# ------------------------------------------------------------
import ply
import ply.lex as lex
# List of token names. This is always required
tokens = (
'NUMBER',
'PLUS',
'MINUS',
'TIMES',
'DIVIDE',
'LPAREN',
'RPAREN',
'LBRACKET',
'RBRACKET',
'EXP',
'UNEXP',
'PTR',
'DEFBY',
'SEP',
'EXPRREF',
'VAR',
)
# Regular expression rules for simple tokens
t_PLUS = r'\+'
t_MINUS = r'\-'
t_TIMES = r'\*'
t_DIVIDE = r'\/'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\{'
t_RBRACKET = r'\}'
t_EXP = r'\$'
t_UNEXP = r'\@'
t_PTR = r'\&'
t_DEFBY = r'\:\='
t_SEP = r'\:'
t_EXPRREF = r'\;'
t_VAR = r'[a-zA-Z_]'
# A regular expression rule with some action code
def t_NUMBER(t):
r'\d+'
t.value = int(t.value)
return t
# Define a rule so we can track line numbers
def t_newline(t):
r'\n+'
t.lexer.lineno += len(t.value)
# A string containing ignored characters (spaces and tabs)
t_ignore = ' \t'
# Error handling rule
def t_error(t):
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
# Build the lexer
lexer = lex.lex()
if __name__ == "__main__":
# Test it out
data = '''
x := 2
g := $;x + 1$
'''
# Give the lexer some input
lexer.input(data)
# Tokenize
for tok in lexer:
print(str(tok.lineno) + "} " + str(tok.type) + " (VAL " + str(tok.value) + ") (POS " + str(tok.lexpos) + ")")
|
the-stack_0_7738 | import zlib
try:
import lzma
except ImportError:
lzma = None
import pytest
from ..compress import get_compressor, Compressor, CNONE, ZLIB, LZ4
buffer = bytes(2**16)
data = b'fooooooooobaaaaaaaar' * 10
params = dict(name='zlib', level=6, buffer=buffer)
def test_get_compressor():
c = get_compressor(name='none')
assert isinstance(c, CNONE)
c = get_compressor(name='lz4', buffer=buffer)
assert isinstance(c, LZ4)
c = get_compressor(name='zlib')
assert isinstance(c, ZLIB)
with pytest.raises(KeyError):
get_compressor(name='foobar')
def test_cnull():
c = get_compressor(name='none')
cdata = c.compress(data)
assert len(cdata) > len(data)
assert data in cdata # it's not compressed and just in there 1:1
assert data == c.decompress(cdata)
assert data == Compressor(**params).decompress(cdata) # autodetect
def test_lz4():
c = get_compressor(name='lz4', buffer=buffer)
cdata = c.compress(data)
assert len(cdata) < len(data)
assert data == c.decompress(cdata)
assert data == Compressor(**params).decompress(cdata) # autodetect
def test_zlib():
c = get_compressor(name='zlib')
cdata = c.compress(data)
assert len(cdata) < len(data)
assert data == c.decompress(cdata)
assert data == Compressor(**params).decompress(cdata) # autodetect
def test_lzma():
if lzma is None:
pytest.skip("No lzma support found.")
c = get_compressor(name='lzma')
cdata = c.compress(data)
assert len(cdata) < len(data)
assert data == c.decompress(cdata)
assert data == Compressor(**params).decompress(cdata) # autodetect
def test_autodetect_invalid():
with pytest.raises(ValueError):
Compressor(**params).decompress(b'\xff\xfftotalcrap')
with pytest.raises(ValueError):
Compressor(**params).decompress(b'\x08\x00notreallyzlib')
def test_zlib_compat():
# for compatibility reasons, we do not add an extra header for zlib,
# nor do we expect one when decompressing / autodetecting
for level in range(10):
c = get_compressor(name='zlib', level=level)
cdata1 = c.compress(data)
cdata2 = zlib.compress(data, level)
assert cdata1 == cdata2
data2 = c.decompress(cdata2)
assert data == data2
data2 = Compressor(**params).decompress(cdata2)
assert data == data2
def test_compressor():
params_list = [
dict(name='none', buffer=buffer),
dict(name='lz4', buffer=buffer),
dict(name='zlib', level=0, buffer=buffer),
dict(name='zlib', level=6, buffer=buffer),
dict(name='zlib', level=9, buffer=buffer),
]
if lzma:
params_list += [
dict(name='lzma', level=0, buffer=buffer),
dict(name='lzma', level=6, buffer=buffer),
# we do not test lzma on level 9 because of the huge memory needs
]
for params in params_list:
c = Compressor(**params)
assert data == c.decompress(c.compress(data))
|
the-stack_0_7739 | from django.contrib.admin.views.decorators import staff_member_required
from django.shortcuts import redirect, render
from .forms import MisComprobantesEmitidos_Form
from .scripts import extrae, mis_comprobantes_emitidos
from django.contrib import messages
# Create your views here.
@staff_member_required
def MisComprobantesEmitidosView(request):
tabla=None
excel = None
if request.method == 'POST':
form = MisComprobantesEmitidos_Form(request.POST, request.FILES)
if form.is_valid():
if request.FILES['file'].name.endswith('xlsx'):
excel = request.FILES['file']
try:
data = extrae(excel)
if 'Denominación_Emisor' in data.columns:
raise ValueError('recibido en emitido')
tabla = data.to_dict('records')
request.session['tabla'] = tabla
except:
mensaje = "Hubo un problema con el archivo."
messages.info(request,mensaje)
return render(request, 'emitidos/emitidos_upload.html', {'form': form})
contribuyente = request.POST.get('contribuyente')
request.session['contribuyente'] = contribuyente
mensaje = "Archivo Cargado exitosamente"
messages.info(request,mensaje)
return redirect('emitidos_succes')
form = MisComprobantesEmitidos_Form()
return render(request, 'emitidos/emitidos_upload.html', {'form': form})
@staff_member_required
def emitidos_succes(request):
tabla = None
if request.method == "POST":
if request.POST.get("Answer") == "si":
tabla = request.session['tabla']
contribuyente = request.session['contribuyente']
listaEmitidos = mis_comprobantes_emitidos(tabla,contribuyente)
n = len(listaEmitidos)
for comp in listaEmitidos:
comp.save()
messages.info(request,f"Se agregaron exitosamente {n} comprobantes")
            # clear session variables when finished
if 'tabla' in request.session:
del request.session['tabla']
if 'contribuyente' in request.session:
del request.session['contribuyente']
return render(request, 'emitidos/emitidos_succes.html', {})
if request.POST.get("Answer") == "no":
return redirect('emitidos')
return render(request, 'emitidos/emitidos_succes.html', {}) |
the-stack_0_7740 | #!/usr/bin/python3
from http.server import HTTPServer, BaseHTTPRequestHandler
import sys, os, datetime, re, urllib, shutil
import socket
import socketserver
import threading
os.chdir(os.path.dirname(__file__) or '.') # CD to this directory
from helpers import *
import mimeLib
_statics = {
'buffer_size': 4096,
'config': read_ini('config.ini'),
'allowed_paths': [
'index.html'
]
}
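# Note (added for clarity, not part of the original script): based on the keys
# used below, config.ini is expected to provide at least 'folder', 'apikey',
# 'address' and 'port' (parsed by read_ini from helpers.py, not shown here).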
def initdirs():
md = lambda s: os.makedirs(s, exist_ok=True)
def cf(s):
f = open(s, 'ab')
f.write(b'')
f.close()
md(_statics['config']['folder'])
initdirs()
class MyServer(BaseHTTPRequestHandler):
def do_GET(self):
if self.path == '/':
self.path = '/index.html'
path = recover(self.path)
v = urlvar(path)
if len(path) >= 2:
if path[0:2] == '/~':
if v['apikey'] != _statics['config']['apikey']:
self.send_response(403)
self.send_header('Content-Type', mimeLib.getmime('*.txt'))
self.end_headers()
self.wfile.write(b'Forbidden')
return
if v['action'] == 'get_list':
self.send_response(200)
self.send_header('Content-Type', mimeLib.getmime('*.txt'))
self.end_headers()
self.wfile.write('\n'.join(os.listdir(_statics['config']['folder'])).encode('utf-8'))
return
elif v['action'] == 'get_file':
filepath = _statics['config']['folder'] + v['file']
if os.path.exists(filepath):
self.send_response(200)
self.send_header('Content-Type', mimeLib.getmime(filepath))
# self.send_header('Cache-Control', 'public, max-age=86400')
self.end_headers()
with open(filepath, 'rb') as f:
while True:
data = f.read(_statics['buffer_size'])
if data:
self.wfile.write(data)
else:
break
return
else:
self.send_response(404)
self.send_header('Content-Type', mimeLib.getmime('*.html'))
self.end_headers()
self.wfile.write(b'Not Found')
return
else:
# local file
path = path[1:]
if os.path.exists(path) and path in _statics['allowed_paths']:
self.send_response(200)
self.send_header('Content-Type', mimeLib.getmime(path))
# self.send_header('Cache-Control', 'public, max-age=86400')
self.end_headers()
with open(path, 'rb') as f:
while True:
data = f.read(_statics['buffer_size'])
if data:
self.wfile.write(data)
else:
break
return
else:
self.send_response(404)
self.send_header('Content-Type', mimeLib.getmime('*.html'))
self.end_headers()
self.wfile.write(b'Not Found')
return
def do_POST(self):
if self.path == '/':
self.path = '/index.html'
path = recover(self.path)
v = urlvar(path)
if len(path) >= 2:
if path[0:2] == '/~':
if v['apikey'] != _statics['config']['apikey']:
self.send_response(403)
self.send_header('Content-Type', mimeLib.getmime('*.txt'))
self.end_headers()
self.wfile.write(b'Forbidden')
return
elif v['action'] == 'upload':
self.send_response(200)
self.send_header('Content-Type', mimeLib.getmime('*.txt'))
self.end_headers()
store_path = _statics['config']['folder']
store_name = v['name']
os.makedirs(store_path, exist_ok=True)
content_length = int(self.headers['Content-Length'])
read_bytes = 0
with open(store_path + store_name, 'ab', buffering=_statics['buffer_size']) as f:
while read_bytes < content_length:
bytes_to_write = content_length - read_bytes
data = self.rfile.read(min(_statics['buffer_size'], bytes_to_write))
f.write(data)
read_bytes += _statics['buffer_size']
self.wfile.write(b'OK')
class ThreadedHTTPServer(socketserver.ThreadingMixIn, HTTPServer):
""" Handle requests in a separate thread. """
if __name__ == '__main__':
d = _statics['config']
address, port = d['address'], int(d['port'])
server = ThreadedHTTPServer((address, port), MyServer)
print('Starting server on address %s, port %s...' % (address, port))
try:
# Start a thread with the server -- that thread will then start one
# more thread for each request
server_thread = threading.Thread(target=server.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
# print("Server loop running in thread:", server_thread.name)
server.serve_forever()
except KeyboardInterrupt:
pass
|
the-stack_0_7742 | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2021, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_data_labels25.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [108315392, 108329600]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({
'values': '=Sheet1!$A$1:$A$5',
'data_labels': {'value': 1,
'position': 'outside_end',
'font': {'rotation': 45, 'baseline': -1}},
})
chart.add_series({
'values': '=Sheet1!$B$1:$B$5',
'data_labels': {'value': 1,
'position': 'inside_base',
'font': {'rotation': -45, 'baseline': -1}},
})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
|
the-stack_0_7744 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseAdd."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import timeit
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
x[x < thresh] = 0
non_zero = np.where(x)
x_indices = np.vstack(non_zero).astype(index_dtype).T
x_values = x[non_zero]
x_shape = x.shape
return sparse_tensor.SparseTensor(
indices=x_indices, values=x_values, dense_shape=x_shape), len(x_values)
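# Worked example (added for clarity): for x = [[0.2, 0.7], [0.9, 0.1]] with the
# default thresh=0.5, _sparsify zeroes 0.2 and 0.1 and returns a SparseTensor
# with indices [[0, 1], [1, 0]], values [0.7, 0.9], dense_shape [2, 2], plus
# the nonzero count 2.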
class SparseAddTest(test.TestCase):
def _randomTensor(self, size, np_dtype, sparse=True):
n, m = size
x = np.random.randn(n, m).astype(np_dtype)
return _sparsify(x) if sparse else x
def _SparseTensorValue_3x3(self, negate=False):
# [ 1]
# [2 ]
# [3 4]
# ...or its cwise negation, if `negate`
ind = np.array([[0, 1], [1, 0], [2, 0], [2, 1]])
val = np.array([1, 2, 3, 4])
if negate:
val = -np.array([1, 2, 3, 4])
shape = np.array([3, 3])
return sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
np.array(val, np.float32), np.array(shape, np.int64))
def _SparseTensor_3x3(self, negate=False):
return sparse_tensor.SparseTensor.from_value(
self._SparseTensorValue_3x3(negate))
def _SparseTensor_3x3_v2(self):
# [ 1]
# [-1.9 ]
# [ 3 -4.2]
ind = np.array([[0, 1], [1, 0], [2, 0], [2, 1]])
val = np.array([1, -1.9, 3, -4.2])
shape = np.array([3, 3])
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
def testAddSelf(self):
with test_util.force_cpu():
for sp_a in (self._SparseTensorValue_3x3(), self._SparseTensor_3x3()):
for sp_b in (self._SparseTensorValue_3x3(), self._SparseTensor_3x3()):
sp_sum = sparse_ops.sparse_add(sp_a, sp_b)
self.assertAllEqual((3, 3), sp_sum.get_shape())
sum_out = self.evaluate(sp_sum)
self.assertEqual(sp_sum.dense_shape.get_shape(), [2])
self.assertAllEqual(sum_out.indices, [[0, 1], [1, 0], [2, 0], [2, 1]])
self.assertAllEqual(sum_out.values, [2, 4, 6, 8])
self.assertAllEqual(sum_out.dense_shape, [3, 3])
def testAddSelfAndNegation(self):
with test_util.force_cpu():
sp_a = self._SparseTensor_3x3()
sp_b = self._SparseTensor_3x3(negate=True)
sp_sum = sparse_ops.sparse_add(sp_a, sp_b, 0.1)
sum_out = self.evaluate(sp_sum)
self.assertEqual(sp_sum.dense_shape.get_shape(), [2])
self.assertAllEqual(sum_out.indices, np.empty([0, 2]))
self.assertAllEqual(sum_out.values, [])
self.assertAllEqual(sum_out.dense_shape, [3, 3])
def testSmallValuesShouldVanish(self):
with test_util.force_cpu():
sp_a = self._SparseTensor_3x3()
sp_b = self._SparseTensor_3x3_v2()
# sum:
# [ 2]
# [.1 ]
# [ 6 -.2]
# two values should vanish: |.1| < .21, and |-.2| < .21
sp_sum = sparse_ops.sparse_add(sp_a, sp_b, thresh=0.21)
sum_out = self.evaluate(sp_sum)
self.assertEqual(sp_sum.dense_shape.get_shape(), [2])
self.assertAllEqual(sum_out.indices, [[0, 1], [2, 0]])
self.assertAllEqual(sum_out.values, [2, 6])
self.assertAllEqual(sum_out.dense_shape, [3, 3])
# only .1 vanishes
sp_sum = sparse_ops.sparse_add(sp_a, sp_b, thresh=0.11)
sum_out = self.evaluate(sp_sum)
self.assertEqual(sp_sum.dense_shape.get_shape(), [2])
self.assertAllEqual(sum_out.indices, [[0, 1], [2, 0], [2, 1]])
self.assertAllClose(sum_out.values, [2, 6, -.2])
self.assertAllEqual(sum_out.dense_shape, [3, 3])
@test_util.run_deprecated_v1
def testGradients(self):
np.random.seed(1618) # Make it reproducible.
with self.session(use_gpu=False):
for n in [10, 31]:
for m in [4, 17]:
sp_a, nnz_a = self._randomTensor([n, m], np.float32)
sp_b, nnz_b = self._randomTensor([n, m], np.float32)
sp_sum = sparse_ops.sparse_add(sp_a, sp_b)
nnz_sum = len(self.evaluate(sp_sum.values))
err = gradient_checker.compute_gradient_error(
[sp_a.values, sp_b.values], [(nnz_a,), (nnz_b,)], sp_sum.values,
(nnz_sum,))
self.assertLess(err, 1e-3)
def testAddSparseDense(self):
np.random.seed(1618) # Make it reproducible.
n, m = np.random.randint(30, size=2)
for dtype in [np.float32, np.float64, np.int64, np.complex64]:
for index_dtype in [np.int32, np.int64]:
rand_vals_np = np.random.randn(n, m).astype(dtype)
dense_np = np.random.randn(n, m).astype(dtype)
with test_util.force_cpu():
sparse, unused_nnz = _sparsify(rand_vals_np, index_dtype=index_dtype)
s = self.evaluate(
sparse_ops.sparse_add(sparse, constant_op.constant(dense_np)))
self.assertAllEqual(dense_np + rand_vals_np, s)
self.assertTrue(s.dtype == dtype)
# check commutativity
s = self.evaluate(
sparse_ops.sparse_add(constant_op.constant(dense_np), sparse))
self.assertAllEqual(dense_np + rand_vals_np, s)
self.assertTrue(s.dtype == dtype)
@test_util.run_deprecated_v1
def testSparseTensorDenseAddGradients(self):
np.random.seed(1618) # Make it reproducible.
n, m = np.random.randint(30, size=2)
rand_vals_np = np.random.randn(n, m).astype(np.float32)
dense_np = np.random.randn(n, m).astype(np.float32)
with self.session(use_gpu=False):
sparse, nnz = _sparsify(rand_vals_np)
dense = constant_op.constant(dense_np, dtype=dtypes.float32)
s = sparse_ops.sparse_add(sparse, dense)
err = gradient_checker.compute_gradient_error([sparse.values, dense],
[(nnz,), (n, m)], s, (n, m))
self.assertLess(err, 1e-3)
@test_util.run_deprecated_v1
def testInvalidSparseTensor(self):
with test_util.force_cpu():
shape = [2, 2]
val = [0]
dense = constant_op.constant(np.zeros(shape, dtype=np.int32))
for bad_idx in [
[[-1, 0]], # -1 is invalid.
[[1, 3]], # ...so is 3.
]:
sparse = sparse_tensor.SparseTensorValue(bad_idx, val, shape)
s = sparse_ops.sparse_add(sparse, dense)
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
"invalid index"):
self.evaluate(s)
######################## Benchmarking code
def _s2d_add_vs_sparse_add(sparsity, n, m, num_iters=50):
np.random.seed(1618)
with session.Session(graph=ops.Graph()) as sess:
sp_vals = np.random.rand(n, m).astype(np.float32)
sp_t, unused_nnz = _sparsify(sp_vals, thresh=sparsity, index_dtype=np.int32)
vals = np.random.rand(n, m).astype(np.float32)
s2d = math_ops.add(
sparse_ops.sparse_tensor_to_dense(sp_t), constant_op.constant(vals))
sa = sparse_ops.sparse_add(sp_t, constant_op.constant(vals))
timeit.timeit(lambda: sess.run(s2d), number=3)
timeit.timeit(lambda: sess.run(sa), number=3)
s2d_total = timeit.timeit(lambda: sess.run(s2d), number=num_iters)
sa_total = timeit.timeit(lambda: sess.run(sa), number=num_iters)
# per-iter latency; secs to millis
return s2d_total * 1e3 / num_iters, sa_total * 1e3 / num_iters
class SparseAddBenchmark(test.Benchmark):
def benchmarkSparseAddDense(self):
print("SparseAddDense: add with sparse_to_dense vs. sparse_add")
print("%nnz \t n \t m \t millis(s2d) \t millis(sparse_add) \t speedup")
for sparsity in [0.99, 0.5, 0.01]:
for n in [1, 256, 50000]:
for m in [100, 1000]:
s2d_dt, sa_dt = _s2d_add_vs_sparse_add(sparsity, n, m)
print("%.2f \t %d \t %d \t %.4f \t %.4f \t %.2f" % (sparsity, n, m,
s2d_dt, sa_dt,
s2d_dt / sa_dt))
if __name__ == "__main__":
test.main()
|
the-stack_0_7747 |
def encrypt(message, key):
encrypted_message = ''
for char in message:
if char.isalpha():
#ord() returns an integer representing the Unicode code point of the character
unicode_num = ord(char)
unicode_num += key
if char.isupper():
if unicode_num > ord('Z'):
unicode_num -= 26
elif unicode_num < ord('A'):
unicode_num += 26
elif char.islower():
if unicode_num > ord('z'):
unicode_num -= 26
elif unicode_num < ord('a'):
unicode_num += 26
#chr() returns a character from a string
encrypted_message += chr(unicode_num)
else:
encrypted_message += char
return encrypted_message
def decrypt(encoded, key):
return encrypt(encoded, -key)
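# Worked example (added for clarity): with key=2, encrypt('Abc xyz!', 2)
# returns 'Cde zab!' (wrap-around keeps letters alphabetic, non-letters pass
# through unchanged), and decrypt('Cde zab!', 2) returns 'Abc xyz!'.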
def encrypt_input():
e_message = input('\nEnter message to encrypt: ')
e_key = int(input('\nEnter key number from 1 - 26: '))
while e_key > 26:
e_key = int(input('\nEnter key number from 1 - 26: '))
return f'\nYour encrypted message is =====> {encrypt(e_message, e_key)}'
def decrypt_input():
d_message = input('\nEnter message to decrypt: ')
d_key = int(input('\nEnter key number from 1 - 26: '))
while d_key > 26:
d_key = int(input('\nEnter key number from 1 - 26: '))
return f'\nYour decrypted message is =====> {decrypt(d_message, d_key)}'
def start():
question = input('\nEncrpyt (e) or Decrypt (d) a message? ')
if question == 'e':
return encrypt_input()
if question == 'd':
return decrypt_input()
# else:
# start()
if __name__ == "__main__":
while True:
print(start())
|
the-stack_0_7749 | # -*- coding: utf-8 -*-
from gluon import current
from s3 import *
from s3layouts import *
try:
from .layouts import *
except ImportError:
pass
import s3menus as default
# =============================================================================
class S3MainMenu(default.S3MainMenu):
""" Custom Application Main Menu """
# -------------------------------------------------------------------------
@classmethod
def menu(cls):
""" Compose Menu """
# Modules menus
main_menu = MM()(
cls.menu_modules(),
cls.menu_login(),
cls.menu_personal(),
#cls.menu_lang(),
)
current.menu.footer = cls.menu_footer()
return main_menu
# -------------------------------------------------------------------------
@classmethod
def menu_modules(cls):
""" Custom Modules Menu """
return [
homepage(name = " ",
left = True,
icon = "%s/static/themes/img/logo-small.png" % \
current.request.application,
),
MM("Newsfeed", c="cms", f="newsfeed", m="datalist"),
MM("Needs", c="project", f="activity", m="summary"),
MM("Services", c="org", f="service_location", m="summary"),
MM("Organizations", c="org", f="organisation", m="summary"),
MM("Projects", c="project", f="project", m="summary"),
#MM("Aid Requests", link=False),
#MM("Aid Deliveries", link=False),
MM("Map", c="gis", f="index"),
MM("About", c="default", f="about"),
]
# -------------------------------------------------------------------------
@classmethod
def menu_footer(cls):
""" Footer menu """
return MF()(
MF("Newsfeed", c="cms", f="newsfeed", m="datalist"),
MF("Organizations", c="org", f="organisation"),
MF("Projects", c="project", f="project"),
#MF("Aid Requests", link=False),
#MM("Aid Deliveries", link=False),
MF("Map", c="gis", f="index"),
)
# -------------------------------------------------------------------------
@classmethod
def menu_login(cls):
if current.auth.s3_logged_in():
return None
login = MA("Login", c="default", f="user", m="login", button="secondary", column="5")
#settings = current.deployment_settings
#self_registration = settings.get_security_self_registration()
#if self_registration:
register = MA("Register", c="default", f="user", m="register", button="primary", column="7 end")
#else:
# register = None
return MA()(login, register)
# -------------------------------------------------------------------------
@classmethod
def menu_personal(cls):
""" Custom Personal Menu """
auth = current.auth
if not auth.is_logged_in():
return None
s3 = current.response.s3
settings = current.deployment_settings
s3_has_role = auth.s3_has_role
is_org_admin = lambda i: s3_has_role("ORG_ADMIN") and \
not s3_has_role("ADMIN")
menu_personal = MM(icon="user", link=False)(
MM("Administration", c="admin", f="index",
restrict = "ADMIN",
),
MM("Administration", c="admin", f="user",
check = is_org_admin,
),
MM("Change Password", c="default", f="user",
m = "change_password",
),
MM("Logout", c="default", f="user",
m = "logout",
),
)
return menu_personal
# -------------------------------------------------------------------------
@classmethod
def menu_lang(cls):
""" Language Selector """
s3 = current.response.s3
menu_lang = ML("Language", right=True)
for language in s3.l10n_languages.items():
code, name = language
menu_lang(
ML(name, translate=False, lang_code=code, lang_name=name)
)
return menu_lang
# =============================================================================
class S3OptionsMenu(default.S3OptionsMenu):
""" Custom Controller Menus """
# -------------------------------------------------------------------------
@staticmethod
def gis():
""" GIS / GIS Controllers """
auth = current.auth
if not auth.s3_has_role("MAP_ADMIN"):
# No Side Menu
return None
settings = current.deployment_settings
gis_menu = settings.get_gis_menu()
def pois(i):
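            # Show the PoIs menu entry only when gis_poi is among the
            # deployment's configured "create PoI" resources.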
poi_resources = settings.get_gis_poi_create_resources()
if not poi_resources:
return False
for res in poi_resources:
if res["table"] == "gis_poi":
return True
return False
def config_menu(i):
if not auth.is_logged_in():
# Anonymous users can never configure the Map
return False
s3db = current.s3db
if auth.s3_has_permission("create",
s3db.gis_config):
# If users can create configs then they can see the menu item
return True
# Look for this user's config
table = s3db.gis_config
query = (table.pe_id == auth.user.pe_id)
config = current.db(query).select(table.id,
limitby=(0, 1),
cache=s3db.cache).first()
if config:
return True
def config_args():
#if not auth.user:
# # Won't show anyway due to check
# return []
#if auth.s3_has_role("MAP_ADMIN"):
# Full List
return []
# Look for this user's config
s3db = current.s3db
table = s3db.gis_config
query = (table.pe_id == auth.user.pe_id)
config = current.db(query).select(table.id,
limitby=(0, 1),
cache=s3db.cache).first()
if config:
# Link direct to the User's config
return [config.id, "layer_entity"]
# Link to the Create form
return ["create"]
return M(c="gis")(
M("Fullscreen Map", c="gis", f="map_viewing_client"),
# Currently not got geocoding support
#M("Bulk Uploader", c="doc", f="bulk_upload"),
M("Locations", c="gis", f="location")(
M("Create", m="create"),
#M("Create Location Group", m="create", vars={"group": 1}),
M("Import from CSV", m="import"),
M("Import from OpenStreetMap", m="import_poi",
restrict=[MAP_ADMIN]),
#M("Geocode", f="geocode_manual"),
),
M("PoIs", c="gis", f="poi", check=pois)(),
#M("Population Report", f="location", m="report",
# vars=dict(rows="name",
# fact="sum(population)",
# ),
# ),
M("Configuration", c="gis", f="config", args=config_args(),
_id="gis_menu_config",
check=config_menu),
M("Admin", c="gis", restrict=[MAP_ADMIN])(
M("Hierarchy", f="hierarchy"),
M("Layers", f="catalog"),
M("Markers", f="marker"),
M("Menu", f="menu",
check=[gis_menu]),
M("PoI Types", f="poi_type",
check=[pois]),
M("Projections", f="projection"),
M("Styles", f="style"),
)
)
# -------------------------------------------------------------------------
@classmethod
def hrm(cls):
""" HRM / Human Resources Management """
return cls.org()
# -------------------------------------------------------------------------
def inv(self):
""" Aid Delivery """
if not current.auth.is_logged_in():
# No Side Menu
return None
ADMIN = current.session.s3.system_roles.ADMIN
#current.s3db.inv_recv_crud_strings()
#inv_recv_list = current.response.s3.crud_strings.inv_recv.title_list
#settings = current.deployment_settings
#use_adjust = lambda i: not settings.get_inv_direct_stock_edits()
#use_commit = lambda i: settings.get_req_use_commit()
return M()(
#M("Home", f="index"),
#M("Warehouses", c="inv", f="warehouse")(
# M("Create", m="create"),
# M("Import", m="import", p="create"),
#),
#M("Warehouse Stock", c="inv", f="inv_item")(
# M("Adjust Stock Levels", f="adj", check=use_adjust),
# M("Kitting", f="kitting"),
# M("Import", f="inv_item", m="import", p="create"),
#),
#M("Reports", c="inv", f="inv_item")(
# M("Warehouse Stock", f="inv_item", m="report"),
# M("Expiration Report", c="inv", f="track_item",
# vars=dict(report="exp")),
# M("Monetization Report", c="inv", f="inv_item",
# vars=dict(report="mon")),
# M("Utilization Report", c="inv", f="track_item",
# vars=dict(report="util")),
# M("Summary of Incoming Supplies", c="inv", f="track_item",
# vars=dict(report="inc")),
# M("Summary of Releases", c="inv", f="track_item",
# vars=dict(report="rel")),
#),
#M(inv_recv_list, c="inv", f="recv", translate=False)( # Already T()
# M("Create", m="create"),
# M("Timeline", args="timeline"),
#),
M("Shipments", c="inv", f="send")(
M("Create", m="create"),
M("Search Shipped Items", f="track_item"),
M("Timeline", args="timeline"),
),
M("Items", c="supply", f="item", m="summary")(
M("Create", m="create"),
M("Import", f="catalog_item", m="import", p="create"),
),
# Catalog Items moved to be next to the Item Categories
#M("Catalog Items", c="supply", f="catalog_item")(
#M("Create", m="create"),
#),
#M("Brands", c="supply", f="brand",
# restrict=[ADMIN])(
# M("Create", m="create"),
#),
M("Catalogs", c="supply", f="catalog")(
M("Create", m="create"),
),
M("Item Categories", c="supply", f="item_category",
restrict=[ADMIN])(
M("Create", m="create"),
),
M("Suppliers", c="inv", f="supplier")(
M("Create", m="create"),
M("Import", m="import", p="create"),
),
#M("Facilities", c="inv", f="facility")(
# M("Create", m="create", t="org_facility"),
#),
#M("Facility Types", c="inv", f="facility_type",
# restrict=[ADMIN])(
# M("Create", m="create"),
#),
#M("Warehouse Types", c="inv", f="warehouse_type",
# restrict=[ADMIN])(
# M("Create", m="create"),
#),
#M("Requests", c="req", f="req")(
# M("Create", m="create"),
# M("Requested Items", f="req_item"),
#),
#M("Commitments", c="req", f="commit", check=use_commit)(
#),
)
# -------------------------------------------------------------------------
@staticmethod
def org():
""" ORG / Organization Registry """
if not current.auth.s3_has_role("ADMIN"):
# No Side Menu
return None
system_roles = current.session.s3.system_roles
ADMIN = system_roles.ADMIN
AUTHENTICATED = system_roles.AUTHENTICATED
INDIVIDUALS = current.deployment_settings.get_hrm_staff_label()
return M()(
M("Organizations", c="org", f="organisation")(
M("Create", m="create",
restrict=AUTHENTICATED),
),
M(INDIVIDUALS, c="hrm", f=("staff", "person"),
t="hrm_human_resource")(
#M("Search"),
M("Create", m="create"),
),
M("Service Locations", c="org", f="service_location",
m="summary")(
M("Search", m="summary"),
),
M("Administration", c=("org", "hrm"),
link=False, restrict=ADMIN)(
M("Organisation Types", c="org", f="organisation_type"),
M("Sectors", c="org", f="sector"),
M("Service Types", c="org", f="service"),
M("Facility Types", c="org", f="facility_type"),
M("Job Title Catalog", c="hrm", f="job_title"),
),
)
# -------------------------------------------------------------------------
@classmethod
def pr(cls):
""" Person Registry """
if not current.auth.is_logged_in():
# No Side Menu
return None
return cls.org()
# -------------------------------------------------------------------------
@staticmethod
def project():
""" Project Management """
if not current.auth.s3_has_role("ADMIN"):
# No Side Menu
return None
ADMIN = current.session.s3.system_roles.ADMIN
return M(c="project")(
M("Activities (4W)", f="activity", m="summary")(
M("Create", m="create"),
M("Map", m="summary", vars={"t": "2"}),
),
M("Projects", f="project")(
M("Create", m="create"),
M("Map", f="location", m="map"),
),
M("Administration", link=False, restrict=ADMIN)(
M("Hazards", f="hazard"),
M("Status", f="status"),
),
)
# -------------------------------------------------------------------------
@staticmethod
def req():
""" REQ / Request Management """
if not current.auth.is_logged_in():
# No Side Menu
return None
ADMIN = current.session.s3.system_roles.ADMIN
#settings = current.deployment_settings
#types = settings.get_req_req_type()
#if len(types) == 1:
# t = types[0]
# if t == "Stock":
# create_menu = M("Create", m="create", vars={"type": 1})
# elif t == "People":
# create_menu = M("Create", m="create", vars={"type": 2})
# else:
# create_menu = M("Create", m="create")
#else:
# create_menu = M("Create", m="create")
#recurring = lambda i: settings.get_req_recurring()
#use_commit = lambda i: settings.get_req_use_commit()
#req_items = lambda i: "Stock" in types
#req_skills = lambda i: "People" in types
return M(c="req")(
M("Requests", f="req")(
M("Create", m="create", vars={"type": 1}),
#M("List Recurring Requests", f="req_template", check=recurring),
#M("Map", m="map"),
#M("Report", m="report"),
M("Search All Requested Items", f="req_item",
#check=req_items
),
#M("Search All Requested Skills", f="req_skill",
# check=req_skills),
),
#M("Commitments", f="commit", check=use_commit)(
#),
M("Items", c="supply", f="item")(
M("Create", m="create"),
M("Report", m="report"),
M("Import", m="import", p="create"),
),
# Catalog Items moved to be next to the Item Categories
#M("Catalog Items", c="supply", f="catalog_item")(
#M("Create", m="create"),
#),
M("Catalogs", c="supply", f="catalog")(
M("Create", m="create"),
),
M("Item Categories", c="supply", f="item_category",
restrict=[ADMIN])(
M("Create", m="create"),
),
)
# END =========================================================================
|
the-stack_0_7750 | import bisect
import collections
class Solution(object):
def isSubsequence(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
d = collections.defaultdict(list)
for i, c in enumerate(t):
d[c].append(i)
start = 0
for c in s:
idx = bisect.bisect_left(d[c], start)
if len(d[c]) == 0 or idx >= len(d[c]):
return False
start = d[c][idx] + 1
return True |
the-stack_0_7751 | import numpy as np
import pandas as pd
from typing import List, Optional
import yaml
import re
def load_yaml(config_fname: str) -> dict:
"""Load in YAML config file.
Args:
config_fname (str): Filename to load.
Returns:
dict: Return yaml dictionary.
"""
loader = yaml.SafeLoader
loader.add_implicit_resolver(
"tag:yaml.org,2002:float",
re.compile(
"""^(?:
[-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
|[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
|\\.[0-9_]+(?:[eE][-+][0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*
|[-+]?\\.(?:inf|Inf|INF)
|\\.(?:nan|NaN|NAN))$""",
re.X,
),
list("-+0123456789."),
)
with open(config_fname) as file:
yaml_config = yaml.load(file, Loader=loader)
return yaml_config
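# Note: the extra implicit resolver registered above lets SafeLoader parse
# scientific-notation floats such as 1e-3 as numbers; the stock SafeLoader
# would otherwise load them as strings.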
def load_search_log(log_fname: str) -> pd.core.frame.DataFrame:
"""Reload the stored log yaml file.
Args:
log_fname (str): Filename to load.
Returns:
pd.core.frame.DataFrame: Reloaded log as pandas dataframe.
"""
log_dict = load_yaml(log_fname)
log_list = []
for k in log_dict.keys():
log_list.append(log_dict[k])
# Load in json format for nested dictionaries
df = pd.json_normalize(log_list)
# Rename columns and get rid of 'params.'
new_cols = [df.columns[i].split(".")[-1] for i in range(len(df.columns))]
df.columns = new_cols
return df
def visualize_2D_grid(
hyper_df: pd.core.frame.DataFrame,
fixed_params: Optional[dict] = None,
params_to_plot: list = [],
target_to_plot: str = "objective",
plot_title: str = "Temp Title",
plot_subtitle: Optional[str] = None,
xy_labels: Optional[List[str]] = ["x-label", "y-label"],
variable_name: Optional[str] = "Performance",
every_nth_tick: int = 1,
plot_colorbar: bool = True,
text_in_cell: bool = False,
max_heat: Optional[float] = None,
min_heat: Optional[float] = None,
norm_cols: bool = False,
norm_rows: bool = False,
return_array: bool = False,
round_ticks: int = 1,
fig=None,
ax=None,
figsize: tuple = (10, 8),
cmap="magma",
fname: Optional[str] = None,
):
"""Fix certain params & visualize grid target value over two selected ones.
Args:
hyper_df (pd.core.frame.DataFrame):
Dataframe with variable values and target values.
fixed_params (Union[None, dict], optional):
Dictionary of key, value pairs to fix/slice by. Defaults to None.
params_to_plot (list, optional):
List of two variables to plot on x/y axis of heatmap. Defaults to [].
target_to_plot (str, optional):
Target variable name to plot. Defaults to "target".
plot_title (str, optional):
Title of the plot. Defaults to "Temp Title".
plot_subtitle (Union[None, str], optional):
Subtitle of the plot. Defaults to None.
xy_labels (Union[None, List[str]], optional):
List of x/y labels. Defaults to ["x-label", "y-label"].
variable_name (Union[None, str], optional):
Variable name shown in heatmap colorbar. Defaults to "Var Label".
every_nth_tick (int, optional):
Spacing between x/y ticks. Defaults to 1.
plot_colorbar (bool, optional):
Option to plot colorbar. Defaults to True.
text_in_cell (bool, optional):
Option to plot text in heat cells. Defaults to False.
max_heat (Union[None, float], optional):
Heat clipping max value. Defaults to None.
min_heat (Union[None, float], optional):
Heat clipping min value. Defaults to None.
norm_cols (bool, optional):
Option to normalize columns to max 1. Defaults to False.
norm_rows (bool, optional):
Option to normalize rows to max 1. Defaults to False.
return_array (bool, optional):
Option to return extracted heat array. Defaults to False.
round_ticks (int, optional):
Decimals to round ticks to. Defaults to 1.
fig (_type_, optional):
Figure object to manipulate. Defaults to None.
ax (_type_, optional):
Axis object to manipulate. Defaults to None.
figsize (tuple, optional):
Size of figure. Defaults to (10, 8).
cmap (str, optional):
Choice of colormap. Defaults to "magma".
fname (Union[None, str], optional):
Optional filename to store figure in. Defaults to None.
Returns:
_type_: Heat arrays or figure and axis matplotlib objects.
"""
assert len(params_to_plot) == 2, "You can only plot 2 variables!"
# Select the data to plot - max. fix 2 other vars
p_to_plot = params_to_plot + [target_to_plot]
try:
sub_log = hyper_df.hyper_log.copy()
except Exception:
sub_log = hyper_df.copy()
if fixed_params is not None:
for k, v in fixed_params.items():
if type(v) == float or type(v) == int:
sub_log = sub_log[sub_log[k].astype(float) == v]
elif type(v) == str:
sub_log = sub_log[sub_log[k].astype(str) == v]
# Subselect the desired params from the pd df
temp_df = sub_log[p_to_plot]
# Construct the 2D array using helper function
range_x = np.unique(temp_df[p_to_plot[0]])
range_y = np.unique(temp_df[p_to_plot[1]])
heat_array = get_heatmap_array(
range_x, range_y, temp_df.to_numpy(), norm_cols, norm_rows
)
if return_array:
return heat_array, range_x, range_y
else:
# Construct the plot
fig, ax = plot_2D_heatmap(
range_x,
range_y,
heat_array,
plot_title,
plot_subtitle,
xy_labels,
variable_name,
every_nth_tick,
plot_colorbar,
text_in_cell,
max_heat,
min_heat,
round_ticks,
figsize=figsize,
fig=fig,
ax=ax,
cmap=cmap,
)
# Save the figure if a filename was provided
if fname is not None:
fig.savefig(fname, dpi=300)
else:
return fig, ax
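# Minimal usage sketch (hypothetical file and parameter names, not part of
# the original module):
#   hyper_df = load_search_log("search_log.yaml")
#   visualize_2D_grid(hyper_df,
#                     params_to_plot=["lrate", "batch_size"],
#                     target_to_plot="objective",
#                     fname="heatmap.png")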
def get_heatmap_array(
range_x: np.ndarray,
range_y: np.ndarray,
results_df: np.ndarray,
norm_cols: bool = False,
norm_rows: bool = False,
) -> np.ndarray:
"""Construct the 2D array to plot in heatmap.
Args:
range_x (np.ndarray): Discrete range on x-axis.
range_y (np.ndarray): Discrete range on y-axis.
results_df (np.ndarray): Flat array with results [x, y, target].
norm_cols (bool, optional):
Option to normalize columns to max 1. Defaults to False.
norm_rows (bool, optional):
Option to normalize rows to max 1. Defaults to False.
Returns:
np.ndarray: 2D array of shape [|X|, |Y|] containing target values.
"""
bring_the_heat = np.zeros((len(range_y), len(range_x)))
for i, val_x in enumerate(range_x):
for j, val_y in enumerate(range_y):
case_at_hand = np.where(
(results_df[:, 0] == val_x) & (results_df[:, 1] == val_y)
)
results_temp = results_df[case_at_hand, 2]
# Reverse index so that small in bottom left corner
bring_the_heat[len(range_y) - 1 - j, i] = results_temp
# Normalize the rows and/or columns by the maximum
if norm_cols:
bring_the_heat /= bring_the_heat.max(axis=0)
if norm_rows:
bring_the_heat /= bring_the_heat.max(axis=1)[:, np.newaxis]
return bring_the_heat
def plot_2D_heatmap(
range_x: np.ndarray,
range_y: np.ndarray,
heat_array: np.ndarray,
title: str = "Placeholder Title",
subtitle: Optional[str] = None,
xy_labels: list = ["x-label", "y-label"],
variable_name: Optional[str] = None,
every_nth_tick: int = 1,
plot_colorbar: bool = True,
text_in_cell: bool = False,
max_heat: Optional[float] = None,
min_heat: Optional[float] = None,
round_ticks: int = 1,
fig=None,
ax=None,
figsize: tuple = (10, 8),
cmap="magma",
):
"""Plot the 2D heatmap.
Args:
range_x (np.ndarray): Discrete range on x-axis.
range_y (np.ndarray): Discrete range on y-axis.
heat_array (np.ndarray): 2D array of shape [|X|,|Y|] containing targets.
title (str, optional):
Title of the plot. Defaults to "Temp Title".
subtitle (Union[None, str], optional):
Subtitle of the plot. Defaults to None.
xy_labels (Union[None, List[str]], optional):
List of x/y labels. Defaults to ["x-label", "y-label"].
variable_name (Union[None, str], optional):
Variable name shown in heatmap colorbar. Defaults to "Var Label".
every_nth_tick (int, optional):
Spacing between x/y ticks. Defaults to 1.
plot_colorbar (bool, optional):
Option to plot colorbar. Defaults to True.
text_in_cell (bool, optional):
Option to plot text in heat cells. Defaults to False.
max_heat (Union[None, float], optional):
Heat clipping max value. Defaults to None.
min_heat (Union[None, float], optional):
Heat clipping min value. Defaults to None.
round_ticks (int, optional):
Decimals to round ticks to. Defaults to 1.
fig (_type_, optional):
Figure object to manipulate. Defaults to None.
ax (_type_, optional):
Axis object to manipulate. Defaults to None.
figsize (tuple, optional):
Size of figure. Defaults to (10, 8).
cmap (str, optional):
Choice of colormap. Defaults to "magma".
Returns:
_type_: Figure and axis matplotlib objects.
"""
try:
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import seaborn as sns
# Set overall plots appearance sns style
sns.set(
context="poster",
style="white",
palette="Paired",
font="sans-serif",
font_scale=1.05,
color_codes=True,
rc=None,
)
except ImportError:
raise ImportError(
"You need to install `matplotlib` and `seaborn` to "
"use `mle-hyperopt`'s plotting utilities."
)
if fig is None or ax is None:
fig, ax = plt.subplots(figsize=figsize)
if max_heat is None and min_heat is None:
im = ax.imshow(
heat_array,
cmap=cmap,
vmax=np.max(heat_array),
vmin=np.min(heat_array),
)
elif max_heat is not None and min_heat is None:
im = ax.imshow(heat_array, cmap=cmap, vmax=max_heat)
elif max_heat is None and min_heat is not None:
im = ax.imshow(heat_array, cmap=cmap, vmin=min_heat)
else:
im = ax.imshow(heat_array, cmap=cmap, vmin=min_heat, vmax=max_heat)
ax.set_yticks(np.arange(len(range_y)))
if len(range_y) != 0:
if type(range_y[-1]) is not str:
if round_ticks != 0:
yticklabels = [
str(round(float(label), round_ticks))
for label in range_y[::-1]
]
else:
yticklabels = [str(int(label)) for label in range_y[::-1]]
else:
yticklabels = [str(label) for label in range_y[::-1]]
else:
yticklabels = []
ax.set_yticklabels(yticklabels)
for n, label in enumerate(ax.yaxis.get_ticklabels()):
if n % every_nth_tick != 0:
label.set_visible(False)
ax.set_xticks(np.arange(len(range_x)))
if len(range_x) != 0:
if type(range_x[-1]) is not str:
if round_ticks != 0:
xticklabels = [
str(round(float(label), round_ticks)) for label in range_x
]
else:
xticklabels = [str(int(label)) for label in range_x]
else:
xticklabels = [str(label) for label in range_x]
else:
xticklabels = []
ax.set_xticklabels(xticklabels)
for n, label in enumerate(ax.xaxis.get_ticklabels()):
if n % every_nth_tick != 0:
label.set_visible(False)
# Rotate the tick labels and set their alignment.
plt.setp(
ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor"
)
if subtitle is None:
ax.set_title(title)
else:
ax.set_title(title + "\n" + str(subtitle))
if len(range_x) != 0:
ax.set_xlabel(xy_labels[0])
if len(range_y) != 0:
ax.set_ylabel(xy_labels[1])
if plot_colorbar:
# fig.subplots_adjust(right=0.8)
# cbar_ax = fig.add_axes([0.85, 0.25, 0.05, 0.5])
# cbar = fig.colorbar(im, cax=cbar_ax)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="7%", pad=0.15)
cbar = fig.colorbar(im, cax=cax)
if variable_name is not None:
cbar.set_label(variable_name, rotation=270, labelpad=30)
fig.tight_layout()
if text_in_cell:
for y in range(heat_array.shape[0]):
for x in range(heat_array.shape[1]):
ax.text(
x,
y,
"%.2f" % heat_array[y, x],
horizontalalignment="center",
verticalalignment="center",
)
return fig, ax
|
the-stack_0_7752 | import os, sys, signal, time, timeit
import cv2
import numpy as np
from multiprocessing import Process, Queue
from multiprocessing.sharedctypes import Value, Array
#from queue import Queue
#from Queue import Queue
from c_camera import ImgCap, initCamera
import copy
np.set_printoptions(threshold=sys.maxsize)
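# Rough architecture: ImgCap (the producer, defined in c_camera) is expected
# to push frames into one Queue per worker, while each iTask process below
# drains its own queue and counts frames until it receives a None sentinel.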
class iTask(Process):
def __init__(self, queue, signal):
Process.__init__(self)
self.qf = queue
self.signal = signal
self.frame_count = 0
def run(self):
while (True):
img = self.qf.get()
if img is None:
break
self.frame_count += 1
#print("{}:{}".format(self.name, self.frame_count))
'''
Note:
            Only one cv2 window can be shown at a time when running multiple workers
'''
#showimg = copy.copy(img)
#cv2.imshow(self.name, showimg)
#if cv2.waitKey(1) & 0xFF == ord('q'):
# break
print("Quit {}, frames {}".format(self.name, self.frame_count))
def terminate(self):
print("{} OFF".format(self.name))
self.signal = "OFF"
def count_frame(self):
return self.frame_count
'''
def singer(pd):
global sign
while (True):
#print("singing..., %s" %sign)
time.sleep(2)
if sign == "OFF":
pd.terminate()
break
print("Quit singer")
'''
sign = "ON"
pdd = None
def signal_handler(signum, frame):
global sign
sign = "OFF"
print("Get signal: %s" %signal)
pdd.terminate()
def demo(FLAGS):
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
print("init camera")
cap = cv2.VideoCapture(FLAGS.vdev)
initCamera(cap, FLAGS.fps)
time.sleep(1)
global sign
queue_list = []
consumer_list = []
job_list = []
print("start consumer")
for i in range(FLAGS.jobs):
queue = Queue(maxsize=128)
queue_list.append(queue)
        consumer = iTask(queue, sign)
consumer_list.append(consumer)
job_list.append(consumer)
#consumer.setDaemon(True)
consumer.start()
print("timeit")
time_start = timeit.default_timer()
global pdd
print("start producer")
f = Value('i', 0)
    producer = ImgCap(cap, queue_list, sign, f)
pdd = producer
job_list.append(producer)
#producer.setDaemon(True)
producer.start()
'''
print("start singing")
sing = Thread(name="Singer", target=singer, args=(producer,))
sing.setDaemon(True)
sing.start()
'''
'''
while True:
alive = False
#alive = alive or sing.isAlive()
for t in job_list:
alive = alive or t.isAlive()
if not alive:
break
'''
for t in job_list:
t.join()
time_end = timeit.default_timer()
cap_frame_count = producer.count_frame()
print("Produce frames: %d" %cap_frame_count)
inference_frame_count = 0
for consumer in consumer_list:
fcnt = consumer.count_frame()
inference_frame_count += fcnt
print("Consume frames: %d" %inference_frame_count)
cv2.destroyAllWindows()
cap.release()
time_used = time_end - time_start
print("Time Used: %d second!" %time_used)
print("FPS: %.6f" %(inference_frame_count / time_used))
print("FPS: %.6f" %(f.value / time_used))
|
the-stack_0_7753 | from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pyEp',
version='0.9.4.4',
description='pyEp: EnergyPlus cosimulation in Python',
long_description=long_description,
url='',
author='Derek Nong',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5'
],
keywords='EnergyPlus simulation',
packages=['pyEp'],
package_dir={'pyEp': 'pyEp'},
include_package_data=True
) |
the-stack_0_7754 | import sublime, sublime_plugin
import json
import re
import os
from os.path import dirname, realpath
# Define Path to JSON Cache
__FUNCTIONS_MERCHANT_PATH__ = dirname(realpath(__file__)) + os.sep + '/functions-merchant.json'
class MvtDoCompletions(sublime_plugin.EventListener):
"""
MvtDO File / Function Attribute Completions
| Smartly determine which "Functions" to autocomplete based on chosen "File"
| <mvt:do file="g.Module_Library_DB" value="Product_Load_ID(), Category_Load_ID() ..." />
"""
def __init__(self):
self.functions_merchant_data = self.read_data_file(__FUNCTIONS_MERCHANT_PATH__)
self.quick_panel_data = {}
def on_query_completions(self, view, prefix, locations):
# Only trigger in an <mvt:do> Tag
if not view.match_selector(locations[0], 'text.mvt text.html.basic meta.tag.inline.do.mvt'):
return []
# determine what <mvt:do> attribute you're in
if (view.match_selector(locations[0], 'text.mvt text.html.basic meta.tag.inline.do.mvt attribute-value.file.mvt')):
mvtdo_attribute = 'file'
elif (view.match_selector(locations[0], 'text.mvt text.html.basic meta.tag.inline.do.mvt attribute-value.value.mvt')):
prev_pt = max(0, locations[0] - 1)
is_variable = view.match_selector(prev_pt, 'variable.language')
if (is_variable):
return []
mvtdo_attribute = 'value'
else:
return []
return self.get_completions(view, prefix, locations, mvtdo_attribute)
def on_post_text_command(self, view, command_name, *args):
if (command_name == 'commit_completion' or command_name == 'insert_best_completion'):
for r in view.sel():
in_value_attribute = view.match_selector(r.begin(), 'text.mvt text.html.basic meta.tag.inline.do.mvt attribute-value.value.mvt')
if (in_value_attribute):
prev_pt = max(0, r.begin() - 1)
is_variable = view.match_selector(prev_pt, 'variable.language')
if (is_variable is False):
file_attribute_val = self.get_current_file_attribute_val(view, r.begin(), '')
if (file_attribute_val == ''):
value_attribute_val = self.get_current_value_attribute_val(view, r.begin(), '')
function_name = self.get_function_name(view, value_attribute_val)
if function_name is not False:
file_name = self.get_file_name(view, function_name)
if file_name is not False:
if type(file_name) is list:
self.quick_panel_data = { "view": view, "pt": r.begin(), "file_name": file_name }
view.window().show_quick_panel(file_name, self.choose_file_name, sublime.MONOSPACE_FONT)
elif type(file_name) is str:
self.insert_file_name(view, r.begin(), file_name)
def get_completions(self, view, prefix, locations, mvtdo_attribute):
completion_list = []
if (mvtdo_attribute == 'file'):
completion_list = self.get_file_completions(view, locations[0], prefix)
elif (mvtdo_attribute == 'value'):
file_attribute_val = self.get_current_file_attribute_val(view, locations[0], prefix)
completion_list = self.get_value_completions(view, locations[0], prefix, file_attribute_val)
return (completion_list, sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS)
"""
Custom Methods
"""
def read_data_file(self, path):
with open( path ) as data_file:
data = json.load(data_file)
return data
def get_file_completions(self, view, pt, prefix):
file_completions = [ ( file['distro_path'] + '\tFile', file['distro_path'].replace('$', '\\$') ) for file in self.functions_merchant_data ]
return set(file_completions)
def get_value_completions(self, view, pt, prefix, file_attribute_val):
value_completions = []
for file in self.functions_merchant_data:
if (file_attribute_val == file['distro_path'] or file_attribute_val == ''):
for function in file['functions']:
parameters = self.build_function_parameters(function['parameters'])
value_completions.append( (function['name'] + '\tFunc', function['name'] + parameters) )
return set(value_completions)
def build_function_parameters(self, parameters):
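        # Builds a snippet-style argument list for the completion, e.g. a
        # (hypothetical) parameter list ['product_code', 'return_buffer']
        # becomes '( ${1:product_code}, ${0:return_buffer} )'; the last tab
        # stop is ${0}, so the cursor leaves the call after the final field.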
if (len(parameters) == 0):
return '()'
parameters_map = []
count = 0
for parameter in parameters:
count += 1
if (count == len(parameters)):
count = 0
parameters_map.append( '${' + str(count) + ':' + parameter + '}' )
sep = ', '
return '( ' + sep.join(parameters_map) + ' )'
def get_current_file_attribute_val(self, view, pt, prefix):
mvtdo_tag_region = self.get_mvtdo_tag_region(view, pt, prefix)
if (mvtdo_tag_region is False):
return ''
file_attribute_all_locations = view.find_by_selector( 'text.mvt text.html.basic meta.tag.inline.do.mvt attribute-value.file.mvt' )
file_attribute_val = ''
for file_attribute_location in file_attribute_all_locations:
if (mvtdo_tag_region.contains(file_attribute_location)):
file_attribute_val = view.substr(file_attribute_location)
file_attribute_val = file_attribute_val.replace('"', '')
return file_attribute_val
def get_current_value_attribute_val(self, view, pt, prefix):
mvtdo_tag_region = self.get_mvtdo_tag_region(view, pt, prefix)
if (mvtdo_tag_region is False):
return ''
value_attribute_all_locations = view.find_by_selector( 'text.mvt text.html.basic meta.tag.inline.do.mvt attribute-value.value.mvt' )
value_attribute_val = ''
for attribute_value_location in value_attribute_all_locations:
if (mvtdo_tag_region.contains(attribute_value_location)):
value_attribute_val = view.substr(attribute_value_location)
value_attribute_val = value_attribute_val.replace('"', '')
return value_attribute_val
def get_mvtdo_tag_region(self, view, pt, prefix):
# limit the search left/right to 500 characters
_LIMIT = 500
# left side of the string
left_start = pt
left_end = max(0, left_start - _LIMIT)
left_angle_pos = False
i = left_start
while i >= left_end:
c = view.substr(i)
if (c == '<'):
left_angle_pos = i
break
i -= 1
# right side of the string
right_start = pt + len(prefix)
right_end = right_start + _LIMIT
right_angle_pos = False
i = right_start
while i <= right_end:
c = view.substr(i)
if (c == '>'):
right_angle_pos = i
break
i += 1
if (left_angle_pos is False or right_angle_pos is False):
return False
return sublime.Region(left_angle_pos, right_angle_pos)
def get_function_name(self, view, value_attribute_val):
match = re.match(r'([a-z0-9_]+)\s*?\(', value_attribute_val, re.I)
if match:
return match.group(1)
else:
return False
def get_file_name(self, view, function_name):
files = []
for file in self.functions_merchant_data:
for function in file['functions']:
if function_name == function['name']:
files.append(file['distro_path'])
files = set(files)
if (len(files) == 0):
return False
elif (len(files) == 1):
return next(iter(files))
else:
return list(files)
def choose_file_name(self, index):
self.insert_file_name(self.quick_panel_data['view'], self.quick_panel_data['pt'], self.quick_panel_data['file_name'][index])
self.quick_panel_data = {}
def insert_file_name(self, view, pt, file_name):
mvtdo_tag_region = self.get_mvtdo_tag_region(view, pt, '')
if (mvtdo_tag_region is False):
return ''
file_attribute_all_locations = view.find_by_selector( 'text.mvt text.html.basic meta.tag.inline.do.mvt attribute-value.file.mvt' )
for file_attribute_location in file_attribute_all_locations:
if (mvtdo_tag_region.contains(file_attribute_location)):
file_attribute_pt = file_attribute_location.begin() + 1
view.run_command('insert_file_name', {
"args": {
"file_attribute_pt": file_attribute_pt,
"file_name": file_name
}
})
class InsertFileNameCommand(sublime_plugin.TextCommand):
def run(self, edit, args):
self.view.insert(edit, args['file_attribute_pt'], args['file_name'])
|
the-stack_0_7756 | import logging
import re
from typing import Iterator
logger = logging.getLogger(__name__)
def filter_platform_selectors(content: str, platform: str) -> Iterator[str]:
""" """
# we support a very limited set of selectors that adhere to platform only
platform_sel = {
"linux-64": {"linux64", "unix", "linux"},
"linux-aarch64": {"aarch64", "unix", "linux"},
"linux-ppc64le": {"ppc64le", "unix", "linux"},
"osx-64": {"osx", "osx64", "unix"},
"osx-arm64": {"arm64", "osx", "unix"},
"win-64": {"win", "win64"},
}
# This code is adapted from conda-build
sel_pat = re.compile(r"(.+?)\s*(#.*)\[([^\[\]]+)\](?(2)[^\(\)]*)$")
for line in content.splitlines(keepends=False):
if line.lstrip().startswith("#"):
continue
m = sel_pat.match(line)
if m:
cond = m.group(3)
if platform and (cond in platform_sel[platform]):
yield line
else:
logger.warning(
"filtered out line `%s` due to unmatchable selector", line
)
else:
yield line
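# Illustrative example: with platform="linux-64", the lines
#     - mpi4py    # [linux]
#     - pywin32   # [win]
# yield only the mpi4py line; the pywin32 line is filtered out with a warning.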
|
the-stack_0_7761 | with open(__file__, encoding='utf-8') as f:
source = f.read()
exec(source[source.find("# =L=I=B=""R=A=R=Y=@="):])
problem = extract_problem(__file__)
Check.initialize(problem['parts'])
# =============================================================================
# Brez naslova
# # =====================================================================@000000=
# # This is a template for a new subtask. To create a new subtask,
# # delete these comments and replace the contents with your own.
# #
# # Write a function `zmnozi(x, y)` that returns the product of the numbers `x` and `y`.
# # For example:
# #
# # >>> zmnozi(3, 7)
# # 21
# # >>> zmnozi(6, 7)
# # 42
# # =============================================================================
#
# def zmnozi(x, y):
# return x * y
#
# Check.part()
#
# Check.equal('zmnozi(3, 7)', 21)
# Check.equal('zmnozi(6, 7)', 42)
# Check.equal('zmnozi(10, 10)', 100)
# Check.secret(zmnozi(100, 100))
# Check.secret(zmnozi(500, 123))
# ===========================================================================@=
# Do not change this line or anything below it.
# =============================================================================
if __name__ == '__main__':
_validate_current_file()
# =L=I=B=R=A=R=Y=@=
import io, json, os, re, sys, shutil, traceback, urllib.error, urllib.request
from contextlib import contextmanager
class Check:
@staticmethod
def has_solution(part):
return part['solution'].strip() != ''
@staticmethod
def initialize(parts):
Check.parts = parts
for part in Check.parts:
part['valid'] = True
part['feedback'] = []
part['secret'] = []
Check.current_part = None
Check.part_counter = None
@staticmethod
def part():
if Check.part_counter is None:
Check.part_counter = 0
else:
Check.part_counter += 1
Check.current_part = Check.parts[Check.part_counter]
return Check.has_solution(Check.current_part)
@staticmethod
def feedback(message, *args, **kwargs):
Check.current_part['feedback'].append(message.format(*args, **kwargs))
@staticmethod
def error(message, *args, **kwargs):
Check.current_part['valid'] = False
Check.feedback(message, *args, **kwargs)
@staticmethod
def clean(x, digits=6, typed=False):
t = type(x)
if t is float:
x = round(x, digits)
# Since -0.0 differs from 0.0 even after rounding,
# we change it to 0.0 abusing the fact it behaves as False.
v = x if x else 0.0
elif t is complex:
v = complex(Check.clean(x.real, digits, typed), Check.clean(x.imag, digits, typed))
elif t is list:
v = list([Check.clean(y, digits, typed) for y in x])
elif t is tuple:
v = tuple([Check.clean(y, digits, typed) for y in x])
elif t is dict:
v = sorted([(Check.clean(k, digits, typed), Check.clean(v, digits, typed)) for (k, v) in x.items()])
elif t is set:
v = sorted([Check.clean(y, digits, typed) for y in x])
else:
v = x
return (t, v) if typed else v
@staticmethod
def secret(x, hint=None, clean=None):
clean = clean or Check.clean
Check.current_part['secret'].append((str(clean(x)), hint))
@staticmethod
def equal(expression, expected_result, clean=None, env={}):
local_env = locals()
local_env.update(env)
clean = clean or Check.clean
actual_result = eval(expression, globals(), local_env)
if clean(actual_result) != clean(expected_result):
Check.error('Izraz {0} vrne {1!r} namesto {2!r}.',
expression, actual_result, expected_result)
return False
else:
return True
@staticmethod
def run(statements, expected_state, clean=None, env={}):
code = "\n".join(statements)
statements = " >>> " + "\n >>> ".join(statements)
s = {}
s.update(env)
clean = clean or Check.clean
exec(code, globals(), s)
errors = []
for (x, v) in expected_state.items():
if x not in s:
errors.append('morajo nastaviti spremenljivko {0}, vendar je ne'.format(x))
elif clean(s[x]) != clean(v):
errors.append('nastavijo {0} na {1!r} namesto na {2!r}'.format(x, s[x], v))
if errors:
Check.error('Ukazi\n{0}\n{1}.', statements, ";\n".join(errors))
return False
else:
return True
@staticmethod
@contextmanager
def in_file(filename, content, encoding=None):
with open(filename, 'w', encoding=encoding) as f:
for line in content:
print(line, file=f)
old_feedback = Check.current_part['feedback'][:]
yield
new_feedback = Check.current_part['feedback'][len(old_feedback):]
Check.current_part['feedback'] = old_feedback
if new_feedback:
new_feedback = ['\n '.join(error.split('\n')) for error in new_feedback]
Check.error('Pri vhodni datoteki {0} z vsebino\n {1}\nso se pojavile naslednje napake:\n- {2}', filename, '\n '.join(content), '\n- '.join(new_feedback))
@staticmethod
@contextmanager
def input(content, encoding=None):
old_stdin = sys.stdin
old_feedback = Check.current_part['feedback'][:]
sys.stdin = io.StringIO('\n'.join(content))
try:
yield
finally:
sys.stdin = old_stdin
new_feedback = Check.current_part['feedback'][len(old_feedback):]
Check.current_part['feedback'] = old_feedback
if new_feedback:
new_feedback = ['\n '.join(error.split('\n')) for error in new_feedback]
Check.error('Pri vhodu\n {0}\nso se pojavile naslednje napake:\n- {1}', '\n '.join(content), '\n- '.join(new_feedback))
@staticmethod
def out_file(filename, content, encoding=None):
with open(filename, encoding=encoding) as f:
out_lines = f.readlines()
equal, diff, line_width = Check.difflines(out_lines, content)
if equal:
return True
else:
Check.error('Izhodna datoteka {0}\n je enaka{1} namesto:\n {2}', filename, (line_width - 7) * ' ', '\n '.join(diff))
return False
@staticmethod
def output(expression, content, use_globals=False):
old_stdout = sys.stdout
sys.stdout = io.StringIO()
try:
def visible_input(prompt):
inp = input(prompt)
print(inp)
return inp
exec(expression, globals() if use_globals else {'input': visible_input})
finally:
output = sys.stdout.getvalue().strip().splitlines()
sys.stdout = old_stdout
equal, diff, line_width = Check.difflines(output, content)
if equal:
return True
else:
Check.error('Program izpiše{0} namesto:\n {1}', (line_width - 13) * ' ', '\n '.join(diff))
return False
@staticmethod
def difflines(actual_lines, expected_lines):
actual_len, expected_len = len(actual_lines), len(expected_lines)
if actual_len < expected_len:
actual_lines += (expected_len - actual_len) * ['\n']
else:
expected_lines += (actual_len - expected_len) * ['\n']
equal = True
line_width = max(len(actual_line.rstrip()) for actual_line in actual_lines + ['Program izpiše'])
diff = []
for out, given in zip(actual_lines, expected_lines):
out, given = out.rstrip(), given.rstrip()
if out != given:
equal = False
diff.append('{0} {1} {2}'.format(out.ljust(line_width), '|' if out == given else '*', given))
return equal, diff, line_width
@staticmethod
def generator(expression, expected_values, should_stop=False, further_iter=0, env={}, clean=None):
from types import GeneratorType
local_env = locals()
local_env.update(env)
clean = clean or Check.clean
gen = eval(expression, globals(), local_env)
if not isinstance(gen, GeneratorType):
Check.error("Izraz {0} ni generator.", expression)
return False
try:
for iteration, expected_value in enumerate(expected_values):
actual_value = next(gen)
if clean(actual_value) != clean(expected_value):
Check.error("Vrednost #{0}, ki jo vrne generator {1} je {2!r} namesto {3!r}.",
iteration, expression, actual_value, expected_value)
return False
for _ in range(further_iter):
next(gen) # we will not validate it
except StopIteration:
Check.error("Generator {0} se prehitro izteče.", expression)
return False
if should_stop:
try:
next(gen)
Check.error("Generator {0} se ne izteče (dovolj zgodaj).", expression)
except StopIteration:
pass # this is fine
return True
@staticmethod
def summarize():
for i, part in enumerate(Check.parts):
if not Check.has_solution(part):
print('{0}. podnaloga je brez rešitve.'.format(i + 1))
elif not part['valid']:
print('{0}. podnaloga nima veljavne rešitve.'.format(i + 1))
else:
print('{0}. podnaloga ima veljavno rešitev.'.format(i + 1))
for message in part['feedback']:
print(' - {0}'.format('\n '.join(message.splitlines())))
def extract_problem(filename):
def strip_hashes(description):
if description is None:
return ''
else:
lines = description.strip().splitlines()
return "\n".join(line[line.index('#')+2:] for line in lines)
with open(filename, encoding='utf-8') as f:
source = f.read()
part_regex = re.compile(
r'# ===+@(?P<part>\d+)=\s*\n' # beginning of part header
r'(?P<description>(\s*#( [^\n]*)?\n)+?)' # description
r'(\s*# ---+\s*\n' # optional beginning of template
r'(?P<template>(\s*#( [^\n]*)?\n)*))?' # solution template
r'\s*# ===+\s*?\n' # end of part header
r'(?P<solution>.*?)' # solution
r'^Check\s*\.\s*part\s*\(\s*\)\s*?(?=\n)' # beginning of validation
r'(?P<validation>.*?)' # validation
r'(?=\n\s*(# )?# =+@)', # beginning of next part
flags=re.DOTALL | re.MULTILINE
)
parts = [{
'part': int(match.group('part')),
'description': strip_hashes(match.group('description')),
'solution': match.group('solution').strip(),
'template': strip_hashes(match.group('template')),
'validation': match.group('validation').strip(),
'problem': 5847
} for match in part_regex.finditer(source)]
problem_match = re.search(
r'^\s*# =+\s*\n' # beginning of header
r'^\s*# (?P<title>[^\n]*)\n' # title
r'(?P<description>(^\s*#( [^\n]*)?\n)*)' # description
r'(?=\s*(# )?# =+@)', # beginning of first part
source, flags=re.DOTALL | re.MULTILINE)
return {
'title': problem_match.group('title').strip(),
'description': strip_hashes(problem_match.group('description')),
'parts': parts,
'id': 5847,
'problem_set': 1409
}
def _validate_current_file():
def backup(filename):
backup_filename = None
suffix = 1
while not backup_filename or os.path.exists(backup_filename):
backup_filename = '{0}.{1}'.format(filename, suffix)
suffix += 1
shutil.copy(filename, backup_filename)
return backup_filename
def submit_problem(problem, url, token):
for part in problem['parts']:
part['secret'] = [x for (x, _) in part['secret']]
if part['part']:
part['id'] = part['part']
del part['part']
del part['feedback']
del part['valid']
data = json.dumps(problem).encode('utf-8')
headers = {
'Authorization': token,
'content-type': 'application/json'
}
request = urllib.request.Request(url, data=data, headers=headers)
response = urllib.request.urlopen(request)
return json.loads(response.read().decode('utf-8'))
Check.summarize()
if all(part['valid'] for part in problem['parts']):
print('Naloga je pravilno sestavljena.')
if input('Ali naj jo shranim na strežnik [da/NE]') == 'da':
print('Shranjujem nalogo na strežnik...', end="")
try:
url = 'https://www.projekt-tomo.si/api/problems/submit/'
token = 'Token 5e3e4f392b1d4a0678cc3bff8aa5ed986de0c2af'
response = submit_problem(problem, url, token)
if 'update' in response:
print('Posodabljam datoteko... ', end="")
backup_filename = backup(__file__)
with open(__file__, 'w', encoding='utf-8') as f:
f.write(response['update'])
print('Stara datoteka je bila preimenovana v {0}.'.format(backup_filename))
print('Če se datoteka v urejevalniku ni osvežila, jo zaprite ter ponovno odprite.')
except urllib.error.URLError as response:
message = json.loads(response.read().decode('utf-8'))
print('\nPRI SHRANJEVANJU JE PRIŠLO DO NAPAKE!')
if message:
print(' ' + '\n '.join(message.splitlines()))
print('Prosimo, poskusite znova.')
else:
print('Naloga je shranjena.')
else:
print('Naloga ni bila shranjena.')
else:
print('Naloga ni pravilno sestavljena.')
|
the-stack_0_7763 | import tensorflow as tf
class SumSquaredLoss(tf.keras.losses.Loss):
def __init__(self, coord = 5, noobj = .5):
super(SumSquaredLoss, self).__init__()
self.name = "sum_squared_loss"
self.lambda_coord = coord
self.lambda_noobj = noobj
def _neg_sqrt(self, num):
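        # Signed square root: the loss takes sqrt of predicted width/height,
        # which can go negative early in training, so return -sqrt(|num|)
        # instead of producing NaN in that case.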
if num < 0:
return -1 * tf.sqrt(tf.abs(num))
else:
return tf.sqrt(num)
def __call__(self, y_true, y_pred, sample_weight = None):
y_pred = tf.cast(tf.reshape(y_pred, (tf.shape(y_pred)[0], 7, 7, 30)), tf.float64)
loss = sum1 = sum2 = sum3 = sum4 = sum5 = tf.constant(0.0, dtype = tf.float64)
(bbox, label, size) = y_true
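        # y_true appears to be a (bboxes, labels, sizes) triple: each bbox is
        # [ymin, xmin, ymax, xmax] normalised to [0, 1], label is the class
        # index per box, and size is the number of valid boxes per image.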
for i in tf.range(tf.shape(y_pred)[0]):
for j in tf.range(size[i]):
b, l = bbox[i, j], label[i, j]
# center, weight, height of the bbox w.r.t. whole image
cx_real = (b[1] + b[3]) / 2
cy_real = (b[0] + b[2]) / 2
w_real = b[3] - b[1]
h_real = b[2] - b[0]
# grid that contains the center of the bbox
grid_x = int(cx_real * 7)
grid_y = int(cy_real * 7)
grid_pred = y_pred[i, grid_x, grid_y]
grid_x = tf.cast(grid_x, tf.float64)
grid_y = tf.cast(grid_y, tf.float64)
# center w.r.t. the grid
cx_real = 7 * cx_real - grid_x
cy_real = 7 * cy_real - grid_y
bbox_pred = tf.reshape(grid_pred[:10],[2, 5])
label_pred = grid_pred[10:]
real_area = w_real * h_real
pred_area = bbox_pred[:, 2] * bbox_pred[:, 3]
xsmall = tf.maximum((bbox_pred[:, 0] + grid_x) / 7 - bbox_pred[:, 2] / 2,
[(cx_real + grid_x) / 7 - w_real / 2])
xbig = tf.minimum((bbox_pred[:, 0] + grid_x) / 7 + bbox_pred[:, 2] / 2,
[(cx_real + grid_x) / 7 + w_real / 2])
ysmall = tf.maximum((bbox_pred[:, 1] + grid_y) / 7 - bbox_pred[:, 3] / 2,
[(cy_real + grid_y) / 7 - h_real / 2])
ybig = tf.minimum((bbox_pred[:, 1] + grid_y) / 7 + bbox_pred[:, 3] / 2,
[(cy_real + grid_y) / 7 + h_real / 2])
pred_area = bbox_pred[:, 2] * bbox_pred[:, 3]
intersect = tf.maximum(tf.constant(0, dtype = tf.float64), xbig - xsmall) * \
tf.maximum(tf.constant(0, dtype = tf.float64), ybig - ysmall)
iou = intersect / (pred_area + real_area - intersect)
max_idx = tf.argmax(iou)
sum1 += (bbox_pred[max_idx, 0] - cx_real) ** 2 + (bbox_pred[max_idx, 1] - cy_real) ** 2
sum2 += (self._neg_sqrt(bbox_pred[max_idx, 2]) - tf.sqrt(w_real)) ** 2 + \
(self._neg_sqrt(bbox_pred[max_idx, 3]) - tf.sqrt(h_real)) ** 2
sum3 += (bbox_pred[max_idx, 4] - label_pred[l] * iou[max_idx]) ** 2
sum4 -= bbox_pred[max_idx, 4] ** 2
sum5 += tf.reduce_sum(label_pred ** 2) - label_pred[l] ** 2 + (1 - label_pred[l]) ** 2
            sum4 += tf.reduce_sum(y_pred[i, :, :, 4] ** 2, axis = [0, 1]) + tf.reduce_sum(y_pred[i, :, :, 9] ** 2, axis = [0, 1])
loss += self.lambda_coord * sum1 + self.lambda_coord * sum2 + sum3 + self.lambda_noobj * sum4 + sum5
return loss / tf.cast(tf.shape(y_pred)[0], tf.float64) |
the-stack_0_7764 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=19
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
class Opty(cirq.PointOptimizer):
def optimization_at(
self,
circuit: 'cirq.Circuit',
index: int,
op: 'cirq.Operation'
) -> Optional[cirq.PointOptimizationSummary]:
if (isinstance(op, cirq.ops.GateOperation) and isinstance(op.gate, cirq.CZPowGate)):
return cirq.PointOptimizationSummary(
clear_span=1,
clear_qubits=op.qubits,
new_operations=[
cirq.CZ(*op.qubits),
cirq.X.on_each(*op.qubits),
cirq.X.on_each(*op.qubits),
]
)
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[2])) # number=15
c.append(cirq.H.on(input_qubit[1])) # number=7
c.append(cirq.H.on(input_qubit[2])) # number=14
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=16
c.append(cirq.X.on(input_qubit[1])) # number=17
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=18
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=11
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=12
c.append(cirq.H.on(input_qubit[0])) # number=13
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=6
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=8
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=9
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =4000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_pragma673.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() |
the-stack_0_7765 | """
Generic data algorithms. This module is experimental at the moment and not
intended for public consumption
"""
from __future__ import division
from textwrap import dedent
from warnings import catch_warnings, simplefilter, warn
import numpy as np
from pandas._libs import algos, hashtable as htable, lib
from pandas._libs.tslib import iNaT
from pandas.util._decorators import Appender, Substitution, deprecate_kwarg
from pandas.core.dtypes.cast import (
construct_1d_object_array_from_listlike, maybe_promote)
from pandas.core.dtypes.common import (
ensure_float64, ensure_int64, ensure_object, ensure_platform_int,
ensure_uint64, is_array_like, is_bool_dtype, is_categorical_dtype,
is_complex_dtype, is_datetime64_any_dtype, is_datetime64tz_dtype,
is_datetimelike, is_extension_array_dtype, is_float_dtype,
is_integer_dtype, is_interval_dtype, is_list_like, is_numeric_dtype,
is_object_dtype, is_period_dtype, is_scalar, is_signed_integer_dtype,
is_sparse, is_timedelta64_dtype, is_unsigned_integer_dtype,
needs_i8_conversion)
from pandas.core.dtypes.generic import ABCIndex, ABCIndexClass, ABCSeries
from pandas.core.dtypes.missing import isna, na_value_for_dtype
from pandas.core import common as com
_shared_docs = {}
# --------------- #
# dtype access #
# --------------- #
def _ensure_data(values, dtype=None):
"""
routine to ensure that our data is of the correct
input dtype for lower-level routines
This will coerce:
- ints -> int64
- uint -> uint64
- bool -> uint64 (TODO this should be uint8)
- datetimelike -> i8
- datetime64tz -> i8 (in local tz)
- categorical -> codes
Parameters
----------
values : array-like
dtype : pandas_dtype, optional
coerce to this dtype
Returns
-------
(ndarray, pandas_dtype, algo dtype as a string)
"""
# we check some simple dtypes first
try:
if is_object_dtype(dtype):
return ensure_object(np.asarray(values)), 'object', 'object'
if is_bool_dtype(values) or is_bool_dtype(dtype):
# we are actually coercing to uint64
# until our algos support uint8 directly (see TODO)
return np.asarray(values).astype('uint64'), 'bool', 'uint64'
elif is_signed_integer_dtype(values) or is_signed_integer_dtype(dtype):
return ensure_int64(values), 'int64', 'int64'
elif (is_unsigned_integer_dtype(values) or
is_unsigned_integer_dtype(dtype)):
return ensure_uint64(values), 'uint64', 'uint64'
elif is_float_dtype(values) or is_float_dtype(dtype):
return ensure_float64(values), 'float64', 'float64'
elif is_object_dtype(values) and dtype is None:
return ensure_object(np.asarray(values)), 'object', 'object'
elif is_complex_dtype(values) or is_complex_dtype(dtype):
# ignore the fact that we are casting to float
# which discards complex parts
with catch_warnings():
simplefilter("ignore", np.ComplexWarning)
values = ensure_float64(values)
return values, 'float64', 'float64'
except (TypeError, ValueError, OverflowError):
# if we are trying to coerce to a dtype
# and it is incompat this will fall thru to here
return ensure_object(values), 'object', 'object'
# datetimelike
if (needs_i8_conversion(values) or
is_period_dtype(dtype) or
is_datetime64_any_dtype(dtype) or
is_timedelta64_dtype(dtype)):
if is_period_dtype(values) or is_period_dtype(dtype):
from pandas import PeriodIndex
values = PeriodIndex(values)
dtype = values.dtype
elif is_timedelta64_dtype(values) or is_timedelta64_dtype(dtype):
from pandas import TimedeltaIndex
values = TimedeltaIndex(values)
dtype = values.dtype
else:
# Datetime
from pandas import DatetimeIndex
values = DatetimeIndex(values)
dtype = values.dtype
return values.asi8, dtype, 'int64'
elif (is_categorical_dtype(values) and
(is_categorical_dtype(dtype) or dtype is None)):
values = getattr(values, 'values', values)
values = values.codes
dtype = 'category'
# we are actually coercing to int64
# until our algos support int* directly (not all do)
values = ensure_int64(values)
return values, dtype, 'int64'
# we have failed, return object
values = np.asarray(values, dtype=np.object)
return ensure_object(values), 'object', 'object'
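# For instance, _ensure_data(np.array([True, False])) comes back as
# (array([1, 0], dtype=uint64), 'bool', 'uint64'): booleans are widened to
# uint64 because the lower-level algos do not support uint8 directly yet.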
def _reconstruct_data(values, dtype, original):
"""
reverse of _ensure_data
Parameters
----------
values : ndarray
dtype : pandas_dtype
original : ndarray-like
Returns
-------
Index for extension types, otherwise ndarray casted to dtype
"""
from pandas import Index
if is_extension_array_dtype(dtype):
values = dtype.construct_array_type()._from_sequence(values)
elif is_datetime64tz_dtype(dtype) or is_period_dtype(dtype):
values = Index(original)._shallow_copy(values, name=None)
elif is_bool_dtype(dtype):
values = values.astype(dtype)
# we only support object dtypes bool Index
if isinstance(original, Index):
values = values.astype(object)
elif dtype is not None:
values = values.astype(dtype)
return values
def _ensure_arraylike(values):
"""
ensure that we are arraylike if not already
"""
if not is_array_like(values):
inferred = lib.infer_dtype(values)
if inferred in ['mixed', 'string', 'unicode']:
if isinstance(values, tuple):
values = list(values)
values = construct_1d_object_array_from_listlike(values)
else:
values = np.asarray(values)
return values
_hashtables = {
'float64': (htable.Float64HashTable, htable.Float64Vector),
'uint64': (htable.UInt64HashTable, htable.UInt64Vector),
'int64': (htable.Int64HashTable, htable.Int64Vector),
'string': (htable.StringHashTable, htable.ObjectVector),
'object': (htable.PyObjectHashTable, htable.ObjectVector)
}
def _get_hashtable_algo(values):
"""
Parameters
----------
values : arraylike
Returns
-------
tuples(hashtable class,
vector class,
values,
dtype,
ndtype)
"""
values, dtype, ndtype = _ensure_data(values)
if ndtype == 'object':
# its cheaper to use a String Hash Table than Object
if lib.infer_dtype(values) in ['string']:
ndtype = 'string'
else:
ndtype = 'object'
htable, table = _hashtables[ndtype]
return (htable, table, values, dtype, ndtype)
def _get_data_algo(values, func_map):
if is_categorical_dtype(values):
values = values._values_for_rank()
values, dtype, ndtype = _ensure_data(values)
if ndtype == 'object':
# its cheaper to use a String Hash Table than Object
if lib.infer_dtype(values) in ['string']:
ndtype = 'string'
f = func_map.get(ndtype, func_map['object'])
return f, values
# --------------- #
# top-level algos #
# --------------- #
def match(to_match, values, na_sentinel=-1):
"""
Compute locations of to_match into values
Parameters
----------
to_match : array-like
values to find positions of
values : array-like
Unique set of values
na_sentinel : int, default -1
Value to mark "not found"
Examples
--------
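    Illustrative example (added for clarity; the exact result dtype may vary
    by platform). Values missing from `values` are marked with `na_sentinel`:
    >>> match(np.array(['b', 'a', 'z']), np.array(['a', 'b', 'c']))
    array([ 1,  0, -1])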
Returns
-------
match : ndarray of integers
"""
values = com.asarray_tuplesafe(values)
htable, _, values, dtype, ndtype = _get_hashtable_algo(values)
to_match, _, _ = _ensure_data(to_match, dtype)
table = htable(min(len(to_match), 1000000))
table.map_locations(values)
result = table.lookup(to_match)
if na_sentinel != -1:
# replace but return a numpy array
# use a Series because it handles dtype conversions properly
from pandas import Series
result = Series(result.ravel()).replace(-1, na_sentinel)
result = result.values.reshape(result.shape)
return result
def unique(values):
"""
Hash table-based unique. Uniques are returned in order
of appearance. This does NOT sort.
Significantly faster than numpy.unique. Includes NA values.
Parameters
----------
values : 1d array-like
Returns
-------
unique values.
- If the input is an Index, the return is an Index
- If the input is a Categorical dtype, the return is a Categorical
- If the input is a Series/ndarray, the return will be an ndarray
Examples
--------
>>> pd.unique(pd.Series([2, 1, 3, 3]))
array([2, 1, 3])
>>> pd.unique(pd.Series([2] + [1] * 5))
array([2, 1])
>>> pd.unique(pd.Series([pd.Timestamp('20160101'),
... pd.Timestamp('20160101')]))
array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')
>>> pd.unique(pd.Series([pd.Timestamp('20160101', tz='US/Eastern'),
... pd.Timestamp('20160101', tz='US/Eastern')]))
array([Timestamp('2016-01-01 00:00:00-0500', tz='US/Eastern')],
dtype=object)
>>> pd.unique(pd.Index([pd.Timestamp('20160101', tz='US/Eastern'),
... pd.Timestamp('20160101', tz='US/Eastern')]))
DatetimeIndex(['2016-01-01 00:00:00-05:00'],
... dtype='datetime64[ns, US/Eastern]', freq=None)
>>> pd.unique(list('baabc'))
array(['b', 'a', 'c'], dtype=object)
An unordered Categorical will return categories in the
order of appearance.
>>> pd.unique(pd.Series(pd.Categorical(list('baabc'))))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.unique(pd.Series(pd.Categorical(list('baabc'),
... categories=list('abc'))))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.unique(pd.Series(pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)))
[b, a, c]
Categories (3, object): [a < b < c]
An array of tuples
>>> pd.unique([('a', 'b'), ('b', 'a'), ('a', 'c'), ('b', 'a')])
array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object)
See Also
--------
pandas.Index.unique
pandas.Series.unique
"""
values = _ensure_arraylike(values)
if is_extension_array_dtype(values):
# Dispatch to extension dtype's unique.
return values.unique()
original = values
htable, _, values, dtype, ndtype = _get_hashtable_algo(values)
table = htable(len(values))
uniques = table.unique(values)
uniques = _reconstruct_data(uniques, dtype, original)
if isinstance(original, ABCSeries) and is_datetime64tz_dtype(dtype):
# we are special casing datetime64tz_dtype
# to return an object array of tz-aware Timestamps
# TODO: it must return DatetimeArray with tz in pandas 2.0
uniques = uniques.astype(object).values
return uniques
unique1d = unique
def isin(comps, values):
"""
Compute the isin boolean array
Parameters
----------
comps : array-like
values : array-like
Returns
-------
boolean array same length as comps
"""
if not is_list_like(comps):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{comps_type}]"
.format(comps_type=type(comps).__name__))
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
if not isinstance(values, (ABCIndex, ABCSeries, np.ndarray)):
values = construct_1d_object_array_from_listlike(list(values))
if is_categorical_dtype(comps):
# TODO(extension)
# handle categoricals
return comps._values.isin(values)
comps = com.values_from_object(comps)
comps, dtype, _ = _ensure_data(comps)
values, _, _ = _ensure_data(values, dtype=dtype)
# faster for larger cases to use np.in1d
f = lambda x, y: htable.ismember_object(x, values)
# GH16012
# Ensure np.in1d doesn't get object types or it *may* throw an exception
if len(comps) > 1000000 and not is_object_dtype(comps):
f = lambda x, y: np.in1d(x, y)
elif is_integer_dtype(comps):
try:
values = values.astype('int64', copy=False)
comps = comps.astype('int64', copy=False)
f = lambda x, y: htable.ismember_int64(x, y)
except (TypeError, ValueError, OverflowError):
values = values.astype(object)
comps = comps.astype(object)
elif is_float_dtype(comps):
try:
values = values.astype('float64', copy=False)
comps = comps.astype('float64', copy=False)
f = lambda x, y: htable.ismember_float64(x, y)
except (TypeError, ValueError):
values = values.astype(object)
comps = comps.astype(object)
return f(comps, values)
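# Illustrative sketch (added, not in the original source): with numpy imported
# as np earlier in this file, isin returns a boolean mask the same length as
# `comps`, roughly like:
# >>> isin(np.array([1, 2, 3]), [2, 4])
# array([False,  True, False])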
def _factorize_array(values, na_sentinel=-1, size_hint=None,
na_value=None):
"""Factorize an array-like to labels and uniques.
This doesn't do any coercion of types or unboxing before factorization.
Parameters
----------
values : ndarray
na_sentinel : int, default -1
size_hint : int, optional
        Passed through to the hashtable's 'get_labels' method
na_value : object, optional
A value in `values` to consider missing. Note: only use this
parameter when you know that you don't have any values pandas would
consider missing in the array (NaN for float data, iNaT for
datetimes, etc.).
Returns
-------
labels, uniques : ndarray
"""
(hash_klass, _), values = _get_data_algo(values, _hashtables)
table = hash_klass(size_hint or len(values))
uniques, labels = table.factorize(values, na_sentinel=na_sentinel,
na_value=na_value)
labels = ensure_platform_int(labels)
return labels, uniques
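# Hedged example (added, not in the original source): for an object array of
# strings the hashtable factorization yields integer labels plus the uniques in
# order of appearance, roughly like:
# >>> _factorize_array(np.array(['b', 'b', 'a'], dtype=object))
# (array([0, 0, 1]), array(['b', 'a'], dtype=object))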
_shared_docs['factorize'] = """
Encode the object as an enumerated type or categorical variable.
This method is useful for obtaining a numeric representation of an
array when all that matters is identifying distinct values. `factorize`
is available as both a top-level function :func:`pandas.factorize`,
and as a method :meth:`Series.factorize` and :meth:`Index.factorize`.
Parameters
----------
%(values)s%(sort)s%(order)s
na_sentinel : int, default -1
Value to mark "not found".
%(size_hint)s\
Returns
-------
labels : ndarray
An integer ndarray that's an indexer into `uniques`.
``uniques.take(labels)`` will have the same values as `values`.
uniques : ndarray, Index, or Categorical
The unique valid values. When `values` is Categorical, `uniques`
is a Categorical. When `values` is some other pandas object, an
`Index` is returned. Otherwise, a 1-D ndarray is returned.
.. note ::
Even if there's a missing value in `values`, `uniques` will
*not* contain an entry for it.
See Also
--------
cut : Discretize continuous-valued array.
unique : Find the unique value in an array.
Examples
--------
These examples all show factorize as a top-level method like
``pd.factorize(values)``. The results are identical for methods like
:meth:`Series.factorize`.
>>> labels, uniques = pd.factorize(['b', 'b', 'a', 'c', 'b'])
>>> labels
array([0, 0, 1, 2, 0])
>>> uniques
array(['b', 'a', 'c'], dtype=object)
With ``sort=True``, the `uniques` will be sorted, and `labels` will be
    shuffled so that the relationship is maintained.
>>> labels, uniques = pd.factorize(['b', 'b', 'a', 'c', 'b'], sort=True)
>>> labels
array([1, 1, 0, 2, 1])
>>> uniques
array(['a', 'b', 'c'], dtype=object)
Missing values are indicated in `labels` with `na_sentinel`
(``-1`` by default). Note that missing values are never
included in `uniques`.
>>> labels, uniques = pd.factorize(['b', None, 'a', 'c', 'b'])
>>> labels
array([ 0, -1, 1, 2, 0])
>>> uniques
array(['b', 'a', 'c'], dtype=object)
Thus far, we've only factorized lists (which are internally coerced to
NumPy arrays). When factorizing pandas objects, the type of `uniques`
will differ. For Categoricals, a `Categorical` is returned.
>>> cat = pd.Categorical(['a', 'a', 'c'], categories=['a', 'b', 'c'])
>>> labels, uniques = pd.factorize(cat)
>>> labels
array([0, 0, 1])
>>> uniques
[a, c]
Categories (3, object): [a, b, c]
Notice that ``'b'`` is in ``uniques.categories``, despite not being
present in ``cat.values``.
For all other pandas objects, an Index of the appropriate type is
returned.
>>> cat = pd.Series(['a', 'a', 'c'])
>>> labels, uniques = pd.factorize(cat)
>>> labels
array([0, 0, 1])
>>> uniques
Index(['a', 'c'], dtype='object')
"""
@Substitution(
values=dedent("""\
values : sequence
A 1-D sequence. Sequences that aren't pandas objects are
coerced to ndarrays before factorization.
"""),
order=dedent("""\
order
.. deprecated:: 0.23.0
This parameter has no effect and is deprecated.
"""),
sort=dedent("""\
sort : bool, default False
Sort `uniques` and shuffle `labels` to maintain the
relationship.
"""),
size_hint=dedent("""\
size_hint : int, optional
Hint to the hashtable sizer.
"""),
)
@Appender(_shared_docs['factorize'])
@deprecate_kwarg(old_arg_name='order', new_arg_name=None)
def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None):
# Implementation notes: This method is responsible for 3 things
# 1.) coercing data to array-like (ndarray, Index, extension array)
# 2.) factorizing labels and uniques
# 3.) Maybe boxing the output in an Index
#
# Step 2 is dispatched to extension types (like Categorical). They are
# responsible only for factorization. All data coercion, sorting and boxing
# should happen here.
values = _ensure_arraylike(values)
original = values
if is_extension_array_dtype(values):
values = getattr(values, '_values', values)
labels, uniques = values.factorize(na_sentinel=na_sentinel)
dtype = original.dtype
else:
values, dtype, _ = _ensure_data(values)
if (is_datetime64_any_dtype(original) or
is_timedelta64_dtype(original) or
is_period_dtype(original)):
na_value = na_value_for_dtype(original.dtype)
else:
na_value = None
labels, uniques = _factorize_array(values,
na_sentinel=na_sentinel,
size_hint=size_hint,
na_value=na_value)
if sort and len(uniques) > 0:
from pandas.core.sorting import safe_sort
try:
order = uniques.argsort()
order2 = order.argsort()
labels = take_1d(order2, labels, fill_value=na_sentinel)
uniques = uniques.take(order)
except TypeError:
# Mixed types, where uniques.argsort fails.
uniques, labels = safe_sort(uniques, labels,
na_sentinel=na_sentinel,
assume_unique=True)
uniques = _reconstruct_data(uniques, dtype, original)
# return original tenor
if isinstance(original, ABCIndexClass):
uniques = original._shallow_copy(uniques, name=None)
elif isinstance(original, ABCSeries):
from pandas import Index
uniques = Index(uniques)
return labels, uniques
def value_counts(values, sort=True, ascending=False, normalize=False,
bins=None, dropna=True):
"""
Compute a histogram of the counts of non-null values.
Parameters
----------
values : ndarray (1-d)
sort : boolean, default True
Sort by values
ascending : boolean, default False
Sort in ascending order
    normalize : boolean, default False
If True then compute a relative histogram
bins : integer, optional
Rather than count values, group them into half-open bins,
convenience for pd.cut, only works with numeric data
dropna : boolean, default True
Don't include counts of NaN
Returns
-------
value_counts : Series
"""
from pandas.core.series import Series, Index
name = getattr(values, 'name', None)
if bins is not None:
try:
from pandas.core.reshape.tile import cut
values = Series(values)
ii = cut(values, bins, include_lowest=True)
except TypeError:
raise TypeError("bins argument only works with numeric data.")
        # count, remove nulls (from the index), and use the bins
result = ii.value_counts(dropna=dropna)
result = result[result.index.notna()]
result.index = result.index.astype('interval')
result = result.sort_index()
# if we are dropna and we have NO values
if dropna and (result.values == 0).all():
result = result.iloc[0:0]
# normalizing is by len of all (regardless of dropna)
counts = np.array([len(ii)])
else:
if is_extension_array_dtype(values) or is_sparse(values):
# handle Categorical and sparse,
result = Series(values)._values.value_counts(dropna=dropna)
result.name = name
counts = result.values
else:
keys, counts = _value_counts_arraylike(values, dropna)
if not isinstance(keys, Index):
keys = Index(keys)
result = Series(counts, index=keys, name=name)
if sort:
result = result.sort_values(ascending=ascending)
if normalize:
result = result / float(counts.sum())
return result
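# Illustrative sketch (added, not in the original source): counts come back as
# a Series indexed by the observed values, sorted by count (descending by
# default), roughly like:
# >>> value_counts(np.array([2, 2, 3]))
# 2    2
# 3    1
# dtype: int64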
def _value_counts_arraylike(values, dropna):
"""
Parameters
----------
values : arraylike
dropna : boolean
Returns
-------
(uniques, counts)
"""
values = _ensure_arraylike(values)
original = values
values, dtype, ndtype = _ensure_data(values)
if needs_i8_conversion(dtype):
# i8
keys, counts = htable.value_count_int64(values, dropna)
if dropna:
msk = keys != iNaT
keys, counts = keys[msk], counts[msk]
else:
# ndarray like
# TODO: handle uint8
f = getattr(htable, "value_count_{dtype}".format(dtype=ndtype))
keys, counts = f(values, dropna)
mask = isna(values)
if not dropna and mask.any():
if not isna(keys).any():
keys = np.insert(keys, 0, np.NaN)
counts = np.insert(counts, 0, mask.sum())
keys = _reconstruct_data(keys, original.dtype, original)
return keys, counts
def duplicated(values, keep='first'):
"""
Return boolean ndarray denoting duplicate values.
.. versionadded:: 0.19.0
Parameters
----------
values : ndarray-like
Array over which to check for duplicate values.
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the first
occurrence.
- ``last`` : Mark duplicates as ``True`` except for the last
occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
duplicated : ndarray
"""
values, dtype, ndtype = _ensure_data(values)
f = getattr(htable, "duplicated_{dtype}".format(dtype=ndtype))
return f(values, keep=keep)
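# Illustrative sketch (added, not in the original source): with the default
# keep='first', only repeat occurrences are flagged:
# >>> duplicated(np.array([1, 2, 1]))
# array([False, False,  True])
# With keep='last' the first occurrence would be marked instead.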
def mode(values, dropna=True):
"""
Returns the mode(s) of an array.
Parameters
----------
values : array-like
Array over which to check for duplicate values.
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
mode : Series
"""
from pandas import Series
values = _ensure_arraylike(values)
original = values
# categorical is a fast-path
if is_categorical_dtype(values):
if isinstance(values, Series):
return Series(values.values.mode(dropna=dropna), name=values.name)
return values.mode(dropna=dropna)
if dropna and is_datetimelike(values):
mask = values.isnull()
values = values[~mask]
values, dtype, ndtype = _ensure_data(values)
f = getattr(htable, "mode_{dtype}".format(dtype=ndtype))
result = f(values, dropna=dropna)
try:
result = np.sort(result)
except TypeError as e:
warn("Unable to sort modes: {error}".format(error=e))
result = _reconstruct_data(result, original.dtype, original)
return Series(result)
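# Hedged example (added, not in the original source): the most common value(s)
# come back wrapped in a Series, roughly like:
# >>> mode(np.array([1, 2, 2, 3]))
# 0    2
# dtype: int64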
def rank(values, axis=0, method='average', na_option='keep',
ascending=True, pct=False):
"""
Rank the values along a given axis.
Parameters
----------
values : array-like
Array whose values will be ranked. The number of dimensions in this
array must not exceed 2.
axis : int, default 0
Axis over which to perform rankings.
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
        The method by which ties are broken during the ranking.
na_option : {'keep', 'top'}, default 'keep'
The method by which NaNs are placed in the ranking.
- ``keep``: rank each NaN value with a NaN ranking
        - ``top``: replace each NaN with either +/- inf so that they
          are ranked at the top
ascending : boolean, default True
Whether or not the elements should be ranked in ascending order.
pct : boolean, default False
        Whether or not to display the returned rankings in integer form
(e.g. 1, 2, 3) or in percentile form (e.g. 0.333..., 0.666..., 1).
"""
if values.ndim == 1:
f, values = _get_data_algo(values, _rank1d_functions)
ranks = f(values, ties_method=method, ascending=ascending,
na_option=na_option, pct=pct)
elif values.ndim == 2:
f, values = _get_data_algo(values, _rank2d_functions)
ranks = f(values, axis=axis, ties_method=method,
ascending=ascending, na_option=na_option, pct=pct)
else:
raise TypeError("Array with ndim > 2 are not supported.")
return ranks
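# Illustrative sketch (added, not in the original source): with the default
# 'average' method, tied values share the mean of their positions, roughly:
# >>> rank(np.array([1., 2., 2.]))
# array([1. , 2.5, 2.5])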
def checked_add_with_arr(arr, b, arr_mask=None, b_mask=None):
"""
Perform array addition that checks for underflow and overflow.
Performs the addition of an int64 array and an int64 integer (or array)
but checks that they do not result in overflow first. For elements that
are indicated to be NaN, whether or not there is overflow for that element
is automatically ignored.
Parameters
----------
arr : array addend.
b : array or scalar addend.
arr_mask : boolean array or None
array indicating which elements to exclude from checking
b_mask : boolean array or boolean or None
array or scalar indicating which element(s) to exclude from checking
Returns
-------
sum : An array for elements x + b for each element x in arr if b is
a scalar or an array for elements x + y for each element pair
(x, y) in (arr, b).
Raises
------
OverflowError if any x + y exceeds the maximum or minimum int64 value.
"""
# For performance reasons, we broadcast 'b' to the new array 'b2'
# so that it has the same size as 'arr'.
b2 = np.broadcast_to(b, arr.shape)
if b_mask is not None:
# We do the same broadcasting for b_mask as well.
b2_mask = np.broadcast_to(b_mask, arr.shape)
else:
b2_mask = None
# For elements that are NaN, regardless of their value, we should
# ignore whether they overflow or not when doing the checked add.
if arr_mask is not None and b2_mask is not None:
not_nan = np.logical_not(arr_mask | b2_mask)
elif arr_mask is not None:
not_nan = np.logical_not(arr_mask)
elif b_mask is not None:
not_nan = np.logical_not(b2_mask)
else:
not_nan = np.empty(arr.shape, dtype=bool)
not_nan.fill(True)
# gh-14324: For each element in 'arr' and its corresponding element
# in 'b2', we check the sign of the element in 'b2'. If it is positive,
# we then check whether its sum with the element in 'arr' exceeds
# np.iinfo(np.int64).max. If so, we have an overflow error. If it
    # is negative, we then check whether its sum with the element in
# 'arr' exceeds np.iinfo(np.int64).min. If so, we have an overflow
# error as well.
mask1 = b2 > 0
mask2 = b2 < 0
if not mask1.any():
to_raise = ((np.iinfo(np.int64).min - b2 > arr) & not_nan).any()
elif not mask2.any():
to_raise = ((np.iinfo(np.int64).max - b2 < arr) & not_nan).any()
else:
to_raise = (((np.iinfo(np.int64).max -
b2[mask1] < arr[mask1]) & not_nan[mask1]).any() or
((np.iinfo(np.int64).min -
b2[mask2] > arr[mask2]) & not_nan[mask2]).any())
if to_raise:
raise OverflowError("Overflow in int64 addition")
return arr + b
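# Illustrative sketch (added, not in the original source): the sum is returned
# unchanged when safe and an OverflowError is raised when it would wrap:
# >>> checked_add_with_arr(np.array([1, 2], dtype=np.int64), 1)
# array([2, 3])
# >>> checked_add_with_arr(np.array([np.iinfo(np.int64).max], dtype=np.int64), 1)
# Traceback (most recent call last):
#     ...
# OverflowError: Overflow in int64 addition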
_rank1d_functions = {
'float64': algos.rank_1d_float64,
'int64': algos.rank_1d_int64,
'uint64': algos.rank_1d_uint64,
'object': algos.rank_1d_object
}
_rank2d_functions = {
'float64': algos.rank_2d_float64,
'int64': algos.rank_2d_int64,
'uint64': algos.rank_2d_uint64,
'object': algos.rank_2d_object
}
def quantile(x, q, interpolation_method='fraction'):
"""
Compute sample quantile or quantiles of the input array. For example, q=0.5
computes the median.
The `interpolation_method` parameter supports three values, namely
`fraction` (default), `lower` and `higher`. Interpolation is done only,
if the desired quantile lies between two data points `i` and `j`. For
`fraction`, the result is an interpolated value between `i` and `j`;
for `lower`, the result is `i`, for `higher` the result is `j`.
Parameters
----------
x : ndarray
Values from which to extract score.
q : scalar or array
Percentile at which to extract score.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
- fraction: `i + (j - i)*fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
        - lower: `i`.
- higher: `j`.
Returns
-------
score : float
Score at percentile.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
x = np.asarray(x)
mask = isna(x)
x = x[~mask]
values = np.sort(x)
def _interpolate(a, b, fraction):
"""Returns the point at the given fraction between a and b, where
'fraction' must be between 0 and 1.
"""
return a + (b - a) * fraction
def _get_score(at):
if len(values) == 0:
return np.nan
idx = at * (len(values) - 1)
if idx % 1 == 0:
score = values[int(idx)]
else:
if interpolation_method == 'fraction':
score = _interpolate(values[int(idx)], values[int(idx) + 1],
idx % 1)
elif interpolation_method == 'lower':
                score = values[int(np.floor(idx))]
elif interpolation_method == 'higher':
                score = values[int(np.ceil(idx))]
else:
raise ValueError("interpolation_method can only be 'fraction' "
", 'lower' or 'higher'")
return score
if is_scalar(q):
return _get_score(q)
else:
q = np.asarray(q, np.float64)
return algos.arrmap_float64(q, _get_score)
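# Illustrative sketch (added, not in the original source): with the default
# 'fraction' interpolation the median of an even-length array is interpolated:
# >>> quantile(np.array([1., 2., 3., 4.]), 0.5)
# 2.5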
# --------------- #
# select n #
# --------------- #
class SelectN(object):
def __init__(self, obj, n, keep):
self.obj = obj
self.n = n
self.keep = keep
if self.keep not in ('first', 'last', 'all'):
raise ValueError('keep must be either "first", "last" or "all"')
def nlargest(self):
return self.compute('nlargest')
def nsmallest(self):
return self.compute('nsmallest')
@staticmethod
def is_valid_dtype_n_method(dtype):
"""
Helper function to determine if dtype is valid for
nsmallest/nlargest methods
"""
return ((is_numeric_dtype(dtype) and not is_complex_dtype(dtype)) or
needs_i8_conversion(dtype))
class SelectNSeries(SelectN):
"""
Implement n largest/smallest for Series
Parameters
----------
obj : Series
n : int
keep : {'first', 'last'}, default 'first'
Returns
-------
nordered : Series
"""
def compute(self, method):
n = self.n
dtype = self.obj.dtype
if not self.is_valid_dtype_n_method(dtype):
raise TypeError("Cannot use method '{method}' with "
"dtype {dtype}".format(method=method,
dtype=dtype))
if n <= 0:
return self.obj[[]]
dropped = self.obj.dropna()
# slow method
if n >= len(self.obj):
reverse_it = (self.keep == 'last' or method == 'nlargest')
ascending = method == 'nsmallest'
slc = np.s_[::-1] if reverse_it else np.s_[:]
return dropped[slc].sort_values(ascending=ascending).head(n)
# fast method
arr, pandas_dtype, _ = _ensure_data(dropped.values)
if method == 'nlargest':
arr = -arr
if is_integer_dtype(pandas_dtype):
# GH 21426: ensure reverse ordering at boundaries
arr -= 1
if self.keep == 'last':
arr = arr[::-1]
narr = len(arr)
n = min(n, narr)
kth_val = algos.kth_smallest(arr.copy(), n - 1)
ns, = np.nonzero(arr <= kth_val)
inds = ns[arr[ns].argsort(kind='mergesort')]
if self.keep != 'all':
inds = inds[:n]
if self.keep == 'last':
# reverse indices
inds = narr - 1 - inds
return dropped.iloc[inds]
class SelectNFrame(SelectN):
"""
Implement n largest/smallest for DataFrame
Parameters
----------
obj : DataFrame
n : int
keep : {'first', 'last'}, default 'first'
columns : list or str
Returns
-------
nordered : DataFrame
"""
def __init__(self, obj, n, keep, columns):
super(SelectNFrame, self).__init__(obj, n, keep)
if not is_list_like(columns) or isinstance(columns, tuple):
columns = [columns]
columns = list(columns)
self.columns = columns
def compute(self, method):
from pandas import Int64Index
n = self.n
frame = self.obj
columns = self.columns
for column in columns:
dtype = frame[column].dtype
if not self.is_valid_dtype_n_method(dtype):
raise TypeError((
"Column {column!r} has dtype {dtype}, cannot use method "
"{method!r} with this dtype"
).format(column=column, dtype=dtype, method=method))
def get_indexer(current_indexer, other_indexer):
"""Helper function to concat `current_indexer` and `other_indexer`
depending on `method`
"""
if method == 'nsmallest':
return current_indexer.append(other_indexer)
else:
return other_indexer.append(current_indexer)
# Below we save and reset the index in case index contains duplicates
original_index = frame.index
cur_frame = frame = frame.reset_index(drop=True)
cur_n = n
indexer = Int64Index([])
for i, column in enumerate(columns):
# For each column we apply method to cur_frame[column].
# If it's the last column or if we have the number of
# results desired we are done.
# Otherwise there are duplicates of the largest/smallest
# value and we need to look at the rest of the columns
# to determine which of the rows with the largest/smallest
# value in the column to keep.
series = cur_frame[column]
is_last_column = len(columns) - 1 == i
values = getattr(series, method)(
cur_n,
keep=self.keep if is_last_column else 'all')
if is_last_column or len(values) <= cur_n:
indexer = get_indexer(indexer, values.index)
break
# Now find all values which are equal to
            # the (nsmallest: largest)/(nlargest: smallest)
# from our series.
border_value = values == values[values.index[-1]]
# Some of these values are among the top-n
# some aren't.
unsafe_values = values[border_value]
# These values are definitely among the top-n
safe_values = values[~border_value]
indexer = get_indexer(indexer, safe_values.index)
# Go on and separate the unsafe_values on the remaining
# columns.
cur_frame = cur_frame.loc[unsafe_values.index]
cur_n = n - len(indexer)
frame = frame.take(indexer)
# Restore the index on frame
frame.index = original_index.take(indexer)
# If there is only one column, the frame is already sorted.
if len(columns) == 1:
return frame
ascending = method == 'nsmallest'
return frame.sort_values(
columns,
ascending=ascending,
kind='mergesort')
# ---- #
# take #
# ---- #
def _view_wrapper(f, arr_dtype=None, out_dtype=None, fill_wrap=None):
def wrapper(arr, indexer, out, fill_value=np.nan):
if arr_dtype is not None:
arr = arr.view(arr_dtype)
if out_dtype is not None:
out = out.view(out_dtype)
if fill_wrap is not None:
fill_value = fill_wrap(fill_value)
f(arr, indexer, out, fill_value=fill_value)
return wrapper
def _convert_wrapper(f, conv_dtype):
def wrapper(arr, indexer, out, fill_value=np.nan):
arr = arr.astype(conv_dtype)
f(arr, indexer, out, fill_value=fill_value)
return wrapper
def _take_2d_multi_object(arr, indexer, out, fill_value, mask_info):
# this is not ideal, performance-wise, but it's better than raising
# an exception (best to optimize in Cython to avoid getting here)
row_idx, col_idx = indexer
if mask_info is not None:
(row_mask, col_mask), (row_needs, col_needs) = mask_info
else:
row_mask = row_idx == -1
col_mask = col_idx == -1
row_needs = row_mask.any()
col_needs = col_mask.any()
if fill_value is not None:
if row_needs:
out[row_mask, :] = fill_value
if col_needs:
out[:, col_mask] = fill_value
for i in range(len(row_idx)):
u_ = row_idx[i]
for j in range(len(col_idx)):
v = col_idx[j]
out[i, j] = arr[u_, v]
def _take_nd_object(arr, indexer, out, axis, fill_value, mask_info):
if mask_info is not None:
mask, needs_masking = mask_info
else:
mask = indexer == -1
needs_masking = mask.any()
if arr.dtype != out.dtype:
arr = arr.astype(out.dtype)
if arr.shape[axis] > 0:
arr.take(ensure_platform_int(indexer), axis=axis, out=out)
if needs_masking:
outindexer = [slice(None)] * arr.ndim
outindexer[axis] = mask
out[tuple(outindexer)] = fill_value
_take_1d_dict = {
('int8', 'int8'): algos.take_1d_int8_int8,
('int8', 'int32'): algos.take_1d_int8_int32,
('int8', 'int64'): algos.take_1d_int8_int64,
('int8', 'float64'): algos.take_1d_int8_float64,
('int16', 'int16'): algos.take_1d_int16_int16,
('int16', 'int32'): algos.take_1d_int16_int32,
('int16', 'int64'): algos.take_1d_int16_int64,
('int16', 'float64'): algos.take_1d_int16_float64,
('int32', 'int32'): algos.take_1d_int32_int32,
('int32', 'int64'): algos.take_1d_int32_int64,
('int32', 'float64'): algos.take_1d_int32_float64,
('int64', 'int64'): algos.take_1d_int64_int64,
('int64', 'float64'): algos.take_1d_int64_float64,
('float32', 'float32'): algos.take_1d_float32_float32,
('float32', 'float64'): algos.take_1d_float32_float64,
('float64', 'float64'): algos.take_1d_float64_float64,
('object', 'object'): algos.take_1d_object_object,
('bool', 'bool'): _view_wrapper(algos.take_1d_bool_bool, np.uint8,
np.uint8),
('bool', 'object'): _view_wrapper(algos.take_1d_bool_object, np.uint8,
None),
('datetime64[ns]', 'datetime64[ns]'): _view_wrapper(
algos.take_1d_int64_int64, np.int64, np.int64, np.int64)
}
_take_2d_axis0_dict = {
('int8', 'int8'): algos.take_2d_axis0_int8_int8,
('int8', 'int32'): algos.take_2d_axis0_int8_int32,
('int8', 'int64'): algos.take_2d_axis0_int8_int64,
('int8', 'float64'): algos.take_2d_axis0_int8_float64,
('int16', 'int16'): algos.take_2d_axis0_int16_int16,
('int16', 'int32'): algos.take_2d_axis0_int16_int32,
('int16', 'int64'): algos.take_2d_axis0_int16_int64,
('int16', 'float64'): algos.take_2d_axis0_int16_float64,
('int32', 'int32'): algos.take_2d_axis0_int32_int32,
('int32', 'int64'): algos.take_2d_axis0_int32_int64,
('int32', 'float64'): algos.take_2d_axis0_int32_float64,
('int64', 'int64'): algos.take_2d_axis0_int64_int64,
('int64', 'float64'): algos.take_2d_axis0_int64_float64,
('float32', 'float32'): algos.take_2d_axis0_float32_float32,
('float32', 'float64'): algos.take_2d_axis0_float32_float64,
('float64', 'float64'): algos.take_2d_axis0_float64_float64,
('object', 'object'): algos.take_2d_axis0_object_object,
('bool', 'bool'): _view_wrapper(algos.take_2d_axis0_bool_bool, np.uint8,
np.uint8),
('bool', 'object'): _view_wrapper(algos.take_2d_axis0_bool_object,
np.uint8, None),
('datetime64[ns]', 'datetime64[ns]'):
_view_wrapper(algos.take_2d_axis0_int64_int64, np.int64, np.int64,
fill_wrap=np.int64)
}
_take_2d_axis1_dict = {
('int8', 'int8'): algos.take_2d_axis1_int8_int8,
('int8', 'int32'): algos.take_2d_axis1_int8_int32,
('int8', 'int64'): algos.take_2d_axis1_int8_int64,
('int8', 'float64'): algos.take_2d_axis1_int8_float64,
('int16', 'int16'): algos.take_2d_axis1_int16_int16,
('int16', 'int32'): algos.take_2d_axis1_int16_int32,
('int16', 'int64'): algos.take_2d_axis1_int16_int64,
('int16', 'float64'): algos.take_2d_axis1_int16_float64,
('int32', 'int32'): algos.take_2d_axis1_int32_int32,
('int32', 'int64'): algos.take_2d_axis1_int32_int64,
('int32', 'float64'): algos.take_2d_axis1_int32_float64,
('int64', 'int64'): algos.take_2d_axis1_int64_int64,
('int64', 'float64'): algos.take_2d_axis1_int64_float64,
('float32', 'float32'): algos.take_2d_axis1_float32_float32,
('float32', 'float64'): algos.take_2d_axis1_float32_float64,
('float64', 'float64'): algos.take_2d_axis1_float64_float64,
('object', 'object'): algos.take_2d_axis1_object_object,
('bool', 'bool'): _view_wrapper(algos.take_2d_axis1_bool_bool, np.uint8,
np.uint8),
('bool', 'object'): _view_wrapper(algos.take_2d_axis1_bool_object,
np.uint8, None),
('datetime64[ns]', 'datetime64[ns]'):
_view_wrapper(algos.take_2d_axis1_int64_int64, np.int64, np.int64,
fill_wrap=np.int64)
}
_take_2d_multi_dict = {
('int8', 'int8'): algos.take_2d_multi_int8_int8,
('int8', 'int32'): algos.take_2d_multi_int8_int32,
('int8', 'int64'): algos.take_2d_multi_int8_int64,
('int8', 'float64'): algos.take_2d_multi_int8_float64,
('int16', 'int16'): algos.take_2d_multi_int16_int16,
('int16', 'int32'): algos.take_2d_multi_int16_int32,
('int16', 'int64'): algos.take_2d_multi_int16_int64,
('int16', 'float64'): algos.take_2d_multi_int16_float64,
('int32', 'int32'): algos.take_2d_multi_int32_int32,
('int32', 'int64'): algos.take_2d_multi_int32_int64,
('int32', 'float64'): algos.take_2d_multi_int32_float64,
('int64', 'int64'): algos.take_2d_multi_int64_int64,
('int64', 'float64'): algos.take_2d_multi_int64_float64,
('float32', 'float32'): algos.take_2d_multi_float32_float32,
('float32', 'float64'): algos.take_2d_multi_float32_float64,
('float64', 'float64'): algos.take_2d_multi_float64_float64,
('object', 'object'): algos.take_2d_multi_object_object,
('bool', 'bool'): _view_wrapper(algos.take_2d_multi_bool_bool, np.uint8,
np.uint8),
('bool', 'object'): _view_wrapper(algos.take_2d_multi_bool_object,
np.uint8, None),
('datetime64[ns]', 'datetime64[ns]'):
_view_wrapper(algos.take_2d_multi_int64_int64, np.int64, np.int64,
fill_wrap=np.int64)
}
def _get_take_nd_function(ndim, arr_dtype, out_dtype, axis=0, mask_info=None):
if ndim <= 2:
tup = (arr_dtype.name, out_dtype.name)
if ndim == 1:
func = _take_1d_dict.get(tup, None)
elif ndim == 2:
if axis == 0:
func = _take_2d_axis0_dict.get(tup, None)
else:
func = _take_2d_axis1_dict.get(tup, None)
if func is not None:
return func
tup = (out_dtype.name, out_dtype.name)
if ndim == 1:
func = _take_1d_dict.get(tup, None)
elif ndim == 2:
if axis == 0:
func = _take_2d_axis0_dict.get(tup, None)
else:
func = _take_2d_axis1_dict.get(tup, None)
if func is not None:
func = _convert_wrapper(func, out_dtype)
return func
def func(arr, indexer, out, fill_value=np.nan):
indexer = ensure_int64(indexer)
_take_nd_object(arr, indexer, out, axis=axis, fill_value=fill_value,
mask_info=mask_info)
return func
def take(arr, indices, axis=0, allow_fill=False, fill_value=None):
"""
Take elements from an array.
.. versionadded:: 0.23.0
Parameters
----------
arr : sequence
Non array-likes (sequences without a dtype) are coerced
to an ndarray.
indices : sequence of integers
Indices to be taken.
axis : int, default 0
The axis over which to select values.
allow_fill : bool, default False
How to handle negative values in `indices`.
* False: negative values in `indices` indicate positional indices
from the right (the default). This is similar to :func:`numpy.take`.
* True: negative values in `indices` indicate
          missing values. These values are set to `fill_value`. Any other
          negative values raise a ``ValueError``.
fill_value : any, optional
Fill value to use for NA-indices when `allow_fill` is True.
This may be ``None``, in which case the default NA value for
the type (``self.dtype.na_value``) is used.
For multi-dimensional `arr`, each *element* is filled with
`fill_value`.
Returns
-------
ndarray or ExtensionArray
Same type as the input.
Raises
------
IndexError
When `indices` is out of bounds for the array.
ValueError
When the indexer contains negative values other than ``-1``
and `allow_fill` is True.
Notes
-----
When `allow_fill` is False, `indices` may be whatever dimensionality
is accepted by NumPy for `arr`.
When `allow_fill` is True, `indices` should be 1-D.
See Also
--------
numpy.take
Examples
--------
>>> from pandas.api.extensions import take
With the default ``allow_fill=False``, negative numbers indicate
positional indices from the right.
>>> take(np.array([10, 20, 30]), [0, 0, -1])
array([10, 10, 30])
Setting ``allow_fill=True`` will place `fill_value` in those positions.
>>> take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True)
array([10., 10., nan])
>>> take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True,
... fill_value=-10)
array([ 10, 10, -10])
"""
from pandas.core.indexing import validate_indices
if not is_array_like(arr):
arr = np.asarray(arr)
indices = np.asarray(indices, dtype=np.intp)
if allow_fill:
# Pandas style, -1 means NA
validate_indices(indices, len(arr))
result = take_1d(arr, indices, axis=axis, allow_fill=True,
fill_value=fill_value)
else:
# NumPy style
result = arr.take(indices, axis=axis)
return result
def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan, mask_info=None,
allow_fill=True):
"""
Specialized Cython take which sets NaN values in one pass
This dispatches to ``take`` defined on ExtensionArrays. It does not
currently dispatch to ``SparseArray.take`` for sparse ``arr``.
Parameters
----------
arr : array-like
Input array.
indexer : ndarray
1-D array of indices to take, subarrays corresponding to -1 value
        indices are filled with fill_value
axis : int, default 0
Axis to take from
out : ndarray or None, default None
Optional output array, must be appropriate type to hold input and
fill_value together, if indexer has any -1 value entries; call
_maybe_promote to determine this type for any fill_value
fill_value : any, default np.nan
Fill value to replace -1 values with
mask_info : tuple of (ndarray, boolean)
If provided, value should correspond to:
(indexer != -1, (indexer != -1).any())
If not provided, it will be computed internally if necessary
allow_fill : boolean, default True
If False, indexer is assumed to contain no -1 values so no filling
will be done. This short-circuits computation of a mask. Result is
undefined if allow_fill == False and -1 is present in indexer.
Returns
-------
subarray : array-like
May be the same type as the input, or cast to an ndarray.
"""
# TODO(EA): Remove these if / elifs as datetimeTZ, interval, become EAs
# dispatch to internal type takes
if is_extension_array_dtype(arr):
return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
elif is_datetime64tz_dtype(arr):
return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
elif is_interval_dtype(arr):
return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
if is_sparse(arr):
arr = arr.get_values()
elif isinstance(arr, (ABCIndexClass, ABCSeries)):
arr = arr.values
arr = np.asarray(arr)
if indexer is None:
indexer = np.arange(arr.shape[axis], dtype=np.int64)
dtype, fill_value = arr.dtype, arr.dtype.type()
else:
indexer = ensure_int64(indexer, copy=False)
if not allow_fill:
dtype, fill_value = arr.dtype, arr.dtype.type()
mask_info = None, False
else:
# check for promotion based on types only (do this first because
# it's faster than computing a mask)
dtype, fill_value = maybe_promote(arr.dtype, fill_value)
if dtype != arr.dtype and (out is None or out.dtype != dtype):
# check if promotion is actually required based on indexer
if mask_info is not None:
mask, needs_masking = mask_info
else:
mask = indexer == -1
needs_masking = mask.any()
mask_info = mask, needs_masking
if needs_masking:
if out is not None and out.dtype != dtype:
raise TypeError('Incompatible type for fill_value')
else:
# if not, then depromote, set fill_value to dummy
# (it won't be used but we don't want the cython code
# to crash when trying to cast it to dtype)
dtype, fill_value = arr.dtype, arr.dtype.type()
flip_order = False
if arr.ndim == 2:
if arr.flags.f_contiguous:
flip_order = True
if flip_order:
arr = arr.T
axis = arr.ndim - axis - 1
if out is not None:
out = out.T
# at this point, it's guaranteed that dtype can hold both the arr values
# and the fill_value
if out is None:
out_shape = list(arr.shape)
out_shape[axis] = len(indexer)
out_shape = tuple(out_shape)
if arr.flags.f_contiguous and axis == arr.ndim - 1:
# minor tweak that can make an order-of-magnitude difference
# for dataframes initialized directly from 2-d ndarrays
# (s.t. df.values is c-contiguous and df._data.blocks[0] is its
# f-contiguous transpose)
out = np.empty(out_shape, dtype=dtype, order='F')
else:
out = np.empty(out_shape, dtype=dtype)
func = _get_take_nd_function(arr.ndim, arr.dtype, out.dtype, axis=axis,
mask_info=mask_info)
func(arr, indexer, out, fill_value)
if flip_order:
out = out.T
return out
take_1d = take_nd
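# Illustrative sketch (added, not in the original source): with allow_fill=True
# (the default here), -1 entries in the indexer are replaced by fill_value and
# the dtype is promoted as needed, roughly like:
# >>> take_1d(np.array([10, 20, 30]), np.array([0, -1]), fill_value=np.nan)
# array([10., nan])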
def take_2d_multi(arr, indexer, out=None, fill_value=np.nan, mask_info=None,
allow_fill=True):
"""
Specialized Cython take which sets NaN values in one pass
"""
if indexer is None or (indexer[0] is None and indexer[1] is None):
row_idx = np.arange(arr.shape[0], dtype=np.int64)
col_idx = np.arange(arr.shape[1], dtype=np.int64)
indexer = row_idx, col_idx
dtype, fill_value = arr.dtype, arr.dtype.type()
else:
row_idx, col_idx = indexer
if row_idx is None:
row_idx = np.arange(arr.shape[0], dtype=np.int64)
else:
row_idx = ensure_int64(row_idx)
if col_idx is None:
col_idx = np.arange(arr.shape[1], dtype=np.int64)
else:
col_idx = ensure_int64(col_idx)
indexer = row_idx, col_idx
if not allow_fill:
dtype, fill_value = arr.dtype, arr.dtype.type()
mask_info = None, False
else:
# check for promotion based on types only (do this first because
# it's faster than computing a mask)
dtype, fill_value = maybe_promote(arr.dtype, fill_value)
if dtype != arr.dtype and (out is None or out.dtype != dtype):
# check if promotion is actually required based on indexer
if mask_info is not None:
(row_mask, col_mask), (row_needs, col_needs) = mask_info
else:
row_mask = row_idx == -1
col_mask = col_idx == -1
row_needs = row_mask.any()
col_needs = col_mask.any()
mask_info = (row_mask, col_mask), (row_needs, col_needs)
if row_needs or col_needs:
if out is not None and out.dtype != dtype:
raise TypeError('Incompatible type for fill_value')
else:
# if not, then depromote, set fill_value to dummy
# (it won't be used but we don't want the cython code
# to crash when trying to cast it to dtype)
dtype, fill_value = arr.dtype, arr.dtype.type()
# at this point, it's guaranteed that dtype can hold both the arr values
# and the fill_value
if out is None:
out_shape = len(row_idx), len(col_idx)
out = np.empty(out_shape, dtype=dtype)
func = _take_2d_multi_dict.get((arr.dtype.name, out.dtype.name), None)
if func is None and arr.dtype != out.dtype:
func = _take_2d_multi_dict.get((out.dtype.name, out.dtype.name), None)
if func is not None:
func = _convert_wrapper(func, out.dtype)
if func is None:
def func(arr, indexer, out, fill_value=np.nan):
_take_2d_multi_object(arr, indexer, out, fill_value=fill_value,
mask_info=mask_info)
func(arr, indexer, out=out, fill_value=fill_value)
return out
# ---- #
# diff #
# ---- #
_diff_special = {
'float64': algos.diff_2d_float64,
'float32': algos.diff_2d_float32,
'int64': algos.diff_2d_int64,
'int32': algos.diff_2d_int32,
'int16': algos.diff_2d_int16,
'int8': algos.diff_2d_int8,
}
def diff(arr, n, axis=0):
"""
difference of n between self,
analogous to s-s.shift(n)
Parameters
----------
arr : ndarray
n : int
number of periods
axis : int
axis to shift on
Returns
-------
shifted
"""
n = int(n)
na = np.nan
dtype = arr.dtype
is_timedelta = False
if needs_i8_conversion(arr):
dtype = np.float64
arr = arr.view('i8')
na = iNaT
is_timedelta = True
elif is_bool_dtype(dtype):
dtype = np.object_
elif is_integer_dtype(dtype):
dtype = np.float64
dtype = np.dtype(dtype)
out_arr = np.empty(arr.shape, dtype=dtype)
na_indexer = [slice(None)] * arr.ndim
na_indexer[axis] = slice(None, n) if n >= 0 else slice(n, None)
out_arr[tuple(na_indexer)] = na
if arr.ndim == 2 and arr.dtype.name in _diff_special:
f = _diff_special[arr.dtype.name]
f(arr, out_arr, n, axis)
else:
res_indexer = [slice(None)] * arr.ndim
res_indexer[axis] = slice(n, None) if n >= 0 else slice(None, n)
res_indexer = tuple(res_indexer)
lag_indexer = [slice(None)] * arr.ndim
lag_indexer[axis] = slice(None, -n) if n > 0 else slice(-n, None)
lag_indexer = tuple(lag_indexer)
# need to make sure that we account for na for datelike/timedelta
# we don't actually want to subtract these i8 numbers
if is_timedelta:
res = arr[res_indexer]
lag = arr[lag_indexer]
mask = (arr[res_indexer] == na) | (arr[lag_indexer] == na)
if mask.any():
res = res.copy()
res[mask] = 0
lag = lag.copy()
lag[mask] = 0
result = res - lag
result[mask] = na
out_arr[res_indexer] = result
else:
out_arr[res_indexer] = arr[res_indexer] - arr[lag_indexer]
if is_timedelta:
from pandas import TimedeltaIndex
out_arr = TimedeltaIndex(out_arr.ravel().astype('int64')).asi8.reshape(
out_arr.shape).astype('timedelta64[ns]')
return out_arr
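# Illustrative sketch (added, not in the original source): integer input is
# upcast to float64 so the leading positions can hold NaN, roughly like:
# >>> diff(np.array([1, 3, 6]), 1)
# array([nan,  2.,  3.])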
|
the-stack_0_7766 | def biggest_palindrome(digits):
"""
Finds the largest palindrome from the product of
numbers with `digits` digits each.
:param digits:
:return: the palindromic number
"""
bound_1 = 10 ** (digits - 1)
bound_2 = 10 ** digits
palindromes = []
for i in range(bound_1, bound_2):
for j in range(i, bound_2):
if str(i * j) == str(i * j)[::-1]:
palindromes.append(i * j)
return max(palindromes)
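# Quick hedged check (added, not part of the original script): the largest
# palindrome made from the product of two 2-digit numbers is 91 * 99 == 9009,
# so biggest_palindrome(2) should return 9009.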
if __name__ == "__main__":
print(biggest_palindrome(3))
|
the-stack_0_7767 | #
# Copyright (c) 2019-2021, ETH Zurich. All rights reserved.
#
# Please, refer to the LICENSE file in the root directory.
# SPDX-License-Identifier: BSD-3-Clause
#
from flask import Flask, jsonify, request, g
import requests
from logging.handlers import TimedRotatingFileHandler
import logging
import multiprocessing as mp
# common modules
from cscs_api_common import check_auth_header, get_boolean_var, LogRequestFormatter, get_username
import paramiko
import socket
import os
from flask_opentracing import FlaskTracing
from jaeger_client import Config
import opentracing
AUTH_HEADER_NAME = 'Authorization'
SYSTEMS_PUBLIC = os.environ.get("F7T_SYSTEMS_PUBLIC").strip('\'"').split(";")
# ; separated for system (related with SYSTEMS_PUBLIC length, and for each filesystem mounted inside each system, separated with ":")
# example: let's suppose SYSTEMS_PUBLIC="cluster1;cluster2", cluster1 has "/fs-c1-1" and "/fs-c1-2", and cluster2 has mounted "/fs-c2-1":
# FILESYSTEMS = "/fs-c1-1,/fs-c1-2;fs-c2-1"
FILESYSTEMS = os.environ.get("F7T_FILESYSTEMS").strip('\'"').split(";")
SERVICES = os.environ.get("F7T_STATUS_SERVICES").strip('\'"').split(";") # ; separated service names
SYSTEMS = os.environ.get("F7T_STATUS_SYSTEMS").strip('\'"').split(";") # ; separated systems names
STATUS_PORT = os.environ.get("F7T_STATUS_PORT", 5000)
UTILITIES_URL = os.environ.get("F7T_UTILITIES_URL","")
SERVICES_DICT = {}
### SSL parameters
USE_SSL = get_boolean_var(os.environ.get("F7T_USE_SSL", False))
SSL_CRT = os.environ.get("F7T_SSL_CRT", "")
SSL_KEY = os.environ.get("F7T_SSL_KEY", "")
### parameters
UTILITIES_MAX_FILE_SIZE = os.environ.get("F7T_UTILITIES_MAX_FILE_SIZE")
UTILITIES_TIMEOUT = os.environ.get("F7T_UTILITIES_TIMEOUT")
STORAGE_TEMPURL_EXP_TIME = os.environ.get("F7T_STORAGE_TEMPURL_EXP_TIME")
STORAGE_MAX_FILE_SIZE = os.environ.get("F7T_STORAGE_MAX_FILE_SIZE")
OBJECT_STORAGE=os.environ.get("F7T_OBJECT_STORAGE")
TRACER_HEADER = "uber-trace-id"
# debug on console
debug = get_boolean_var(os.environ.get("F7T_DEBUG_MODE", False))
app = Flask(__name__)
JAEGER_AGENT = os.environ.get("F7T_JAEGER_AGENT", "").strip('\'"')
if JAEGER_AGENT != "":
config = Config(
config={'sampler': {'type': 'const', 'param': 1 },
'local_agent': {'reporting_host': JAEGER_AGENT, 'reporting_port': 6831 },
'logging': True,
'reporter_batch_size': 1},
service_name = "status")
jaeger_tracer = config.initialize_tracer()
tracing = FlaskTracing(jaeger_tracer, True, app)
else:
jaeger_tracer = None
tracing = None
def get_tracing_headers(req):
"""
receives a requests object, returns headers suitable for RPC and ID for logging
"""
new_headers = {}
if JAEGER_AGENT != "":
try:
jaeger_tracer.inject(tracing.get_span(req), opentracing.Format.TEXT_MAP, new_headers)
except Exception as e:
app.logger.error(e)
new_headers[AUTH_HEADER_NAME] = req.headers[AUTH_HEADER_NAME]
ID = new_headers.get(TRACER_HEADER, '')
return new_headers, ID
def set_services():
for servicename in SERVICES:
URL_ENV_VAR = f"F7T_{servicename.upper()}_URL"
serviceurl = os.environ.get(URL_ENV_VAR)
if serviceurl:
SERVICES_DICT[servicename] = serviceurl
# test individual service function
def test_service(servicename, status_list, trace_header=None):
app.logger.info(f"Testing {servicename} microservice status")
try:
serviceurl = SERVICES_DICT[servicename]
#timeout set to 5 seconds
req = requests.get(f"{serviceurl}/status", headers=trace_header, timeout=5, verify=(SSL_CRT if USE_SSL else False))
# if status_code is 200 OK:
if req.status_code == 200:
status_list.append({"status": 0, "service": servicename})
return
except KeyError:
status_list.append({"status":-1, "service":servicename})
return
# connection errors: server down
except requests.ConnectionError as e:
app.logger.error(type(e))
app.logger.error(e)
status_list.append( {"status": -2, "service": servicename} )
return
except requests.exceptions.InvalidSchema as e:
logging.error(e, exc_info=True)
app.logger.error(type(e))
app.logger.error(e.errno)
app.logger.error(e.strerror)
app.logger.error(e)
status_list.append( {"status": -2, "service": servicename})
return
# another status_code means server is reached but flask is not functional
status_list.append( {"status":-1, "service":servicename} )
# test individual system function
def test_system(machinename, headers, status_list=[]):
app.logger.info(f"Testing {machinename} system status")
if machinename not in SYSTEMS_PUBLIC:
status_list.append( {"status": -3, "system": machinename} )
return
for i in range(len(SYSTEMS_PUBLIC)):
if SYSTEMS_PUBLIC[i] == machinename:
machine = SYSTEMS[i]
filesystems = FILESYSTEMS[i]
break
    # try to connect (unsuccessfully) with dummy user and pwd, catching SSH exception
try:
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ipaddr = machine.split(':')
host = ipaddr[0]
if len(ipaddr) == 1:
port = 22
else:
port = int(ipaddr[1])
client.connect(hostname=host, port=port,
username="dummycscs", password="dummycscs",
timeout=10)
except paramiko.ssh_exception.AuthenticationException as e:
# host up and SSH working, but returns (with reasons) authentication error
app.logger.error(type(e))
app.logger.error(e)
## TESTING FILESYSTEMS
headers["X-Machine-Name"] = machinename
username = get_username(headers[AUTH_HEADER_NAME])
for fs in filesystems.split(","):
r = requests.get(f"{UTILITIES_URL}/ls",
params={"targetPath":f"{fs}/{username}"},
headers=headers,
verify=(SSL_CRT if USE_SSL else False))
if not r.ok:
app.logger.error("Status: -4")
status_list.append({"status": -4, "system": machinename, "filesystem": fs})
return
status_list.append({"status": 0, "system": machinename})
except paramiko.ssh_exception.NoValidConnectionsError as e:
# host up but SSH not working
app.logger.error(type(e))
app.logger.error(e)
app.logger.error(e.strerror)
app.logger.error(e.errno)
app.logger.error(e.errors)
status_list.append({"status": -1, "system": machinename})
except socket.gaierror as e:
# system down
app.logger.error(type(e))
app.logger.error(e)
app.logger.error(e.strerror)
app.logger.error(e.errno)
status_list.append({"status": -2, "system": machinename})
except Exception as e:
app.logger.error(type(e))
app.logger.error(e)
status_list.append({"status": -2, "system": machinename})
finally:
client.close()
return
# get service information about a particular servicename
@app.route("/systems/<machinename>", methods=["GET"])
@check_auth_header
def status_system(machinename):
[headers, ID] = get_tracing_headers(request)
status_list = []
test_system(machinename,headers,status_list)
# possible responses:
# 0: host up and SSH running
# -1: host up but no SSH running
# -2: host down
# -3: host not in the list (does not exist)
# -4: host up but Filesystem not ready
status = status_list[0]["status"]
if status == -4:
filesystem = status_list[0]["filesystem"]
out={"system":machinename, "status":"not available", "description": f"Filesystem {filesystem} is not available"}
return jsonify(description="Filesystem is not available.", out=out), 200
if status == -3:
return jsonify(description="System does not exists."), 404
if status == -2:
out={"system":machinename, "status":"not available", "description":"System down"}
return jsonify(description="System information", out=out), 200
if status == -1:
out={"system":machinename, "status":"not available", "description":"System does not accept connections"}
return jsonify(description="System information", out=out), 200
out = {"system": machinename, "status": "available", "description": "System ready"}
return jsonify(description="System information", out=out), 200
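# Illustrative response shape (added note, not part of the original service):
# assuming a hypothetical public, reachable system named "cluster1",
# GET /systems/cluster1 would return HTTP 200 with a JSON body roughly like
# {"description": "System information",
#  "out": {"system": "cluster1", "status": "available", "description": "System ready"}}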
@app.route("/systems",methods=["GET"])
@check_auth_header
def status_systems():
[headers, ID] = get_tracing_headers(request)
# resp_list list to fill with responses from each service
resp_list = []
# list of processes
process_list = []
# memory manager
mgr = mp.Manager()
# create cross memory (between processes) list
status_list = mgr.list()
# for each servicename, creates a process
for machinename in SYSTEMS_PUBLIC:
p = mp.Process(target=test_system, args=(machinename, headers, status_list))
process_list.append(p)
p.start()
# wait for all processes to end
for p in process_list:
p.join()
for res in status_list:
status = res["status"]
system = res["system"]
# possible responses:
# 0: host up and SSH running
# -1: host up but no SSH running
# -2: host down
        # -4: host up but filesystem not available
if status == -4:
filesystem = status_list[0]["filesystem"]
ret_dict={"system":machinename, "status":"not available", "description": f"Filesystem {filesystem} is not available"}
elif status == -2:
ret_dict = {"system": system, "status": "not available", "description": "System down"}
elif status == -1:
ret_dict = {"system": system, "status": "not available",
"description": "System does not accept connections"}
else:
ret_dict = {"system": system, "status": "available", "description": "System ready"}
resp_list.append(ret_dict)
#
return jsonify(description="List of systems with status and description.",
out=resp_list), 200
# get service information about a particular servicename
@app.route("/services/<servicename>",methods=["GET"])
@check_auth_header
def status_service(servicename):
if servicename not in SERVICES_DICT.keys():
return jsonify(description="Service does not exists"), 404
# needs a list to be pass as reference
# in compatibility with test all services
status_list = []
[headers, ID] = get_tracing_headers(request)
test_service(servicename, status_list, headers)
# as it's just 1 service tested, 0 index is always valid
serv_status = status_list[0]["status"]
if serv_status == -2:
status = "not available"
description = "server down"
return jsonify(service=servicename,status=status,description=description), 200
elif serv_status == -1:
status = "not available"
description = "server up, flask down"
return jsonify(service=servicename,status=status,description=description), 200
status="available"
description="server up & flask running"
return jsonify(service=servicename,status=status,description=description), 200
# get service information about all services
@app.route("/services", methods=["GET"])
@check_auth_header
def status_services():
# resp_list list to fill with responses from each service
resp_list=[]
# list of processes
process_list = []
# memory manager
mgr = mp.Manager()
# create cross memory (between processes) list
status_list = mgr.list()
[headers, ID] = get_tracing_headers(request)
# for each servicename, creates a process
for servicename,serviceurl in SERVICES_DICT.items():
p = mp.Process(target=test_service, args=(servicename, status_list, headers))
process_list.append(p)
p.start()
# wait for all processes to end
for p in process_list:
p.join()
# iterate between status_list
for res in status_list:
retval = res["status"]
servicename = res["service"]
if retval == -2:
status = "not available"
description = "server down"
elif retval == -1:
status = "not available"
description = "server up, flask down"
else:
status = "available"
description = "server up & flask running"
resp_dict={"service":servicename,
"status" :status,
"description":description}
resp_list.append(resp_dict)
return jsonify(description="List of services with status and description.",
out=resp_list), 200
# get service information about all services
@app.route("/parameters", methods=["GET"])
@check_auth_header
def parameters():
# { <microservice>: [ "name": <parameter>, "value": <value>, "unit": <unit> } , ... ] }
systems = SYSTEMS_PUBLIC # list of systems
filesystems = FILESYSTEMS # list of filesystems, position related with SYSTEMS_PUBLIC
fs_list = []
for i in range(len(systems)):
mounted = filesystems[i].split(",")
fs_list.append({"system": systems[i], "mounted": mounted})
parameters_list = { "utilities": [
{"name": "UTILITIES_MAX_FILE_SIZE", "value": UTILITIES_MAX_FILE_SIZE, "unit": "MB" },
{"name" : "UTILITIES_TIMEOUT", "value": UTILITIES_TIMEOUT, "unit": "seconds"}
] ,
"storage": [
{"name":"OBJECT_STORAGE" ,"value":OBJECT_STORAGE, "unit": ""},
{"name":"STORAGE_TEMPURL_EXP_TIME", "value":STORAGE_TEMPURL_EXP_TIME, "unit": "seconds"},
{"name":"STORAGE_MAX_FILE_SIZE", "value":STORAGE_MAX_FILE_SIZE, "unit": "MB"},
{"name":"FILESYSTEMS", "value":fs_list, "unit": ""}
]
}
return jsonify(description="Firecrest's parameters", out=parameters_list), 200
@app.before_request
def f_before_request():
new_headers = {}
if JAEGER_AGENT != "":
try:
jaeger_tracer.inject(tracing.get_span(request), opentracing.Format.TEXT_MAP, new_headers)
except Exception as e:
logging.error(e)
g.TID = new_headers.get(TRACER_HEADER, '')
@app.after_request
def after_request(response):
    # LogRequestFormatter is used, so these messages will include time, thread, etc.
logger.info('%s %s %s %s %s', request.remote_addr, request.method, request.scheme, request.full_path, response.status)
return response
if __name__ == "__main__":
LOG_PATH = os.environ.get("F7T_LOG_PATH", '/var/log').strip('\'"')
# timed rotation: 1 (interval) rotation per day (when="D")
logHandler=TimedRotatingFileHandler(f'{LOG_PATH}/status.log', when='D', interval=1)
logFormatter = LogRequestFormatter('%(asctime)s,%(msecs)d %(thread)s [%(TID)s] %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
'%Y-%m-%dT%H:%M:%S')
logHandler.setFormatter(logFormatter)
# get app log (Flask+werkzeug+python)
logger = logging.getLogger()
# set handler to logger
logger.addHandler(logHandler)
logging.getLogger().setLevel(logging.INFO)
# create services list
set_services()
if USE_SSL:
app.run(debug=debug, host='0.0.0.0', port=STATUS_PORT, ssl_context=(SSL_CRT, SSL_KEY))
else:
app.run(debug=debug, host='0.0.0.0', port=STATUS_PORT)
|
the-stack_0_7769 | import datetime
import unittest
from unittest import mock
from django.contrib.auth.models import AnonymousUser, User
from django.core.exceptions import ValidationError
from django.core.paginator import Paginator
from django.http import Http404, HttpRequest, QueryDict
from django.test import (
RequestFactory, TestCase as DjangoTestCase, override_settings
)
from wagtail.core.models import Site
from model_bakery import baker
from core.testutils.mock_cache_backend import CACHE_PURGED_URLS
from regulations3k.documents import SectionParagraphDocument
from regulations3k.models.django import (
EffectiveVersion, Part, Section, SectionParagraph, Subpart,
effective_version_saved, section_saved, sortable_label, validate_label
)
from regulations3k.models.pages import (
RegulationLandingPage, RegulationPage, RegulationsSearchPage,
get_next_section, get_previous_section, get_secondary_nav_items,
get_section_url, validate_num_results, validate_order,
validate_page_number, validate_regs_list
)
class RegModelTests(DjangoTestCase):
def setUp(self):
from v1.models import HomePage
self.factory = RequestFactory()
self.superuser = User.objects.create_superuser(
username='supertest', password='test', email='[email protected]'
)
self.site = Site.objects.get(is_default_site=True)
self.ROOT_PAGE = HomePage.objects.get(slug='cfgov')
self.landing_page = RegulationLandingPage(
title='Reg Landing', slug='reg-landing')
self.ROOT_PAGE.add_child(instance=self.landing_page)
self.part_1002 = baker.make(
Part,
cfr_title_number=12,
part_number='1002',
title='Equal Credit Opportunity Act',
short_name='Regulation B',
chapter='X'
)
self.part_1030 = baker.make(
Part,
cfr_title_number=12,
part_number='1030',
title='Truth In Savings',
short_name='Regulation DD',
chapter='X'
)
self.effective_version = baker.make(
EffectiveVersion,
effective_date=datetime.date(2014, 1, 18),
part=self.part_1002
)
self.old_effective_version = baker.make(
EffectiveVersion,
effective_date=datetime.date(2011, 1, 1),
part=self.part_1002,
)
self.draft_effective_version = baker.make(
EffectiveVersion,
effective_date=datetime.date(2020, 1, 1),
part=self.part_1002,
draft=True,
)
self.subpart = baker.make(
Subpart,
label='Subpart General',
title='Subpart A - General',
subpart_type=Subpart.BODY,
version=self.effective_version
)
self.subpart_appendices = baker.make(
Subpart,
label='Appendices',
title='Appendices',
subpart_type=Subpart.APPENDIX,
version=self.effective_version
)
self.subpart_interps = baker.make(
Subpart,
label='Official Interpretations',
title='Supplement I to Part 1002',
subpart_type=Subpart.INTERPRETATION,
version=self.effective_version
)
self.subpart_orphan = baker.make(
Subpart,
label='General Mistake',
title='An orphan subpart with no sections for testing',
version=self.effective_version
)
self.old_subpart = baker.make(
Subpart,
label='Subpart General',
title='General',
subpart_type=Subpart.BODY,
version=self.old_effective_version
)
self.section_num4 = baker.make(
Section,
label='4',
title='\xa7\xa01002.4 General rules.',
contents=(
'{a}\n(a) Regdown paragraph a.\n'
'{b}\n(b) Paragraph b\n'
'\nsee(4-b-Interp)\n'
'{c}\n(c) Paragraph c.\n'
'{c-1}\n \n'
'{d}\n(1) General rule. A creditor that provides in writing.\n'
),
subpart=self.subpart,
)
self.graph_to_keep = baker.make(
SectionParagraph,
section=self.section_num4,
paragraph_id='d',
paragraph=(
'(1) General rule. A creditor that provides in writing.')
)
self.graph_to_delete = baker.make(
SectionParagraph,
section=self.section_num4,
paragraph_id='x',
paragraph='(x) Non-existent graph that should get deleted.'
)
self.section_num15 = baker.make(
Section,
label='15',
title='\xa7\xa01002.15 Rules concerning requests for information.',
contents='regdown content.',
subpart=self.subpart,
)
self.section_alpha = baker.make(
Section,
label='A',
title=('Appendix A to Part 1002-Federal Agencies '
'To Be Listed in Adverse Action Notices'),
contents='regdown content.',
subpart=self.subpart_appendices,
)
self.section_beta = baker.make(
Section,
label='B',
title=('Appendix B to Part 1002-Errata'),
contents='regdown content.',
subpart=self.subpart_appendices,
)
self.section_interps = baker.make(
Section,
label='Interp-A',
title=('Official interpretations for Appendix A to Part 1002'),
contents='interp content.',
subpart=self.subpart_interps,
)
self.old_section_num4 = baker.make(
Section,
label='4',
title='\xa7\xa01002.4 General rules.',
contents='regdown contents',
subpart=self.old_subpart,
)
self.reg_page = RegulationPage(
regulation=self.part_1002,
title='Reg B',
slug='1002')
self.reg_search_page = RegulationsSearchPage(
title="Search regulations",
slug='search-regulations')
self.landing_page.add_child(instance=self.reg_page)
self.landing_page.add_child(instance=self.reg_search_page)
self.reg_page.save()
self.reg_search_page.save()
CACHE_PURGED_URLS[:] = []
def get_request(self, path='', data={}):
request = self.factory.get(path, data=data)
request.user = AnonymousUser()
return request
def test_part_string_method(self):
self.assertEqual(
self.part_1002.__str__(),
'12 CFR Part 1002 (Regulation B)'
)
def test_part_cfr_title_method(self):
part = self.part_1002
self.assertEqual(
part.cfr_title,
"{} CFR Part {} ({})".format(
part.cfr_title_number,
part.part_number,
part.short_name))
def test_subpart_string_method(self):
self.assertEqual(
self.subpart.__str__(),
'12 CFR Part 1002 (Regulation B), Effective on 2014-01-18, '
'Subpart A - General'
)
def test_section_string_method(self):
self.assertEqual(
self.section_num4.__str__(),
'12 CFR Part 1002 (Regulation B), Effective on 2014-01-18, '
'Subpart A - General, \xa7\xa01002.4 General rules.'
)
def test_section_export_graphs(self):
test_counts = self.section_num4.extract_graphs()
self.assertEqual(test_counts['section'], "1002-4")
self.assertEqual(test_counts['created'], 4)
self.assertEqual(test_counts['deleted'], 1)
self.assertEqual(test_counts['kept'], 1)
def test_section_paragraph_str(self):
self.assertEqual(
self.graph_to_keep.__str__(),
"Section 1002-4 paragraph d")
def test_subpart_headings(self):
for each in Subpart.objects.all():
self.assertEqual(each.subpart_heading, '')
def test_type(self):
self.assertEqual(self.section_num15.subpart.type, 'Regulation Body')
self.assertEqual(self.section_alpha.subpart.type, 'Appendix')
self.assertEqual(self.section_interps.subpart.type, 'Interpretation')
def test_effective_version_string_method(self):
self.assertEqual(
self.effective_version.__str__(),
'12 CFR Part 1002 (Regulation B), Effective on 2014-01-18'
)
def test_live_version_true(self):
self.assertTrue(self.effective_version.live_version)
def test_status_is_live(self):
self.assertEqual(self.effective_version.status, 'LIVE')
def test_status_is_draft(self):
self.effective_version.draft = True
self.effective_version.save()
self.assertEqual(self.effective_version.status, 'Unapproved draft')
self.effective_version.draft = False
self.effective_version.effective_date = (
datetime.datetime.today().date() + datetime.timedelta(days=5))
self.effective_version.save()
self.assertEqual(self.effective_version.status, 'Future version')
self.effective_version.effective_date = datetime.date(2014, 1, 18)
self.effective_version.save()
def test_status_is_previous_version(self):
self.assertEqual(self.old_effective_version.status, 'Previous version')
def test_landing_page_get_context(self):
test_context = self.landing_page.get_context(self.get_request())
self.assertIn('get_secondary_nav_items', test_context)
def test_search_page_get_template(self):
self.assertEqual(
self.reg_search_page.get_template(self.get_request()),
'regulations3k/search-regulations.html')
def test_search_results_page_get_template(self):
request = self.get_request(data={'partial': 'true'})
self.assertEqual(
self.reg_search_page.get_template(request),
'regulations3k/search-regulations-results.html')
# Should return partial results even if no value is provided
request = self.get_request(data={'partial': ''})
self.assertEqual(
self.reg_search_page.get_template(request),
'regulations3k/search-regulations-results.html')
def test_routable_reg_page_get_context(self):
test_context = self.reg_page.get_context(self.get_request())
self.assertEqual(
test_context['regulation'],
self.reg_page.regulation)
def test_get_secondary_nav_items(self):
request = self.get_request()
request.path = '/regulations/1002/4/'
sections = list(self.reg_page.get_section_query(request).all())
test_nav_items = get_secondary_nav_items(
request, self.reg_page, sections
)[0]
self.assertEqual(
len(test_nav_items),
Subpart.objects.filter(
version=self.effective_version
).exclude(
sections=None
).count()
)
def test_get_section_url(self):
url = get_section_url(self.reg_page, self.section_num4)
self.assertEqual(url, '/reg-landing/1002/4/')
def test_get_section_url_no_section(self):
url = get_section_url(self.reg_page, None)
self.assertIsNone(url)
def test_section_page_view(self):
response = self.client.get('/reg-landing/1002/4/')
self.assertEqual(response.status_code, 200)
def test_section_page_view_section_does_not_exist(self):
response = self.client.get('/reg-landing/1002/82/')
self.assertRedirects(
response,
'/reg-landing/1002/',
fetch_redirect_response=False
)
def test_section_page_view_section_does_not_exist_with_date(self):
response = self.client.get('/reg-landing/1002/2011-01-01/82/')
self.assertRedirects(
response,
'/reg-landing/1002/2011-01-01/',
fetch_redirect_response=False
)
def test_sortable_label(self):
self.assertEqual(sortable_label('1-A-Interp'), ('0001', 'A', 'interp'))
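    # Note (inference from the expected value above, not from the implementation):
    # sortable_label appears to zero-pad numeric parts and lower-case the
    # 'Interp' suffix so that labels sort in natural order.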
def test_render_interp(self):
result = self.reg_page.render_interp({}, 'some contents')
self.assertIn('some contents', result)
def test_render_interp_with_title(self):
result = self.reg_page.render_interp(
{},
'# A title\n\nsome contents'
)
self.assertIn('Official interpretation of A title', result)
self.assertIn('some contents', result)
def test_section_ranges(self):
self.assertEqual(self.subpart_orphan.section_range, '')
self.assertEqual(self.subpart_appendices.section_range, '')
self.assertEqual(self.subpart_interps.section_range, '')
self.assertEqual(
self.subpart.section_range,
'\xa7\xa01002.4\u2013\xa7\xa01002.15')
def test_section_title_content(self):
self.assertEqual(
self.section_num15.title_content,
'Rules concerning requests for information.')
def test_section_part(self):
self.assertEqual(self.section_num4.part, '1002')
def test_section_section_number(self):
self.assertEqual(self.section_num4.section_number, '4')
def test_section_numeric_label(self):
self.assertEqual(self.section_num4.numeric_label, '\xa7\xa01002.4')
def test_section_numeric_label_not_digits(self):
self.assertEqual(self.section_alpha.numeric_label, '')
def test_section_title_content_not_digits(self):
self.assertEqual(
self.section_beta.title_content,
'Appendix B to Part 1002-Errata'
)
@mock.patch.object(SectionParagraphDocument, 'search')
def test_routable_search_page_calls_elasticsearch(self, mock_search):
mock_hit = mock.Mock()
        mock_hit.text = (
            'i. Mortgage escrow accounts for collecting taxes '
            'and property insurance premiums.')
mock_hit.title = 'Comment for 1030.2 - Definitions'
mock_hit.part = '1030'
mock_hit.date = datetime.datetime(2011, 12, 30, 0, 0)
mock_hit.section_order = 'interp-0002'
mock_hit.section_label = 'Interp-2'
mock_hit.short_name = 'Regulation DD'
mock_hit.paragraph_id = '2-a-Interp-2-i'
mock_hit.meta.highlight.text = ["<strong>Mortgage</strong> highlight"]
mock_search().query() \
.highlight().filter().sort() \
.__getitem__().execute.return_value = [mock_hit]
mock_count = mock.Mock(return_value=1)
mock_search().query().highlight().count = mock_count
mock_search().query() \
.highlight().filter().sort() \
.__getitem__().count = mock_count
response = self.client.get(
self.reg_search_page.url + self.reg_search_page.reverse_subpage(
'regulation_results_page'),
{'q': 'mortgage',
'regs': '1030',
'order': 'regulation',
'results': '50'})
self.assertEqual(mock_search.call_count, 4)
self.assertEqual(response.status_code, 200)
@mock.patch.object(SectionParagraphDocument, 'search')
def test_routable_search_page_handles_null_highlights(self, mock_search): # noqa: E501
mock_hit = mock.Mock()
        mock_hit.text = (
            'i. Mortgage escrow accounts for collecting'
            ' taxes and property insurance premiums.')
mock_hit.title = 'Comment for 1030.2 - Definitions'
mock_hit.part = '1030'
mock_hit.date = datetime.datetime(2011, 12, 30, 0, 0)
mock_hit.section_order = 'interp-0002'
mock_hit.section_label = 'Interp-2'
mock_hit.short_name = 'Regulation DD'
mock_hit.paragraph_id = '2-a-Interp-2-i'
mock_search().query() \
.highlight().filter().sort() \
.__getitem__().execute.return_value = [mock_hit]
mock_count = mock.Mock(return_value=1)
mock_search().query().highlight().count = mock_count
mock_search().query() \
.highlight().filter().sort() \
.__getitem__().count = mock_count
response = self.client.get(
self.reg_search_page.url +
self.reg_search_page.reverse_subpage(
'regulation_results_page'),
{'q': 'mortgage',
'regs': '1030',
'order': 'regulation',
'results': '50'})
self.assertEqual(mock_search.call_count, 4)
self.assertEqual(response.status_code, 200)
@mock.patch.object(SectionParagraphDocument, 'search')
def test_search_page_refuses_single_character_search_elasticsearch(self, mock_search): # noqa: E501
response = self.client.get(
self.reg_search_page.url +
self.reg_search_page.reverse_subpage(
'regulation_results_page'),
{'q': '%21', 'regs': '1002', 'order': 'regulation'})
self.assertEqual(mock_search.call_count, 0)
self.assertEqual(response.status_code, 200)
def test_get_breadcrumbs_section(self):
crumbs = self.reg_page.get_breadcrumbs(
self.get_request(),
section=self.section_num4
)
self.assertEqual(
crumbs,
[
{
'href': '/reg-landing/1002/',
'title': '12 CFR Part 1002 (Regulation B)'
},
]
)
@mock.patch('regulations3k.models.pages.requests.get')
def test_landing_page_recent_notices(self, mock_requests_get):
mock_response = mock.Mock()
mock_response.json.return_value = {'some': 'json'}
mock_response.status_code = 200
mock_requests_get.return_value = mock_response
response = self.client.get(
self.landing_page.url +
self.landing_page.reverse_subpage('recent_notices')
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'{"some": "json"}')
@mock.patch('regulations3k.models.pages.requests.get')
def test_landing_page_recent_notices_error(self, mock_requests_get):
mock_response = mock.Mock()
mock_response.status_code = 500
mock_requests_get.return_value = mock_response
response = self.client.get(
self.landing_page.url +
self.landing_page.reverse_subpage('recent_notices')
)
self.assertEqual(response.status_code, 500)
def test_get_effective_version_not_draft(self):
request = self.get_request()
effective_version = self.reg_page.get_effective_version(
request, date_str='2014-01-18'
)
self.assertEqual(effective_version, self.effective_version)
def test_get_effective_version_draft_with_perm(self):
request = self.get_request()
request.user = self.superuser
effective_version = self.reg_page.get_effective_version(
request, date_str='2020-01-01'
)
self.assertEqual(effective_version, self.draft_effective_version)
def test_get_effective_version_draft_without_perm(self):
request = self.get_request()
with self.assertRaises(Http404):
self.reg_page.get_effective_version(
request, date_str='2020-01-01'
)
def test_get_effective_version_dne(self):
request = self.get_request()
with self.assertRaises(Http404):
self.reg_page.get_effective_version(
request, date_str='2050-01-01'
)
def test_index_page_with_effective_date(self):
response = self.client.get('/reg-landing/1002/2011-01-01/')
self.assertEqual(response.status_code, 200)
self.assertIn(b'This version is not the current regulation',
response.content)
self.assertIn(b'Jan. 1, 2011', response.content)
def test_index_page_without_effective_date(self):
response = self.client.get('/reg-landing/1002/')
self.assertEqual(response.status_code, 200)
self.assertIn(b'Most recently amended Jan. 18, 2014', response.content)
def test_section_page_with_effective_date(self):
response = self.client.get('/reg-landing/1002/2011-01-01/4/')
self.assertEqual(response.status_code, 200)
self.assertIn(b'This version is not the current regulation',
response.content)
def test_section_page_without_effective_date(self):
response = self.client.get('/reg-landing/1002/4/')
self.assertEqual(response.status_code, 200)
self.assertIn(b'This version is the current regulation',
response.content)
self.assertIn(b'Search this regulation', response.content)
def test_versions_page_view_without_section(self):
response = self.client.get('/reg-landing/1002/versions/')
self.assertEqual(response.status_code, 200)
self.assertIn(b'Jan. 18, 2014', response.content)
self.assertIn(b'(current regulation)', response.content)
self.assertIn(b'Jan. 1, 2011', response.content)
self.assertNotIn(b'Jan. 1, 2020', response.content)
def test_versions_page_view_with_section(self):
response = self.client.get('/reg-landing/1002/versions/4/')
self.assertEqual(response.status_code, 200)
self.assertIn(
b'href="/reg-landing/1002/2011-01-01/4/"',
response.content
)
def test_get_breadcrumbs_section_with_date(self):
crumbs = self.reg_page.get_breadcrumbs(
self.get_request(),
section=self.section_num4,
date_str='2011-01-01'
)
self.assertEqual(
crumbs,
[
{
'href': '/reg-landing/1002/2011-01-01/',
'title': '12 CFR Part 1002 (Regulation B)'
},
]
)
def test_effective_version_date_unique(self):
new_effective_version = baker.make(
EffectiveVersion,
effective_date=datetime.date(2020, 1, 1),
part=self.part_1002,
draft=True,
)
with self.assertRaises(ValidationError):
new_effective_version.validate_unique()
def test_get_urls_for_version(self):
urls = list(self.reg_page.get_urls_for_version(self.effective_version))
self.assertIn('http://localhost/reg-landing/1002/', urls)
self.assertIn('http://localhost/reg-landing/1002/4/', urls)
self.assertIn('http://localhost/reg-landing/1002/versions/', urls)
urls = list(self.reg_page.get_urls_for_version(
self.effective_version, section=self.section_num4))
self.assertEqual(['http://localhost/reg-landing/1002/4/'], urls)
urls = list(self.reg_page.get_urls_for_version(
self.old_effective_version))
self.assertIn('http://localhost/reg-landing/1002/2011-01-01/', urls)
self.assertIn('http://localhost/reg-landing/1002/2011-01-01/4/', urls)
@override_settings(WAGTAILFRONTENDCACHE={
'varnish': {
'BACKEND': 'core.testutils.mock_cache_backend.MockCacheBackend',
},
})
def test_effective_version_saved(self):
effective_version_saved(None, self.effective_version)
self.assertIn(
'http://localhost/reg-landing/1002/',
CACHE_PURGED_URLS
)
self.assertIn(
'http://localhost/reg-landing/1002/4/',
CACHE_PURGED_URLS
)
self.assertIn(
'http://localhost/reg-landing/1002/versions/',
CACHE_PURGED_URLS
)
@override_settings(WAGTAILFRONTENDCACHE={
'varnish': {
'BACKEND': 'core.testutils.mock_cache_backend.MockCacheBackend',
},
})
def test_section_saved(self):
section_saved(None, self.section_num4)
self.assertEqual(
CACHE_PURGED_URLS,
['http://localhost/reg-landing/1002/4/']
)
def test_reg_page_can_serve_draft_versions(self):
request = self.get_request()
request.served_by_wagtail_sharing = True
self.assertTrue(self.reg_page.can_serve_draft_versions(request))
def test_reg_page_num_versions_on_sharing(self):
request = self.get_request()
request.served_by_wagtail_sharing = True
test_context = self.reg_page.get_context(request)
self.assertEqual(test_context['num_versions'], 3)
def test_reg_page_num_versions_off_sharing(self):
test_context = self.reg_page.get_context(self.get_request())
self.assertEqual(test_context['num_versions'], 2)
def test_reg_page_next_version_none(self):
response = self.client.get('/reg-landing/1002/4/')
self.assertIsNone(response.context_data['next_version'])
def test_validate_order(self):
request = HttpRequest()
self.assertEqual(validate_order(request), 'relevance')
request.GET.update({'order': 'regulation'})
self.assertEqual(validate_order(request), 'regulation')
def test_reg_page_next_version(self):
response = self.client.get('/reg-landing/1002/2011-01-01/4/')
self.assertEqual(
response.context_data['next_version'],
self.effective_version
)
def test_validate_label(self):
with self.assertRaises(ValidationError):
validate_label('label with spaces')
with self.assertRaises(ValidationError):
validate_label('')
with self.assertRaises(ValidationError):
validate_label('-')
validate_label('a')
validate_label('a-good-label')
validate_label('Interp-2')
validate_label('ünicode-labels')
def test_section_label_uniqueness(self):
dup_section = baker.make(
Section,
label='4',
subpart=self.subpart,
)
with self.assertRaises(ValidationError):
dup_section.full_clean()
non_dup_section = baker.make(
Section,
label='4',
subpart=baker.make(
Subpart,
version=baker.make(
EffectiveVersion,
part=self.part_1030
),
),
)
non_dup_section.full_clean()
class SectionNavTests(unittest.TestCase):
def test_get_next_section(self):
section_list = ['1002.1', '1002.2']
current_index = 0
self.assertEqual(
get_next_section(section_list, current_index), '1002.2')
def test_get_next_section_none(self):
section_list = ['1002.1', '1002.2']
current_index = 1
self.assertIs(
get_next_section(section_list, current_index), None)
def test_get_previous_section(self):
section_list = ['1002.1', '1002.2']
current_index = 1
self.assertEqual(
get_previous_section(section_list, current_index), '1002.1')
def test_get_previous_section_none(self):
section_list = ['1002.1', '1002.2']
current_index = 0
self.assertIs(
get_previous_section(section_list, current_index), None)
def test_validate_page_number(self):
paginator = Paginator([{'fake': 'results'}] * 30, 25)
request = HttpRequest()
self.assertEqual(validate_page_number(request, paginator), 1)
request.GET.update({'page': '2'})
self.assertEqual(validate_page_number(request, paginator), 2)
request = HttpRequest()
request.GET.update({'page': '1000'})
self.assertEqual(validate_page_number(request, paginator), 1)
request = HttpRequest()
request.GET.update({'page': '<script>Boo</script>'})
self.assertEqual(validate_page_number(request, paginator), 1)
def test_validate_num_results(self):
request = HttpRequest()
self.assertEqual(validate_num_results(request), 25)
request.GET.update({'results': '50'})
self.assertEqual(validate_num_results(request), 50)
request = HttpRequest()
request.GET.update({'results': '100'})
self.assertEqual(validate_num_results(request), 100)
request = HttpRequest()
request.GET.update({'results': '25'})
self.assertEqual(validate_num_results(request), 25)
request = HttpRequest()
request.GET.update({'results': '<script>'})
self.assertEqual(validate_num_results(request), 25)
request = HttpRequest()
request.GET.update({'results': '10'})
self.assertEqual(validate_num_results(request), 25)
def test_validate_regs_input_list(self):
request = HttpRequest()
        request.GET.update(QueryDict('regs=one&regs=2&regs=three33'))
self.assertEqual(validate_regs_list(request), ['one', '2', 'three33'])
request2 = HttpRequest()
request2.GET.update(QueryDict('regs=1@#$5567'))
self.assertEqual(validate_regs_list(request2), [])
request3 = HttpRequest()
        request3.GET.update(QueryDict('regs=one&regs=734^*^&regs=2'))
self.assertEqual(validate_regs_list(request3), ['one', '2'])
request4 = HttpRequest()
self.assertEqual(validate_regs_list(request4), [])
|
the-stack_0_7770 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# imm documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sp = os.path.abspath('../imm')
#print(">>>> source path: %s" % sp)
sys.path.insert(0, sp)
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
#print(sys.path)
import imm
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.todo',
'sphinx.ext.doctest',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'alabaster'
]
#print(">>>> EXTENSIONS: %s" % extensions)
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'IMM'
copyright = u'2016, E.R. Uber'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = imm.__version__
# The full version, including alpha/beta/rc tags.
release = imm.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
# html_theme = 'agogo'
# html_theme = 'nature'
# html_theme = 'traditional'
import alabaster
html_theme_path = [alabaster.get_path()]
html_theme = 'alabaster'
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html',
]
}
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
html_theme_options = {
'logo': 'logo.png',
'logo_name': 'false',
'github_user': 'eruber',
'github_repo': 'imm',
'github_button': 'true',
'github_banner': 'true',
'travis_button': 'true',
'codecov_button': 'true',
'show_powered_by': 'true',
#'show_related': 'true',
#'pypi_button': 'true', #ERU
#'rtd_button': 'true', #ERU
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'immdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'imm.tex',
u'imm Documentation',
u'E.R. Uber', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'imm',
u'imm Documentation',
[u'E.R. Uber'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'imm',
u'imm Documentation',
u'E.R. Uber',
'imm',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
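# Typical local build invocation (assumption: run from this docs directory):
#
#   sphinx-build -b html . _build/html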
|
the-stack_0_7771 | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.components.transform.executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import tensorflow as tf
import tensorflow_transform as tft
from tfx import types
from tfx.components.testdata.module_file import transform_module
from tfx.components.transform import executor
from tfx.types import standard_artifacts
# TODO(b/122478841): Add more detailed tests.
class ExecutorTest(tf.test.TestCase):
def _get_output_data_dir(self, sub_dir=None):
test_dir = self._testMethodName
if sub_dir is not None:
test_dir = os.path.join(test_dir, sub_dir)
return os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
test_dir)
def _make_base_do_params(self, source_data_dir, output_data_dir):
# Create input dict.
train_artifact = standard_artifacts.Examples(split='train')
train_artifact.uri = os.path.join(source_data_dir, 'csv_example_gen/train/')
eval_artifact = standard_artifacts.Examples(split='eval')
eval_artifact.uri = os.path.join(source_data_dir, 'csv_example_gen/eval/')
schema_artifact = standard_artifacts.Schema()
schema_artifact.uri = os.path.join(source_data_dir, 'schema_gen/')
self._input_dict = {
'input_data': [train_artifact, eval_artifact],
'schema': [schema_artifact],
}
# Create output dict.
self._transformed_output = standard_artifacts.TransformGraph()
self._transformed_output.uri = os.path.join(output_data_dir,
'transformed_output')
self._transformed_train_examples = standard_artifacts.Examples(
split='train')
self._transformed_train_examples.uri = os.path.join(output_data_dir,
'train')
self._transformed_eval_examples = standard_artifacts.Examples(split='eval')
self._transformed_eval_examples.uri = os.path.join(output_data_dir, 'eval')
temp_path_output = types.Artifact('TempPath')
temp_path_output.uri = tempfile.mkdtemp()
self._output_dict = {
'transform_output': [self._transformed_output],
'transformed_examples': [
self._transformed_train_examples, self._transformed_eval_examples
],
'temp_path': [temp_path_output],
}
# Create exec properties skeleton.
self._exec_properties = {}
def setUp(self):
super(ExecutorTest, self).setUp()
self._source_data_dir = os.path.join(
os.path.dirname(os.path.dirname(__file__)), 'testdata')
self._output_data_dir = self._get_output_data_dir()
self._make_base_do_params(self._source_data_dir, self._output_data_dir)
# Create exec properties skeleton.
self._module_file = os.path.join(self._source_data_dir,
'module_file/transform_module.py')
self._preprocessing_fn = '%s.%s' % (
transform_module.preprocessing_fn.__module__,
transform_module.preprocessing_fn.__name__)
# Executor for test.
self._transform_executor = executor.Executor()
def _verify_transform_outputs(self):
self.assertNotEqual(
0, len(tf.io.gfile.listdir(self._transformed_train_examples.uri)))
self.assertNotEqual(
0, len(tf.io.gfile.listdir(self._transformed_eval_examples.uri)))
path_to_saved_model = os.path.join(
self._transformed_output.uri, tft.TFTransformOutput.TRANSFORM_FN_DIR,
tf.saved_model.SAVED_MODEL_FILENAME_PB)
self.assertTrue(tf.io.gfile.exists(path_to_saved_model))
def testDoWithModuleFile(self):
self._exec_properties['module_file'] = self._module_file
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
self._verify_transform_outputs()
def testDoWithPreprocessingFn(self):
self._exec_properties['preprocessing_fn'] = self._preprocessing_fn
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
self._verify_transform_outputs()
def testDoWithNoPreprocessingFn(self):
with self.assertRaises(ValueError):
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
def testDoWithDuplicatePreprocessingFn(self):
self._exec_properties['module_file'] = self._module_file
self._exec_properties['preprocessing_fn'] = self._preprocessing_fn
with self.assertRaises(ValueError):
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
def testDoWithCache(self):
# First run that creates cache.
output_cache_artifact = types.Artifact('OutputCache')
output_cache_artifact.uri = os.path.join(self._output_data_dir, 'CACHE/')
self._output_dict['cache_output_path'] = [output_cache_artifact]
self._exec_properties['module_file'] = self._module_file
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
self._verify_transform_outputs()
self.assertNotEqual(0,
len(tf.io.gfile.listdir(output_cache_artifact.uri)))
# Second run from cache.
self._output_data_dir = self._get_output_data_dir('2nd_run')
input_cache_artifact = types.Artifact('InputCache')
input_cache_artifact.uri = output_cache_artifact.uri
output_cache_artifact = types.Artifact('OutputCache')
output_cache_artifact.uri = os.path.join(self._output_data_dir, 'CACHE/')
self._make_base_do_params(self._source_data_dir, self._output_data_dir)
self._input_dict['cache_input_path'] = [input_cache_artifact]
self._output_dict['cache_output_path'] = [output_cache_artifact]
self._exec_properties['module_file'] = self._module_file
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
self._verify_transform_outputs()
self.assertNotEqual(0,
len(tf.io.gfile.listdir(output_cache_artifact.uri)))
if __name__ == '__main__':
tf.test.main()
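# Running this module directly (python executor_test.py) invokes tf.test.main();
# a unittest/pytest runner works as well, provided the tfx testdata referenced
# in setUp() is available on disk.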
|
the-stack_0_7772 | import boto3
import itertools
import json
import os
import re
from base64 import b64decode, b64encode
from contextlib import contextmanager
from math import ceil
from subprocess import Popen
from time import sleep, time
from botocore.exceptions import ClientError
from botocore.vendored.requests.packages import urllib3
from botocore.vendored.requests.utils import get_encoding_from_headers
FILES_BUCKET = os.environ['FILES_BUCKET']
FILES_PREFIX = os.environ['FILES_PREFIX']
FILES_TABLE = os.environ['FILES_TABLE']
FILES_ID = '1'
LOCK_TABLE = os.environ['LOCK_TABLE']
LOCK_ID = '1'
PATH_PREFIX_RE = re.compile('^/grafana')
GRAFANA_HOME = os.path.join(os.path.dirname(__file__), 'grafana')
GRAFANA_BIN = os.path.join(GRAFANA_HOME, 'bin', 'grafana-server')
GRAFANA_DATA = '/tmp/grafana/data'
GRAFANA_PLUGINS = '/tmp/grafana/plugins'
GRAFANA_CONFIG = '/tmp/grafana.conf'
GRAFANA_CONFIG_TEMPLATE = '''
[server]
domain = {domain}
root_url = %(protocol)s://%(domain)s:/{stage}/grafana
[paths]
data = {data}
logs = /tmp/grafana/logs
plugins = {plugins}
'''.lstrip()
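# Illustrative rendering (domain and stage values below are made up): with
# domain='abc123.execute-api.us-east-1.amazonaws.com' and stage='prod', the
# template above yields a config whose root_url ends in '/prod/grafana' and
# whose data/plugins paths point at the /tmp locations defined below.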
GRAFANA_PIDFILE = '/tmp/grafana.pid'
GRAFANA_PROCESS = None
# Use retries when proxying requests to the Grafana process,
# because it can take a moment for it to start listening.
http = urllib3.PoolManager()
retry_settings = urllib3.Retry(
connect=20,
backoff_factor=0.1,
)
dynamodb = boto3.client('dynamodb')
s3 = boto3.client('s3')
@contextmanager
def dynamodb_lock(context):
"""
Lock the data so that only 1 Lambda function can read/write at a time.
"""
dynamodb_lock_acquire(context)
try:
yield
finally:
dynamodb_lock_release()
def dynamodb_lock_acquire(context):
"""
Acquires the DynamoDB lock.
"""
while True:
now = int(ceil(time()))
seconds_remaining = int(ceil(
context.get_remaining_time_in_millis() / 1000
))
expire = now + seconds_remaining
print('Acquiring DynamoDB lock')
try:
response = dynamodb.put_item(
TableName=LOCK_TABLE,
Item={
'Id': {
'S': LOCK_ID,
},
'Expire': {
'N': str(expire),
},
},
ConditionExpression='attribute_not_exists(Id) OR :Now > Expire',
ExpressionAttributeValues={
':Now': {
'N': str(now),
},
},
)
except ClientError as error:
code = error.response['Error']['Code']
if code == 'ConditionalCheckFailedException':
print('Waiting for lock')
sleep(0.1)
elif code == 'ProvisionedThroughputExceededException':
print('Waiting for throttle')
sleep(0.2)
else:
raise
else:
if response['ResponseMetadata']['HTTPStatusCode'] != 200:
raise Exception('ERROR: {}'.format(response))
else:
break
def dynamodb_lock_release():
"""
Releases the DynamoDB lock.
"""
attempts = 5
while True:
print('Releasing DynamoDB lock')
try:
response = dynamodb.delete_item(
TableName=LOCK_TABLE,
Key={
'Id': {
'S': LOCK_ID,
},
},
)
except ClientError as error:
code = error.response['Error']['Code']
if code == 'ProvisionedThroughputExceededException':
print('Waiting for throttle')
sleep(0.2)
else:
raise
else:
if response['ResponseMetadata']['HTTPStatusCode'] != 200:
if attempts:
print('WARNING: {}'.format(response))
attempts -= 1
sleep(0.2)
else:
raise Exception('ERROR: {}'.format(response))
else:
break
def dynamodb_get_files():
"""
Gets file data.
"""
while True:
print('Getting file data')
try:
response = dynamodb.get_item(
TableName=FILES_TABLE,
Key={
'Id': {
'S': FILES_ID,
}
},
ConsistentRead=True,
)
except ClientError as error:
code = error.response['Error']['Code']
if code == 'ProvisionedThroughputExceededException':
print('Waiting for throttle')
sleep(0.2)
else:
raise
else:
if response['ResponseMetadata']['HTTPStatusCode'] != 200:
raise Exception('ERROR: {}'.format(response))
else:
item = response.get('Item')
if item:
return json.loads(item['Files']['S'])
else:
print('No file data found!')
return {}
def dynamodb_put_files(files):
"""
Stores file data.
"""
files_json = json.dumps(files)
while True:
try:
response = dynamodb.put_item(
TableName=FILES_TABLE,
Item={
'Id': {
'S': FILES_ID,
},
'Files': {
'S': files_json,
},
}
)
except ClientError as error:
code = error.response['Error']['Code']
if code == 'ProvisionedThroughputExceededException':
print('Waiting for throttle')
sleep(0.2)
else:
raise
else:
if response['ResponseMetadata']['HTTPStatusCode'] != 200:
raise Exception('ERROR: {}'.format(response))
else:
break
def proxy_request(path, event):
url = 'http://127.0.0.1:3000' + path
if event['isBase64Encoded']:
request_body = b64decode(event['body'])
else:
request_body = None
response = http.request(
method=event['httpMethod'],
url=url,
headers=event['headers'],
body=request_body,
redirect=False,
retries=retry_settings,
)
headers = {}
response.headers.discard('Content-Length')
response.headers.discard('Transfer-Encoding')
for key in response.headers:
# The Set-Cookie header appears multiple times. Use a mix of uppercase
# and lowercase to allow multiple headers in the same dictionary.
unique_keys = map(
''.join,
itertools.product(*zip(key.lower(), key.upper()))
)
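        # Concrete example: for two Set-Cookie values the keys generated here are
        # 'set-cookie' and 'set-cookiE' (itertools.product advances the rightmost
        # character first), so both values can coexist in the headers dict below.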
values = response.headers.getlist(key)
for key, value in zip(unique_keys, values):
headers[key] = value
encoding = get_encoding_from_headers(response.headers)
if encoding:
body = response.data.decode(encoding)
is_binary = False
print('Text response:', headers)
else:
body = b64encode(response.data).decode('utf-8')
is_binary = True
print('Base 64 encoded response:', headers)
return {
'body': body,
'headers': dict(headers),
'statusCode': response.status,
'isBase64Encoded': is_binary,
}
def start_grafana(event):
"""
Configures Grafana and then starts it, unless it is already running.
"""
global GRAFANA_PROCESS
if GRAFANA_PROCESS and not GRAFANA_PROCESS.poll():
print('Grafana is already running')
return
with open(GRAFANA_CONFIG, 'wt') as config_file:
config_file.write(GRAFANA_CONFIG_TEMPLATE.format(
domain=event['headers']['Host'],
stage=event['requestContext']['stage'],
data=GRAFANA_DATA,
plugins=GRAFANA_PLUGINS,
))
print('Starting Grafana')
GRAFANA_PROCESS = Popen((
GRAFANA_BIN,
'-homepath', GRAFANA_HOME,
'-config', GRAFANA_CONFIG,
'-pidfile', GRAFANA_PIDFILE,
))
def stop_grafana():
"""
Stops Grafana if it is running.
"""
global GRAFANA_PROCESS
if GRAFANA_PROCESS:
print('Stopping Grafana')
GRAFANA_PROCESS.terminate()
GRAFANA_PROCESS.wait(timeout=5)
GRAFANA_PROCESS = None
def sync_data(download=False, upload=False, _versions={}, _times={}):
if download:
files = dynamodb_get_files()
created_dirs = set()
for relative_path, details in files.items():
file_version, file_time = details
absolute_path = '/tmp/grafana/' + relative_path
dir_path = os.path.dirname(absolute_path)
if _versions.get(relative_path) == file_version:
print('Already have {}'.format(relative_path))
created_dirs.add(dir_path)
else:
print('Downloading {}'.format(relative_path))
if dir_path not in created_dirs:
os.makedirs(dir_path, exist_ok=True)
created_dirs.add(dir_path)
s3.download_file(
Bucket=FILES_BUCKET,
Key=FILES_PREFIX + '/' + relative_path,
Filename=absolute_path,
ExtraArgs={
'VersionId': file_version,
},
)
_versions[relative_path] = file_version
_times[relative_path] = os.stat(absolute_path).st_mtime_ns
if upload:
for grafana_path in (GRAFANA_DATA, GRAFANA_PLUGINS):
for root, sub_dirs, files in os.walk(grafana_path):
for file_name in files:
absolute_path = os.path.join(root, file_name)
relative_path = os.path.relpath(
absolute_path, '/tmp/grafana'
)
file_time = os.stat(absolute_path).st_mtime_ns
if file_time == _times.get(relative_path):
print('Unchanged', relative_path)
else:
print('Uploading {}'.format(relative_path))
with open(absolute_path, 'rb') as open_file:
response = s3.put_object(
Body=open_file,
Bucket=FILES_BUCKET,
Key=FILES_PREFIX + '/' + relative_path,
)
if response['ResponseMetadata']['HTTPStatusCode'] != 200:
raise Exception('ERROR: {}'.format(response))
_versions[relative_path] = response['VersionId']
_times[relative_path] = file_time
files = {}
for key in _versions:
files[key] = [_versions[key], _times[key]]
dynamodb_put_files(files)
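# Note on the persisted record (value below is illustrative, not real data):
# the Files item maps each relative path to [S3 version id, mtime in ns], e.g.
#   {"data/grafana.db": ["<s3-version-id>", 1561234567890123456]}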
def lambda_handler(event, context):
print('Request:', event)
if not os.path.exists(GRAFANA_HOME):
raise NotImplementedError('not built yet')
path = PATH_PREFIX_RE.sub('', event['path'])
if path.startswith('/public/'):
# Static media does not require a data sync, so bypass the lock
# and reuse the running Grafana process if there is one.
start_grafana(event)
response = proxy_request(path, event)
else:
# Regular paths might change the state on disk, including the SQLite
# database, so use a lock and sync data for the request.
stop_grafana()
with dynamodb_lock(context):
sync_data(download=True)
start_grafana(event)
response = proxy_request(path, event)
stop_grafana()
sync_data(upload=True)
return response
|
the-stack_0_7779 | from django.urls import path
from . import views
app_name = 'web'
urlpatterns = [
path('', views.index, name='index'),
path('api/roi_annotations', views.roi_annotations, name='annotations'),
path('api/create_label', views.create_label, name='create_label'),
path('api/roi_list', views.roi_list, name='roi_list'),
path('api/create_or_verify_annotations', views.create_or_verify_annotations, name='create_or_verify_annotations'),
path('api/move_or_copy_to_collection', views.move_or_copy_to_collection, name='move_or_copy_to_collection'),
path('api/get_labels', views.get_labels, name='get_labels'),
path('api/get_collections', views.get_collections, name='get_collections'),
# external REST endpoints
path('api/winning_annotations/<collection_name>', views.api_winning_annotations, name='winning_annotations'),
]
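# Usage note (illustrative): with app_name = 'web', these routes are reversed
# through the namespace, e.g. reverse('web:roi_list') or {% url 'web:index' %}.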
|
the-stack_0_7783 | from itertools import zip_longest
import numpy as np
class ChunkedGenerator:
"""
Batched data generator, used for training.
The sequences are split into equal-length chunks and padded as necessary.
Arguments:
batch_size -- the batch size to use for training
cameras -- list of cameras, one element for each video (optional, used for semi-supervised training)
poses_3d -- list of ground-truth 3D poses, one element for each video (optional, used for supervised training)
poses_2d -- list of input 2D keypoints, one element for each video
chunk_length -- number of output frames to predict for each training example (usually 1)
pad -- 2D input padding to compensate for valid convolutions, per side (depends on the receptive field)
causal_shift -- asymmetric padding offset when causal convolutions are used (usually 0 or "pad")
shuffle -- randomly shuffle the dataset before each epoch
random_seed -- initial seed to use for the random generator
augment -- augment the dataset by flipping poses horizontally
kps_left and kps_right -- list of left/right 2D keypoints if flipping is enabled
joints_left and joints_right -- list of left/right 3D joints if flipping is enabled
"""
def __init__(self, batch_size, cameras, poses_3d, poses_2d,
chunk_length, pad=0, causal_shift=0,
shuffle=True, random_seed=1234,
augment=False, kps_left=None, kps_right=None, joints_left=None, joints_right=None,
endless=False):
assert poses_3d is None or len(poses_3d) == len(poses_2d), (len(poses_3d), len(poses_2d))
assert cameras is None or len(cameras) == len(poses_2d)
# Build lineage info
pairs = [] # (seq_idx, start_frame, end_frame, flip) tuples
for i in range(len(poses_2d)):
assert poses_3d is None or poses_3d[i].shape[0] == poses_2d[i].shape[0]
n_chunks = (poses_2d[i].shape[0] + chunk_length - 1) // chunk_length
offset = (n_chunks * chunk_length - poses_2d[i].shape[0]) // 2
bounds = np.arange(n_chunks + 1) * chunk_length - offset
augment_vector = np.full(len(bounds)-1, False, dtype=bool)
pairs += zip(np.repeat(i, len(bounds)-1), bounds[:-1], bounds[1:], augment_vector)
if augment:
pairs += zip(np.repeat(i, len(bounds)-1), bounds[:-1], bounds[1:], ~augment_vector)
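        # Worked example of the chunking above (ignoring augmentation): for a
        # 7-frame sequence with chunk_length=3, n_chunks = 3, offset = 1 and
        # bounds = [-1, 2, 5, 8], i.e. chunks (-1, 2), (2, 5), (5, 8); frames
        # outside the sequence are edge-padded later in next_epoch().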
# Initialize buffers
if cameras is not None:
self.batch_cam = np.empty((batch_size, cameras[0].shape[-1]))
if poses_3d is not None:
self.batch_3d = np.empty((batch_size, chunk_length, poses_3d[0].shape[-2], poses_3d[0].shape[-1]))
self.batch_2d = np.empty((batch_size, chunk_length + 2*pad, poses_2d[0].shape[-2], poses_2d[0].shape[-1]))
self.num_batches = (len(pairs) + batch_size - 1) // batch_size
self.batch_size = batch_size
self.random = np.random.RandomState(random_seed)
self.pairs = pairs
self.shuffle = shuffle
self.pad = pad
self.causal_shift = causal_shift
self.endless = endless
self.state = None
self.cameras = cameras
self.poses_3d = poses_3d
self.poses_2d = poses_2d
self.augment = augment
self.kps_left = kps_left
self.kps_right = kps_right
self.joints_left = joints_left
self.joints_right = joints_right
def num_frames(self):
return self.num_batches * self.batch_size
def random_state(self):
return self.random
def set_random_state(self, random):
self.random = random
def augment_enabled(self):
return self.augment
def next_pairs(self):
if self.state is None:
if self.shuffle:
pairs = self.random.permutation(self.pairs)
else:
pairs = self.pairs
return 0, pairs
else:
return self.state
def next_epoch(self):
enabled = True
while enabled:
start_idx, pairs = self.next_pairs()
for b_i in range(start_idx, self.num_batches):
chunks = pairs[b_i*self.batch_size : (b_i+1)*self.batch_size]
for i, (seq_i, start_3d, end_3d, flip) in enumerate(chunks):
start_2d = start_3d - self.pad - self.causal_shift
end_2d = end_3d + self.pad - self.causal_shift
# 2D poses
seq_2d = self.poses_2d[seq_i]
low_2d = max(start_2d, 0)
high_2d = min(end_2d, seq_2d.shape[0])
pad_left_2d = low_2d - start_2d
pad_right_2d = end_2d - high_2d
if pad_left_2d != 0 or pad_right_2d != 0:
self.batch_2d[i] = np.pad(seq_2d[low_2d:high_2d], ((pad_left_2d, pad_right_2d), (0, 0), (0, 0)), "edge")
else:
self.batch_2d[i] = seq_2d[low_2d:high_2d]
if flip:
# Flip 2D keypoints
self.batch_2d[i, :, :, 0] *= -1
self.batch_2d[i, :, self.kps_left + self.kps_right] = self.batch_2d[i, :, self.kps_right + self.kps_left]
# 3D poses
if self.poses_3d is not None:
seq_3d = self.poses_3d[seq_i]
low_3d = max(start_3d, 0)
high_3d = min(end_3d, seq_3d.shape[0])
pad_left_3d = low_3d - start_3d
pad_right_3d = end_3d - high_3d
if pad_left_3d != 0 or pad_right_3d != 0:
self.batch_3d[i] = np.pad(seq_3d[low_3d:high_3d], ((pad_left_3d, pad_right_3d), (0, 0), (0, 0)), "edge")
else:
self.batch_3d[i] = seq_3d[low_3d:high_3d]
if flip:
# Flip 3D joints
self.batch_3d[i, :, :, 0] *= -1
self.batch_3d[i, :, self.joints_left + self.joints_right] = \
self.batch_3d[i, :, self.joints_right + self.joints_left]
# Cameras
if self.cameras is not None:
self.batch_cam[i] = self.cameras[seq_i]
if flip:
# Flip horizontal distortion coefficients
self.batch_cam[i, 2] *= -1
self.batch_cam[i, 7] *= -1
if self.endless:
self.state = (b_i + 1, pairs)
if self.poses_3d is None and self.cameras is None:
yield None, None, self.batch_2d[:len(chunks)]
elif self.poses_3d is not None and self.cameras is None:
yield None, self.batch_3d[:len(chunks)], self.batch_2d[:(len(chunks))]
elif self.poses_3d is None:
yield self.batch_cam, None, self.batch_2d[:len(chunks)]
else:
yield self.batch_cam[:len(chunks)], self.batch_3d[:len(chunks)], self.batch_2d[:len(chunks)]
if self.endless:
self.state = None
else:
enabled = False
class UnchunkedGenerator:
"""
Non-batched data generator, used for testing.
Sequences are returned one at a time (i.e. batch size = 1), without chunking.
If data augmentation is enabled, the batches contain two sequences (i.e. batch size = 2),
the second of which is a mirrored version of the first.
Arguments:
cameras -- list of cameras, one element for each video (optional, used for semi-supervised training)
poses_3d -- list of ground-truth 3D poses, one element for each video (optional, used for supervised training)
poses_2d -- list of input 2D keypoints, one element for each video
pad -- 2D input padding to compensate for valid convolutions, per side (depends on the receptive field)
causal_shift -- asymmetric padding offset when causal convolutions are used (usually 0 or "pad")
augment -- augment the dataset by flipping poses horizontally
kps_left and kps_right -- list of left/right 2D keypoints if flipping is enabled
joints_left and joints_right -- list of left/right 3D joints if flipping is enabled
"""
def __init__(self, cameras, poses_3d, poses_2d, pad=0, causal_shift=0,
augment=False, kps_left=None, kps_right=None, joints_left=None, joints_right=None):
assert poses_3d is None or len(poses_3d) == len(poses_2d)
assert cameras is None or len(cameras) == len(poses_2d)
self.augment = augment
self.kps_left = kps_left
self.kps_right = kps_right
self.joints_left = joints_left
self.joints_right = joints_right
self.pad = pad
self.causal_shift = causal_shift
self.cameras = [] if cameras is None else cameras
self.poses_3d = [] if poses_3d is None else poses_3d
self.poses_2d = poses_2d
def num_frames(self):
count = 0
for p in self.poses_2d:
count += p.shape[0]
return count
def augment_enabled(self):
return self.augment
def set_augment(self, augment):
self.augment = augment
def next_epoch(self):
for seq_cam, seq_3d, seq_2d in zip_longest(self.cameras, self.poses_3d, self.poses_2d):
batch_cam = None if seq_cam is None else np.expand_dims(seq_cam, axis=0)
batch_3d = None if seq_3d is None else np.expand_dims(seq_3d, axis=0)
batch_2d = np.expand_dims(np.pad(seq_2d,
((self.pad + self.causal_shift, self.pad - self.causal_shift), (0, 0),
(0, 0)),
'edge'), axis=0)
if self.augment:
# Append flipped version
if batch_cam is not None:
batch_cam = np.concatenate((batch_cam, batch_cam), axis=0)
batch_cam[1, 2] *= -1
batch_cam[1, 7] *= -1
if batch_3d is not None:
batch_3d = np.concatenate((batch_3d, batch_3d), axis=0)
batch_3d[1, :, :, 0] *= -1
batch_3d[1, :, self.joints_left + self.joints_right] = batch_3d[1, :,
self.joints_right + self.joints_left]
batch_2d = np.concatenate((batch_2d, batch_2d), axis=0)
batch_2d[1, :, :, 0] *= -1
batch_2d[1, :, self.kps_left + self.kps_right] = batch_2d[1, :, self.kps_right + self.kps_left]
yield batch_cam, batch_3d, batch_2d
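# Hedged usage sketch (not part of the original file): assumes numpy is already
# imported as np at the top of this module, as the classes above require. A toy
# 50-frame, 17-joint 2D sequence is padded by `pad` frames on each side.
if __name__ == '__main__':
    dummy_2d = [np.random.randn(50, 17, 2).astype('float32')]
    gen = UnchunkedGenerator(None, None, dummy_2d, pad=13, causal_shift=0)
    for cam, batch_3d, batch_2d in gen.next_epoch():
        assert batch_2d.shape[1] == 50 + 2 * 13  # receptive-field padding applied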
|
the-stack_0_7784 | from __future__ import division
from __future__ import print_function
import argparse
import json
import numpy as np
import os.path
import random
import sys
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import tqdm
from pathlib import Path
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from torch.optim import lr_scheduler
_project_folder_ = os.path.abspath('../')
if _project_folder_ not in sys.path:
sys.path.insert(0, _project_folder_)
from data.quickdraw_dataset import QuickDrawDataset
from data.tuberlin_dataset import TUBerlinDataset
from models.modelzoo import CNN_MODELS, CNN_IMAGE_SIZES
from models.sketch_r2cnn import SketchR2CNN
from neuralline.rasterize import Raster
DATASETS = {'tuberlin': TUBerlinDataset, 'quickdraw': QuickDrawDataset}
def train_data_collate(batch):
length_list = [len(item['points3']) for item in batch]
max_length = max(length_list)
points3_padded_list = list()
points3_offset_list = list()
intensities_list = list()
category_list = list()
for item in batch:
points3 = item['points3']
points3_length = len(points3)
points3_padded = np.zeros((max_length, 3), np.float32)
points3_padded[:, 2] = np.ones((max_length,), np.float32)
points3_padded[0:points3_length, :] = points3
points3_padded_list.append(points3_padded)
points3_offset = np.copy(points3_padded)
points3_offset[1:points3_length, 0:2] = points3[1:, 0:2] - points3[:points3_length - 1, 0:2]
points3_offset_list.append(points3_offset)
intensities = np.zeros((max_length,), np.float32)
intensities[:points3_length] = 1.0 - np.arange(points3_length, dtype=np.float32) / float(points3_length - 1)
intensities_list.append(intensities)
category_list.append(item['category'])
batch_padded = {
'points3': points3_padded_list,
'points3_offset': points3_offset_list,
'points3_length': length_list,
'intensities': intensities_list,
'category': category_list
}
sort_indices = np.argsort(-np.array(length_list))
batch_collate = dict()
for k, v in batch_padded.items():
sorted_arr = np.array([v[idx] for idx in sort_indices])
batch_collate[k] = torch.from_numpy(sorted_arr)
return batch_collate
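# Hedged illustration (not part of the original module): a tiny fake batch to
# show the padding and length-sorting performed by train_data_collate. The
# point counts (5 and 9 rows) are arbitrary.
def _collate_example():
    fake_batch = [
        {'points3': np.random.rand(5, 3).astype(np.float32), 'category': 0},
        {'points3': np.random.rand(9, 3).astype(np.float32), 'category': 1},
    ]
    out = train_data_collate(fake_batch)
    # sequences come back sorted by descending length and padded to length 9
    assert out['points3'].shape == (2, 9, 3)
    assert out['points3_length'].tolist() == [9, 5]
    return out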
class BaseTrain(object):
def __init__(self):
self.config = self._parse_args()
self.modes = ['train', 'valid']
self.step_counters = {m: 0 for m in self.modes}
self.reporter = None
self.device = torch.device('cuda:{}'.format(self.config['gpu']) if torch.cuda.is_available() else 'cpu')
print('[*] Using device: {}'.format(self.device))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if self.reporter:
self.reporter.close()
def _parse_args(self):
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--batch_size', type=int, default=48)
arg_parser.add_argument('--ckpt_nets', nargs='*')
arg_parser.add_argument('--ckpt_prefix', type=str, default=None)
arg_parser.add_argument('--dataset_fn', type=str, default=None)
arg_parser.add_argument('--dataset_root', type=str, default=None)
arg_parser.add_argument('--gpu', type=int, default=0)
arg_parser.add_argument('--imgsize', type=int, default=224)
arg_parser.add_argument('--learn_rate_step', type=int, default=-1)
arg_parser.add_argument('--learn_rate', type=float, default=0.0001)
arg_parser.add_argument('--log_dir', type=str, default=None)
arg_parser.add_argument('--max_points', type=int, default=321)
arg_parser.add_argument('--model_fn', type=str, default=None)
arg_parser.add_argument('--note', type=str, default='')
arg_parser.add_argument('--num_epochs', type=int, default=1)
arg_parser.add_argument('--report_hist_freq', type=int, default=100)
arg_parser.add_argument('--report_image_freq', type=int, default=100)
arg_parser.add_argument('--report_scalar_freq', type=int, default=100)
arg_parser.add_argument('--save_epoch_freq', type=int, default=1)
arg_parser.add_argument('--save_step_freq', type=int, default=-1)
arg_parser.add_argument('--seed', type=int, default=10)
arg_parser.add_argument('--thickness', type=float, default=1.0)
arg_parser.add_argument('--valid_freq', type=int, default=1)
arg_parser.add_argument('--weight_decay', type=float, default=-1)
arg_parser = self.add_args(arg_parser)
config = vars(arg_parser.parse_args())
config['imgsize'] = CNN_IMAGE_SIZES[config['model_fn']]
if config['dataset_fn'] == 'quickdraw':
config['max_points'] = 321
config['report_image_freq'] = 500
config['save_epoch_freq'] = 1
config['valid_freq'] = 1
elif config['dataset_fn'] == 'tuberlin':
config['max_points'] = 448
config['report_image_freq'] = 100
config['save_epoch_freq'] = 20
config['valid_freq'] = 20
else:
raise Exception('Not valid dataset name!')
if config['log_dir'] is None:
raise Exception('No log_dir specified!')
else:
if not os.path.exists(config['log_dir']):
os.makedirs(config['log_dir'], 0o777)
if config['dataset_root'] is None:
raise Exception('No dataset_root specified!')
if config['ckpt_prefix'] is not None:
if len(config['ckpt_prefix']) < 1:
config['ckpt_prefix'] = None
if config['ckpt_nets'] is not None:
if len(config['ckpt_nets']) < 1:
config['ckpt_nets'] = None
if config['seed'] is None:
config['seed'] = random.randint(0, 2**31 - 1)
random.seed(config['seed'])
np.random.seed(config['seed'])
torch.manual_seed(config['seed'])
with open(os.path.join(config['log_dir'], 'options.json'), 'w') as fh:
fh.write(json.dumps(config, sort_keys=True, indent=4))
return config
def add_args(self, arg_parser):
return arg_parser
def run_name(self):
return None
def checkpoint_prefix(self):
return self.config['ckpt_prefix']
def prepare_dataset(self, dataset_dict):
pass
def create_data_loaders(self, dataset_dict):
raise NotImplementedError
def create_model(self, num_categories):
raise NotImplementedError
def weight_decay_excludes(self):
return ['bias']
def forward_batch(self, model, data_batch, mode, optimizer, criterion):
raise NotImplementedError
def run(self):
dataset_fn = self.config['dataset_fn']
dataset_root = self.config['dataset_root']
learn_rate = self.config['learn_rate']
learn_rate_step = self.config['learn_rate_step']
log_dir = self.config['log_dir']
model_fn = self.config['model_fn']
num_epochs = self.config['num_epochs']
report_scalar_freq = self.config['report_scalar_freq']
save_epoch_freq = self.config['save_epoch_freq']
save_step_freq = self.config['save_step_freq']
valid_freq = self.config['valid_freq']
weight_decay = self.config['weight_decay']
save_prefix = dataset_fn + '_' + model_fn
if self.run_name():
save_prefix = save_prefix + '_' + self.run_name()
if self.reporter is None:
self.reporter = SummaryWriter(log_dir)
train_data = {
m: DATASETS[dataset_fn](dataset_root, m) for m in self.modes
}
self.prepare_dataset(train_data)
num_categories = train_data[self.modes[0]].num_categories()
print('[*] Number of categories:', num_categories)
net = self.create_model(num_categories)
net.print_params()
data_loaders = self.create_data_loaders(train_data)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.params_to_optimize(weight_decay, self.weight_decay_excludes()), lr=learn_rate)
if learn_rate_step > 0:
lr_exp_scheduler = lr_scheduler.StepLR(optimizer, step_size=learn_rate_step, gamma=0.5)
else:
lr_exp_scheduler = None
best_accu = 0.0
best_net = -1
ckpt_prefix = self.checkpoint_prefix()
ckpt_nets = self.config['ckpt_nets']
if ckpt_prefix is not None:
loaded_paths = net.load(ckpt_prefix, ckpt_nets)
print('[*] Loaded pretrained model from {}'.format(loaded_paths))
for epoch in range(1, num_epochs + 1):
print('-' * 20)
print('[*] Epoch {}/{}'.format(epoch, num_epochs))
for mode in self.modes:
is_train = mode == 'train'
if not is_train and epoch % valid_freq != 0:
continue
print('[*] Starting {} mode'.format(mode))
if is_train:
if lr_exp_scheduler is not None:
lr_exp_scheduler.step()
net.train_mode()
else:
net.eval_mode()
running_corrects = 0
num_samples = 0
pbar = tqdm.tqdm(total=len(data_loaders[mode]))
for bid, data_batch in enumerate(data_loaders[mode]):
self.step_counters[mode] += 1
logits, loss, gt_category = self.forward_batch(net, data_batch, mode, optimizer, criterion)
_, predicts = torch.max(logits, 1)
predicts_accu = torch.sum(predicts == gt_category)
running_corrects += predicts_accu.item()
sampled_batch_size = gt_category.size(0)
num_samples += sampled_batch_size
if report_scalar_freq > 0 and self.step_counters[mode] % report_scalar_freq == 0:
self.reporter.add_scalar('{}/loss'.format(mode), loss.item(), self.step_counters[mode])
self.reporter.add_scalar('{}/accuracy'.format(mode),
float(predicts_accu.data) / sampled_batch_size, self.step_counters[mode])
if is_train and save_step_freq > 0 and self.step_counters[mode] % save_step_freq == 0:
net.save(log_dir, self.step_counters[mode], save_prefix)
pbar.update()
pbar.close()
epoch_accu = float(running_corrects) / float(num_samples)
if is_train:
if epoch % save_epoch_freq == 0:
print('[*] {} accu: {:.4f}'.format(mode, epoch_accu))
net.save(log_dir, 'epoch_{}'.format(epoch), save_prefix)
else:
print('[*] {} accu: {:.4f}'.format(mode, epoch_accu))
if epoch_accu > best_accu:
best_accu = epoch_accu
best_net = epoch
print('[*] Best accu: {:.4f}, corresponding epoch: {}'.format(best_accu, best_net))
for m in self.modes:
train_data[m].dispose()
return best_accu
class SketchR2CNNTrain(BaseTrain):
def add_args(self, arg_parser):
arg_parser.add_argument('--dropout', type=float, default=0.5)
arg_parser.add_argument('--intensity_channels', type=int, default=1)
return arg_parser
def create_data_loaders(self, dataset_dict):
data_loaders = {
m: DataLoader(dataset_dict[m],
batch_size=self.config['batch_size'],
num_workers=3 if m == 'train' else 1,
shuffle=True if m == 'train' else False,
drop_last=True,
collate_fn=train_data_collate,
pin_memory=True) for m in self.modes
}
return data_loaders
def create_model(self, num_categories):
dropout = self.config['dropout']
imgsize = self.config['imgsize']
intensity_channels = self.config['intensity_channels']
model_fn = self.config['model_fn']
thickness = self.config['thickness']
return SketchR2CNN(CNN_MODELS[model_fn],
3,
dropout,
imgsize,
thickness,
num_categories,
intensity_channels=intensity_channels,
device=self.device)
def forward_batch(self, model, data_batch, mode, optimizer, criterion):
imgsize = self.config['imgsize']
is_train = mode == 'train'
report_hist_freq = self.config['report_hist_freq']
report_image_freq = self.config['report_image_freq']
thickness = self.config['thickness']
points = data_batch['points3'].to(self.device)
points_offset = data_batch['points3_offset'].to(self.device)
points_length = data_batch['points3_length']
category = data_batch['category'].to(self.device)
if report_image_freq > 0 and self.step_counters[mode] % report_image_freq == 0:
images = Raster.to_image(points, 1.0, imgsize, thickness, device=self.device)
image_grid = torchvision.utils.make_grid(images, nrow=4)
self.reporter.add_image('{}/sketch_input'.format(mode), image_grid, self.step_counters[mode])
if is_train:
optimizer.zero_grad()
with torch.set_grad_enabled(is_train):
logits, attention, images = model(points, points_offset, points_length)
loss = criterion(logits, category)
if is_train:
loss.backward()
optimizer.step()
if is_train and report_hist_freq > 0 and self.step_counters[mode] % report_hist_freq == 0:
self.reporter.add_histogram('{}/attention'.format(mode),
attention,
self.step_counters[mode],
bins='auto')
self.reporter.add_histogram('{}/points_length'.format(mode),
points_length,
self.step_counters[mode],
bins='auto')
return logits, loss, category
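# Hedged entry-point sketch (assumption: the required command-line flags such as
# --dataset_fn, --dataset_root and --log_dir are supplied, since _parse_args
# enforces them); it mirrors how the trainer class above is meant to be driven.
if __name__ == '__main__':
    with SketchR2CNNTrain() as app:
        app.run()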
|
the-stack_0_7785 | #Given the image distinguish between different shapes.
#Like your model should be able to detect the centre and
# classify these shapes in the image automatically.
import cv2
from matplotlib import pyplot as plt
img= cv2.imread('shapes.png')
imgray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(imgray,127,255,0)
contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
img = cv2.drawContours(img, contours, -1, (0,255,0), 3)
for i in range(len(contours)):
    cnt = contours[i]
    M = cv2.moments(cnt)
    if M['m00'] == 0:  # skip degenerate contours to avoid division by zero
        continue
    cx = int(M['m10'] / M['m00'])
    cy = int(M['m01'] / M['m00'])
    cv2.circle(img, (cx, cy), 7, (0,0,0), -1)
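    # Hedged extension (assumption, not in the original script): approximate the
    # contour polygon and guess the shape class from its vertex count, then
    # label it next to the detected centre.
    approx = cv2.approxPolyDP(cnt, 0.04 * cv2.arcLength(cnt, True), True)
    if len(approx) == 3:
        shape_name = 'triangle'
    elif len(approx) == 4:
        shape_name = 'rectangle'
    else:
        shape_name = 'circle'
    cv2.putText(img, shape_name, (cx - 20, cy - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)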
plt.imshow(img)
plt.xticks([]), plt.yticks([])
plt.show() |
the-stack_0_7788 | import numpy as np
import pandas
from swdb.util import COUNTIES
presidential_xls = 'http://elections.cdn.sos.ca.gov/sov/2016-primary/csv-presidential-candidates.xls'
voter_nominated_xls = 'http://elections.cdn.sos.ca.gov/sov/2016-primary/csv-voter-nominated-candidates.xls'
props_xls = 'http://elections.cdn.sos.ca.gov/sov/2016-primary/130-state-ballot-measures-formatted.xls'
contest_names = set(['President %s' % x for x in
['Democratic',
'Republican',
'American Independent',
'Green',
'Libertarian',
'P and F']])
# Normalized party names for consistency
party_dict = {'Democratic': 'DEM',
'Republican': 'REP',
'American Independent': 'AI',
'Green': 'GRN',
'Libertarian': 'LIB',
'Peace and Freedom': 'PF',
'No Party Preference': 'NPP'}
# Normalized office names
office_dict = {'United States Representative': 'U.S. House',
'State Assembly Member': 'State Assembly'}
# Clean up candidate names
candidate_dict = {'Ron Unz': 'Ron Unz',
'Alan Lowenthal': 'Alan Lowenthal'}
# Columns to select
columns = ['COUNTY_NAME', 'office', 'district',
'PARTY_NAME', 'CANDIDATE_NAME', 'VOTE_TOTAL']
presidential = pandas.read_excel(presidential_xls)
# Override office name for Presidential and fill in an empty district
presidential = presidential.assign(office='President').assign(district='')
# Select only contests of interest and the important columns
presidential = presidential.loc[
presidential.CONTEST_NAME.isin(contest_names)][columns]
voter_nominated = pandas.read_excel(voter_nominated_xls)
contest_split = voter_nominated['CONTEST_NAME'].str.extract(
    r'(?P<office>.+) District (?P<district>\d+)$', expand=True)
contest_split.office.fillna('U.S. Senate', inplace=True)
voter_nominated = pandas.concat([voter_nominated, contest_split], axis=1)
voter_nominated = voter_nominated.replace({'office': office_dict})[columns]
props = pandas.read_excel(props_xls,
names=['COUNTY_NAME', 'Yes', 'No'],
header=None).assign(office='Proposition 50')
props = props[props.COUNTY_NAME.isin(COUNTIES)]
props = pandas.melt(
props, id_vars=['COUNTY_NAME', 'office'], value_vars=['Yes', 'No'], var_name='CANDIDATE_NAME', value_name='VOTE_TOTAL')
props = props.assign(district='').assign(PARTY_NAME='')[columns]
result = pandas.concat([presidential, voter_nominated, props]
).replace({'PARTY_NAME': party_dict,
'CANDIDATE_NAME': candidate_dict})
output_columns = ['county', 'office',
'district', 'party', 'candidate', 'votes']
result[result.COUNTY_NAME != 'State Totals'].to_csv(
'2016/20160607__ca__primary.csv', header=output_columns, index=False)
|
the-stack_0_7791 | import os
from nltk import tokenize
import pandas as pd
from transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained("bert-large-uncased")
def get_labels_vector():
# 文本
texts = []
list = os.listdir("train-articles")
for i in range(0, len(list)):
f = open("train-articles/" + list[i], encoding='utf8')
texts.append(f.read())
# 标签
labels = []
list_labels = os.listdir("train-labels-task1-span-identification")
for i in range(0, len(list_labels)):
f = open("train-labels-task1-span-identification/" + list_labels[i], encoding='utf8')
labels.append(f.read())
    labels_tag = {}  # stores the tokenized words of each article
for i in range(0, len(list_labels)):
labels_tag[list_labels[i][7:16]] = []
for i in range(0, len(texts)):
labels_tag[list_labels[i][7:16]].append(tokenizer.tokenize(texts[i]))
    labels_tag_qujian = {}  # stores the annotated character spans of each article
for i in range(0, len(list_labels)):
labels_tag_qujian[list_labels[i][7:16]] = []
i = 10
j = 10
k = 0
for x in range(0, len(list_labels)):
if k < len(list_labels):
try:
while (True):
while (labels[k][i] != '\t' and labels[k][i] != '\n'):
if i < (len(labels[k]) - 1):
i = i + 1
else:
break
# print(j, i)
# print(labels[k][j:i])
if (len(labels[k][j:i]) < 6):
labels_tag_qujian[list_labels[k][7:16]].append(labels[k][j:i])
j = i + 1
i = j
except IndexError:
None
i = 10
j = 10
k = k + 1
    # determine the start and end of each annotated span
    labels_tag_word_qujian = {}  # stores the token-index spans of each article
for i in range(0, len(list_labels)):
labels_tag_word_qujian[list_labels[i][7:16]] = []
i = 0
k = 0
# print(labels_tag[list_labels[1][7:16]])
    # convert character spans to token-level spans
for j in range(0, len(list_labels)):
i = 0
k = 0
while (i < len(labels_tag_qujian[list_labels[j][7:16]])):
start = labels_tag_qujian[list_labels[j][7:16]][i]
end = labels_tag_qujian[list_labels[j][7:16]][i + 1]
            a = len(tokenizer.tokenize(texts[j][:int(start)]))  # start token index
            b = len(tokenizer.tokenize(texts[j][int(start):int(end)]))  # token length of the span
            c = a + b  # end token index
labels_tag_word_qujian[list_labels[j][7:16]].append(a)
labels_tag_word_qujian[list_labels[j][7:16]].append(c)
k = k + 1
i = 2 * k
print("--------------show--------")
# print(labels_tag_word_qujian)
    # label vectors
labels_tag_word_vector = {}
for i in range(0, len(list_labels)):
labels_tag_word_vector[list_labels[i][7:16]] = []
for j in range(0, len(list_labels)):
for length in range(0, len(tokenizer.tokenize(texts[j]))):
labels_tag_word_vector[list_labels[j][7:16]].append(0)
for j in range(0, len(list_labels)):
i = 0
k = 0
while (i < len(labels_tag_word_qujian[list_labels[j][7:16]])):
start = labels_tag_word_qujian[list_labels[j][7:16]][i]
end = labels_tag_word_qujian[list_labels[j][7:16]][i + 1]
a = len(tokenizer.tokenize(texts[j]))
if (int(start) < a and int(end) + 1 < a):
for length in range(int(start), int(end) + 1):
labels_tag_word_vector[list_labels[j][7:16]][length] = 1
k = k + 1
i = 2 * k
# print(labels_tag_word_vector)
sum = 0
for j in range(0, len(list_labels)):
sum += len(labels_tag_word_vector[list_labels[j][7:16]])
print(sum / len(list_labels))
return labels_tag_word_vector, texts
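# Hedged alternative sketch (assumption): the prefix re-tokenization above maps
# character offsets to token indices by re-tokenizing text slices; a fast
# tokenizer can return that mapping directly. The helper name is illustrative.
def _char_span_to_token_span(text, start, end):
    from transformers import BertTokenizerFast
    fast_tok = BertTokenizerFast.from_pretrained("bert-large-uncased")
    enc = fast_tok(text, return_offsets_mapping=True, add_special_tokens=False)
    hits = [i for i, (s, e) in enumerate(enc["offset_mapping"]) if s < end and e > start]
    return (hits[0], hits[-1]) if hits else (None, None)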
def get_test_textVector():
# 文本
texts = []
list = os.listdir("dev-articles")
for i in range(0, len(list)):
f = open("dev-articles/" + list[i], encoding='utf8')
texts.append(f.read())
# texts_token = []
# for i in range(0, len(texts)):
# texts_token.append(tokenizer.tokenize(texts[i]))
return texts
def new_get_test_textVector():
# 文本
texts = []
list = os.listdir("test-articles")
for i in range(0, len(list)):
f = open("test-articles/" + list[i], encoding='utf8')
texts.append(f.read())
# texts_token = []
# for i in range(0, len(texts)):
# texts_token.append(tokenizer.tokenize(texts[i]))
return texts
def get_train_vector_TC():
train_articles = pd.read_excel("mapping_TC.xlsx")
# In[25]:
train_articles.head(8)
# In[15]:
    # Texts
texts = []
list = os.listdir("train-articles")
for i in range(0, len(list)):
f = open("train-articles/" + list[i], encoding='utf8')
texts.append(f.read())
# In[16]:
    # Labels
labels = []
list_labels = os.listdir("train-labels-task2-technique-classification")
for i in range(0, len(list_labels)):
f = open("train-labels-task2-technique-classification/" + list_labels[i], encoding='utf8')
labels.append(f.read())
# In[18]:
    text_dict = {}  # stores the raw text of each article
for i in range(0, len(list_labels)):
text_dict[list_labels[i][7:16]] = texts[i]
# In[33]:
id_lst = []
start_lst = []
end_lst = []
for i in range(0, train_articles.shape[0]):
a = len(tokenizer.tokenize(
            text_dict[str(train_articles['File_ID'][i])][:int(train_articles['Start_IDX'][i])]))  # start token index
b = len(tokenizer.tokenize(text_dict[str(train_articles['File_ID'][i])][
int(train_articles['Start_IDX'][i]):int(train_articles['End_IDX'][i])]))
        c = a + b  # end token index
start_lst.append(a)
end_lst.append(c)
id_lst.append(train_articles['File_ID'][i])
# In[36]:
frame_ = pd.DataFrame({'File_ID': id_lst, 'Start_IDX_Word': start_lst, 'End_IDX_Word': end_lst})
writer = pd.ExcelWriter('mapping_TC_word.xlsx', engine='xlsxwriter')
frame_.to_excel(writer, sheet_name='task-2_word')
writer.save()
# In[37]:
labels_tag_word_vector = {}
for i in range(0, len(list_labels)):
labels_tag_word_vector[list_labels[i][7:16]] = []
for j in range(0, len(list_labels)):
for length in range(0, len(tokenizer.tokenize(texts[j]))):
labels_tag_word_vector[list_labels[j][7:16]].append(0)
# In[39]:
train_articles_word = pd.read_excel("mapping_TC_word.xlsx", )
# In[40]:
train_articles_word.head()
# In[70]:
class_TC = []
f = open("propaganda-techniques-names-semeval2020task11.txt", encoding='utf8')
class_TC.append(f.read())
s = class_TC[0]
class_TC = s.split('\n')
class_TC.remove("")
# In[83]:
for i in range(0, train_articles_word.shape[0]):
a = len(tokenizer.tokenize(text_dict[str(train_articles_word['File_ID'][i])]))
if (int(train_articles_word['Start_IDX_Word'][i]) < a and int(train_articles_word['End_IDX_Word'][i]) + 1 < a):
for j in range(0, len(class_TC)):
if (train_articles['Classification'][i] == class_TC[j]):
index = j + 1
for length in range(int(train_articles_word['Start_IDX_Word'][i]),
int(train_articles_word['End_IDX_Word'][i]) + 1):
labels_tag_word_vector[str(train_articles_word['File_ID'][i])][length] = index
return labels_tag_word_vector |
the-stack_0_7795 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Swap gate.
"""
import numpy
from qiskit.circuit import ControlledGate
from qiskit.circuit import Gate
from qiskit.circuit import QuantumCircuit
from qiskit.circuit import QuantumRegister
from qiskit.util import deprecate_arguments
class SwapGate(Gate):
r"""The SWAP gate.
This is a symmetric and Clifford gate.
**Circuit symbol:**
.. parsed-literal::
q_0: ─X─
│
q_1: ─X─
**Matrix Representation:**
.. math::
SWAP =
\begin{pmatrix}
1 & 0 & 0 & 0 \\
0 & 0 & 1 & 0 \\
0 & 1 & 0 & 0 \\
0 & 0 & 0 & 1
\end{pmatrix}
The gate is equivalent to a state swap and is a classical logic gate.
.. math::
|a, b\rangle \rightarrow |b, a\rangle
"""
def __init__(self):
"""Create new SWAP gate."""
super().__init__('swap', 2, [])
def _define(self):
"""
gate swap a,b { cx a,b; cx b,a; cx a,b; }
"""
from qiskit.extensions.standard.x import CXGate
definition = []
q = QuantumRegister(2, 'q')
rule = [
(CXGate(), [q[0], q[1]], []),
(CXGate(), [q[1], q[0]], []),
(CXGate(), [q[0], q[1]], [])
]
for inst in rule:
definition.append(inst)
self.definition = definition
def control(self, num_ctrl_qubits=1, label=None, ctrl_state=None):
"""Return a (multi-)controlled-SWAP gate.
One control returns a CSWAP (Fredkin) gate.
Args:
num_ctrl_qubits (int): number of control qubits.
label (str or None): An optional label for the gate [Default: None]
ctrl_state (int or str or None): control state expressed as integer,
string (e.g. '110'), or None. If None, use all 1s.
Returns:
ControlledGate: controlled version of this gate.
"""
if ctrl_state is None:
if num_ctrl_qubits == 1:
return CSwapGate()
return super().control(num_ctrl_qubits=num_ctrl_qubits, label=label,
ctrl_state=ctrl_state)
def inverse(self):
"""Return inverse Swap gate (itself)."""
return SwapGate() # self-inverse
def to_matrix(self):
"""Return a numpy.array for the SWAP gate."""
return numpy.array([[1, 0, 0, 0],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 0, 1]], dtype=complex)
def swap(self, qubit1, qubit2):
"""Apply :class:`~qiskit.extensions.standard.SwapGate`.
"""
return self.append(SwapGate(), [qubit1, qubit2], [])
QuantumCircuit.swap = swap
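# Hedged usage sketch (not part of the original module): applying the gate via
# the QuantumCircuit.swap method attached above.
def _swap_example():
    qc = QuantumCircuit(2)
    qc.x(0)        # prepare |01> (qubit 0 set, little-endian ordering)
    qc.swap(0, 1)  # the excitation moves to qubit 1, giving |10>
    return qc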
class CSwapMeta(type):
"""A Metaclass to ensure that CSwapGate and FredkinGate are of the same type.
Can be removed when FredkinGate gets removed.
"""
@classmethod
def __instancecheck__(mcs, inst):
return type(inst) in {CSwapGate, FredkinGate} # pylint: disable=unidiomatic-typecheck
class CSwapGate(ControlledGate, metaclass=CSwapMeta):
    r"""Controlled-SWAP gate, also known as the Fredkin gate.
**Circuit symbol:**
.. parsed-literal::
q_0: ─X─
│
q_1: ─X─
│
q_2: ─■─
**Matrix representation:**
.. math::
CSWAP\ q_2, q_1, q_0 =
|0 \rangle \langle 0| \otimes I \otimes I +
|1 \rangle \langle 1| \otimes SWAP =
\begin{pmatrix}
1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 \\
0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 \\
\end{pmatrix}
.. note::
In Qiskit's convention, higher qubit indices are more significant
(little endian convention). In many textbooks, controlled gates are
presented with the assumption of more significant qubits as control,
which is how we present the gate above as well, resulting in textbook
matrices. Instead, if we use q_0 as control, the matrix will be:
.. math::
CSWAP\ q_0, q_1, q_2 =
|0 \rangle \langle 0| \otimes I \otimes I +
|1 \rangle \langle 1| \otimes SWAP =
\begin{pmatrix}
1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 \\
0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 \\
0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 \\
\end{pmatrix}
In the computational basis, this gate swaps the states of
the two target qubits if the control qubit is in the
:math:`|1\rangle` state.
.. math::
|0, b, c\rangle \rightarrow |0, b, c\rangle
|1, b, c\rangle \rightarrow |1, c, b\rangle
"""
def __init__(self):
"""Create new CSWAP gate."""
super().__init__('cswap', 3, [], num_ctrl_qubits=1)
self.base_gate = SwapGate()
def _define(self):
"""
gate cswap a,b,c
{ cx c,b;
ccx a,b,c;
cx c,b;
}
"""
from qiskit.extensions.standard.x import CXGate
from qiskit.extensions.standard.x import CCXGate
definition = []
q = QuantumRegister(3, 'q')
rule = [
(CXGate(), [q[2], q[1]], []),
(CCXGate(), [q[0], q[1], q[2]], []),
(CXGate(), [q[2], q[1]], [])
]
for inst in rule:
definition.append(inst)
self.definition = definition
def inverse(self):
"""Return inverse CSwap gate (itself)."""
return CSwapGate() # self-inverse
def to_matrix(self):
"""Return a numpy.array for the Fredkin (CSWAP) gate."""
return numpy.array([[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1]], dtype=complex)
class FredkinGate(CSwapGate, metaclass=CSwapMeta):
"""The deprecated CSwapGate class."""
def __init__(self):
import warnings
warnings.warn('The class FredkinGate is deprecated as of 0.14.0, and '
'will be removed no earlier than 3 months after that release date. '
'You should use the class CSwapGate instead.',
DeprecationWarning, stacklevel=2)
super().__init__()
@deprecate_arguments({'ctl': 'control_qubit',
'tgt1': 'target_qubit1',
'tgt2': 'target_qubit2'})
def cswap(self, control_qubit, target_qubit1, target_qubit2,
*, ctl=None, tgt1=None, tgt2=None): # pylint: disable=unused-argument
"""Apply :class:`~qiskit.extensions.standard.CSwapGate`.
"""
return self.append(CSwapGate(), [control_qubit, target_qubit1, target_qubit2], [])
# support both cswap and fredkin as methods of QuantumCircuit
QuantumCircuit.cswap = cswap
QuantumCircuit.fredkin = cswap
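# Hedged usage sketch (not part of the original module): a Fredkin gate with
# qubit 0 as control, swapping the states of qubits 1 and 2.
def _cswap_example():
    qc = QuantumCircuit(3)
    qc.x(0)            # control qubit set, so the swap below takes effect
    qc.x(1)            # state |q2 q1 q0> = |011>
    qc.cswap(0, 1, 2)  # becomes |101>
    return qc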
|
the-stack_0_7796 |
import functools
import jaeger_client
import opentracing
from opentracing.propagation import Format
from opentracing_instrumentation import get_current_span, span_in_context
from entityservice.settings import Config as config
from entityservice.utils import load_yaml_config
DEFAULT_TRACER_CONFIG = {'sampler': {'type': 'const', 'param': 1}}
def get_tracer_config(service_name):
if config.TRACING_CONFIG_FILENAME is not None:
tracing_config = load_yaml_config(config.TRACING_CONFIG_FILENAME)
else:
tracing_config = DEFAULT_TRACER_CONFIG
return jaeger_client.Config(config=tracing_config, service_name=service_name)
def initialize_tracer(service_name='api'):
jaeger_config = get_tracer_config(service_name)
# Note this call also sets opentracing.tracer
return jaeger_config.initialize_tracer()
def create_tracer(service_name='worker'):
jaeger_config = get_tracer_config(service_name)
return jaeger_config.new_tracer()
def serialize_span(parent_span):
serialized_span = {}
opentracing.tracer.inject(parent_span, Format.TEXT_MAP, serialized_span)
return serialized_span
def serialize_current_span():
""" get the current span and serialize it."""
return serialize_span(get_current_span())
def deserialize_span_context(serialized_span):
if serialized_span is not None:
span_context = opentracing.tracer.extract(Format.TEXT_MAP, serialized_span)
return span_context
def trace(_func=None, *, span_name=None, args_as_tags=None, parent_span_arg='parent_span'):
"""
decorator to encapsulate a function in a span for tracing.
:param span_name: the operation name to set in the span. If None, then the function name is used
:param args_as_tags: a list of arguments of the function to be added as tags to the span.
:param parent_span_arg: a reference to the parent span. This can either be, a Span, SpanContext, serialized span
context, or None. If None, then the current active span is used as parent
"""
def trace_decorator(func):
with opentracing.tracer.start_span('in deco'):
pass
@functools.wraps(func)
def tracing_wrapper(*args, **kwargs):
op_name = func.__name__ if span_name is None else span_name
args_dict = dict(zip(func.__code__.co_varnames, args))
args_dict.update(kwargs)
parent = args_dict.get(parent_span_arg, None)
try:
parent = deserialize_span_context(parent)
except:
pass
if parent is None:
parent = get_current_span()
with opentracing.tracer.start_span(op_name, child_of=parent) as span:
#add tags
try:
for arg in args_as_tags:
span.set_tag(arg, args_dict.get(arg, None))
except TypeError:
pass
with span_in_context(span):
value = func(*args, **kwargs)
return value
return tracing_wrapper
if _func is None:
return trace_decorator
else:
return trace_decorator(_func)
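# Hedged usage sketch (assumption: a concrete tracer has been installed via
# initialize_tracer() or create_tracer() before traced code runs; the function
# and tag names below are illustrative only).
@trace(span_name='compute_similarity', args_as_tags=['project_id'])
def _example_traced_task(project_id, parent_span=None):
    # runs inside a span tagged with project_id; nested traced calls pick this
    # span up as their parent through get_current_span()
    return project_id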
|
the-stack_0_7797 | # -*- coding: utf-8 -*-
""" OCR VIEWS
BLUEPRINT: ocr_bp
ROUTES FUNCTIONS: ocr, uploaded_file
OTHER FUNCTIONS: allowed_file, tesseract_get_text, get_img_from_url, azure_get_text
"""
from PIL import Image
import requests
from flask import request, Blueprint, render_template, redirect, flash, send_from_directory
from werkzeug.utils import secure_filename
import pytesseract
import shutil
import os
from config import basedir
# Blueprint Configuration
ocr_bp = Blueprint(
'ocr_bp',
__name__,
template_folder='../templates/',
static_folder='../static/'
)
ALLOWED_EXTENSIONS = set(['pdf', 'png', 'jpg', 'jpeg', 'gif', 'svg'])
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@ocr_bp.route('/ocr', methods=['GET', 'POST'])
def ocr():
import shutil
if request.method == 'POST':
# check if the post request has the file part
if 'file' in request.files:
file = request.files['file']
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(f'app/ocr/uploads/{filename}')
source = os.path.join(basedir, f'app/ocr/uploads/{filename}')
img = Image.open(source)
ocr_text = tesseract_get_text(img)
ocr_text = ocr_text.split('\n')
return render_template("ocr/ocr.html", source=f"/ocr/uploads/{filename}", ocr_text=ocr_text)
elif "img_url" in request.form:
url = request.form['img_url']
if url == "":
flash('No selected file')
return redirect(request.url)
filename = get_img_from_url(url)
if url and allowed_file(filename):
filename = secure_filename(filename)
source = os.path.join(basedir, f'app/ocr/uploads/{filename}')
img = Image.open(source)
ocr_text = tesseract_get_text(img)
ocr_text = ocr_text.split('\n')
return render_template("ocr/ocr.html", source=url, ocr_text=ocr_text)
else:
flash('No file part')
return redirect(request.url)
return render_template("ocr/ocr.html")
@ocr_bp.route('/ocr/uploads/<filename>')
def uploaded_file(filename):
return send_from_directory('ocr/uploads', filename)
def tesseract_get_text(img):
# custom_config = r'--oem 2 --psm 12'
# result = pytesseract.image_to_string(img, config=custom_config)
result = pytesseract.image_to_string(img)
#with open('text_result.txt', mode ='w') as file:
# file.write(result)
return result
def get_img_from_url(img_url):
# Set up the filename
filename = img_url.split("/")[-1]
r = requests.get(img_url, stream=True)
if r.status_code == 200:
# Set decode_content value to True, otherwise the downloaded image file's size will be zero.
r.raw.decode_content = True
with open(os.path.join(basedir, f'app/ocr/uploads/{filename}'), 'wb') as file:
shutil.copyfileobj(r.raw, file)
return filename
def azure_get_text(url_image):
endpoint = "https://westeurope.api.cognitive.microsoft.com"
subscription_key = "b7b423c4968348568076f549657d6199"
text_recognition_url = endpoint + "/vision/v3.0/ocr"
headers = {"Ocp-Apim-Subscription-Key": subscription_key,
'Content-type': 'application/json'}
# params = {"includeTextDetails": True}
data = {'url': url_image}
response = requests.post(
text_recognition_url, headers=headers, json=data
)
doc = []
for rdrx, region in enumerate(response.json()['regions']):
para = []
for index, line in enumerate(response.json()['regions'][rdrx]['lines']):
text_line = []
for jdex, word in enumerate(line['words']):
text_line.append(line['words'][jdex]['text'])
text_line = ' '.join(text_line)
para.append(text_line)
doc.append(para)
return doc
|
the-stack_0_7799 | from CHRLINE import *
import os, hashlib, hmac, base64, time
import axolotl_curve25519 as Curve25519
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad, unpad
def getSHA256Sum(*args):
instance = hashlib.sha256()
for arg in args:
if isinstance(arg, str):
arg = arg.encode()
instance.update(arg)
return instance.digest()
def get_issued_at() -> bytes:
return base64.b64encode(
f"iat: {int(time.time()) * 60}\n".encode("utf-8")) + b"."
def get_digest(key: bytes, iat: bytes) -> bytes:
return base64.b64encode(hmac.new(key, iat, hashlib.sha1).digest())
def create_token(auth_key: str) -> str:
mid, key = auth_key.partition(":")[::2]
key = base64.b64decode(key.encode("utf-8"))
iat = get_issued_at()
digest = get_digest(key, iat).decode("utf-8")
iat = iat.decode("utf-8")
return mid + ":" + iat + "." + digest
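# Hedged illustration (assumption): a made-up mid and a random key, only to show
# the "mid:iat.digest" shape of the token produced by create_token.
def _token_example():
    fake_key = base64.b64encode(os.urandom(32)).decode()
    fake_auth_key = 'u0123456789abcdef0123456789abcdef:' + fake_key
    return create_token(fake_auth_key)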
UPDATE_NAME = True
DISPLAY_NAME = "yinmo"
cl = CHRLINE(device="ANDROID", noLogin=True)
session = cl.openPrimarySession()
private_key = Curve25519.generatePrivateKey(os.urandom(32))
public_key = Curve25519.generatePublicKey(private_key)
nonce = os.urandom(16)
b64_private_key = base64.b64encode(private_key)
b64_public_key = base64.b64encode(public_key)
b64_nonce = base64.b64encode(nonce)
print(f"private_key: {b64_private_key}")
print(f"public_key: {b64_public_key}")
print(f"nonce: {b64_nonce}")
print(f"[SESSION] {session}")
info = cl.getCountryInfo(session)
phone = input('input your phone number(0936....): ')
region = input('input phone number region(TW or JP or...): ')
phone2 = cl.getPhoneVerifMethodV2(session, phone, region)
print(f"[PHONE] {phone2[3]}")
print(f"[VerifMethod] {phone2[1]}")  # if this does not include 1, the next call may return an error
sendPin = cl.requestToSendPhonePinCode(session, phone, region, phone2[1][0])
print(f"[SEND PIN CODE] {sendPin}")
pin = input('Enter Pin code: ')
verify = cl.verifyPhonePinCode(session, phone, region, pin)
print(f"[VERIFY PIN CODE] {verify}")
if 'error' in verify:
if verify['error']['code'] == 5:
print(f"[HUMAN_VERIFICATION_REQUIRED]")
hv = HumanVerif(verify['error']['metadata'][11][1], verify['error']['metadata'][11][2])
RetryReq(session, hv)
cl.validateProfile(session, 'yinmo')
exchangeEncryptionKey = cl.exchangeEncryptionKey(session, b64_public_key.decode(), b64_nonce.decode(), 1)
print(f'exchangeEncryptionKey: {exchangeEncryptionKey}')
exc_key = base64.b64decode(exchangeEncryptionKey[1])
exc_nonce = base64.b64decode(exchangeEncryptionKey[2])
sign = Curve25519.calculateAgreement(private_key, exc_key)
print(f"sign: {sign}")
password = 'test2021Chrline'
master_key = getSHA256Sum(b'master_key', sign, nonce, exc_nonce)
aes_key = getSHA256Sum(b'aes_key', master_key)
hmac_key = getSHA256Sum(b'hmac_key', master_key)
e1 = AES.new(aes_key[:16], AES.MODE_CBC, aes_key[16:32])
doFinal = e1.encrypt(pad(password.encode(), 16))
hmacd = hmac.new(
hmac_key,
msg=doFinal,
digestmod=hashlib.sha256
).digest()
encPwd = base64.b64encode(doFinal + hmacd).decode()
print(f"[encPwd] {encPwd}")
setPwd = cl.setPassword(session, encPwd, 1)
print(f"[setPassword] {setPwd}")
register = cl.registerPrimaryWithTokenV3(session)
print(f"[REGISTER] {register}")
print(f"---------------------------")
authKey = register[1]
tokenV3IssueResult = register[2]
mid = register[3]
primaryToken = create_token(authKey)
print(f"[AuthKey]: {authKey}")
print(f"[PrimaryToken]: {primaryToken}")
print(f"[UserMid]: {mid}")
print(f"---------------------------")
accessTokenV3 = tokenV3IssueResult[1]
print(f"[accessTokenV3]: {accessTokenV3}")
refreshToken = tokenV3IssueResult[2]
print(f"[refreshToken]: {refreshToken}")
durationUntilRefreshInSec = tokenV3IssueResult[3]
print(f"[durationUntilRefreshInSec]: {durationUntilRefreshInSec}")
refreshApiRetryPolicy = tokenV3IssueResult[4]
loginSessionId = tokenV3IssueResult[5]
print(f"[loginSessionId]: {loginSessionId}")
tokenIssueTimeEpochSec = tokenV3IssueResult[6]
print(f"[tokenIssueTimeEpochSec]: {tokenIssueTimeEpochSec}")
cl = CHRLINE(primaryToken, device="ANDROID") #login
if UPDATE_NAME:
cl.updateProfileAttribute(2, DISPLAY_NAME) #update display name
# for i in range(100):
# accessTokenV3 = cl.refreshAccessToken(refreshToken)
# print(f"[accessTokenV3_2]: {accessTokenV3}") |
the-stack_0_7800 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import re
import unittest
from unittest.mock import MagicMock, PropertyMock, patch
from airflow.providers.apache.hive.transfers.hive_to_mysql import HiveToMySqlOperator
from airflow.utils import timezone
from airflow.utils.operator_helpers import context_to_airflow_vars
from tests.providers.apache.hive import TestHiveEnvironment
from tests.test_utils.mock_hooks import MockHiveServer2Hook, MockMySqlHook
DEFAULT_DATE = timezone.datetime(2015, 1, 1)
class TestHiveToMySqlTransfer(TestHiveEnvironment):
def setUp(self):
self.kwargs = dict(
sql='sql',
mysql_table='table',
hiveserver2_conn_id='hiveserver2_default',
mysql_conn_id='mysql_default',
task_id='test_hive_to_mysql',
)
super().setUp()
@patch('airflow.providers.apache.hive.transfers.hive_to_mysql.MySqlHook')
@patch('airflow.providers.apache.hive.transfers.hive_to_mysql.HiveServer2Hook')
def test_execute(self, mock_hive_hook, mock_mysql_hook):
HiveToMySqlOperator(**self.kwargs).execute(context={})
mock_hive_hook.assert_called_once_with(hiveserver2_conn_id=self.kwargs['hiveserver2_conn_id'])
mock_hive_hook.return_value.get_records.assert_called_once_with('sql', hive_conf={})
mock_mysql_hook.assert_called_once_with(mysql_conn_id=self.kwargs['mysql_conn_id'])
mock_mysql_hook.return_value.insert_rows.assert_called_once_with(
table=self.kwargs['mysql_table'],
rows=mock_hive_hook.return_value.get_records.return_value
)
@patch('airflow.providers.apache.hive.transfers.hive_to_mysql.MySqlHook')
@patch('airflow.providers.apache.hive.transfers.hive_to_mysql.HiveServer2Hook')
def test_execute_mysql_preoperator(self, mock_hive_hook, mock_mysql_hook):
self.kwargs.update(dict(mysql_preoperator='preoperator'))
HiveToMySqlOperator(**self.kwargs).execute(context={})
mock_mysql_hook.return_value.run.assert_called_once_with(self.kwargs['mysql_preoperator'])
@patch('airflow.providers.apache.hive.transfers.hive_to_mysql.MySqlHook')
@patch('airflow.providers.apache.hive.transfers.hive_to_mysql.HiveServer2Hook')
def test_execute_with_mysql_postoperator(self, mock_hive_hook, mock_mysql_hook):
self.kwargs.update(dict(mysql_postoperator='postoperator'))
HiveToMySqlOperator(**self.kwargs).execute(context={})
mock_mysql_hook.return_value.run.assert_called_once_with(self.kwargs['mysql_postoperator'])
@patch('airflow.providers.apache.hive.transfers.hive_to_mysql.MySqlHook')
@patch('airflow.providers.apache.hive.transfers.hive_to_mysql.NamedTemporaryFile')
@patch('airflow.providers.apache.hive.transfers.hive_to_mysql.HiveServer2Hook')
def test_execute_bulk_load(self, mock_hive_hook, mock_tmp_file, mock_mysql_hook):
type(mock_tmp_file).name = PropertyMock(return_value='tmp_file')
context = {}
self.kwargs.update(dict(bulk_load=True))
HiveToMySqlOperator(**self.kwargs).execute(context=context)
mock_tmp_file.assert_called_once_with()
mock_hive_hook.return_value.to_csv.assert_called_once_with(
self.kwargs['sql'],
mock_tmp_file.return_value.name,
delimiter='\t',
lineterminator='\n',
output_header=False,
hive_conf=context_to_airflow_vars(context)
)
mock_mysql_hook.return_value.bulk_load.assert_called_once_with(
table=self.kwargs['mysql_table'],
tmp_file=mock_tmp_file.return_value.name
)
mock_tmp_file.return_value.close.assert_called_once_with()
@patch('airflow.providers.apache.hive.transfers.hive_to_mysql.MySqlHook')
def test_execute_with_hive_conf(self, mock_mysql_hook):
context = {}
mock_hive_hook = MockHiveServer2Hook()
mock_hive_hook.get_records = MagicMock(return_value='test_hive_results')
self.kwargs.update(dict(hive_conf={'mapreduce.job.queuename': 'fake_queue'}))
with patch('airflow.providers.apache.hive.transfers.hive_to_mysql.HiveServer2Hook',
return_value=mock_hive_hook):
HiveToMySqlOperator(**self.kwargs).execute(context=context)
hive_conf = context_to_airflow_vars(context)
hive_conf.update(self.kwargs['hive_conf'])
mock_hive_hook.get_records.assert_called_once_with(
self.kwargs['sql'],
hive_conf=hive_conf
)
@unittest.skipIf(
'AIRFLOW_RUNALL_TESTS' not in os.environ,
"Skipped because AIRFLOW_RUNALL_TESTS is not set")
def test_hive_to_mysql(self):
test_hive_results = 'test_hive_results'
mock_hive_hook = MockHiveServer2Hook()
mock_hive_hook.get_records = MagicMock(return_value=test_hive_results)
mock_mysql_hook = MockMySqlHook()
mock_mysql_hook.run = MagicMock()
mock_mysql_hook.insert_rows = MagicMock()
with patch('airflow.providers.apache.hive.transfers.hive_to_mysql.HiveServer2Hook',
return_value=mock_hive_hook):
with patch('airflow.providers.apache.hive.transfers.hive_to_mysql.MySqlHook',
return_value=mock_mysql_hook):
op = HiveToMySqlOperator(
mysql_conn_id='airflow_db',
task_id='hive_to_mysql_check',
sql="""
SELECT name
FROM airflow.static_babynames
LIMIT 100
""",
mysql_table='test_static_babynames',
mysql_preoperator=[
'DROP TABLE IF EXISTS test_static_babynames;',
'CREATE TABLE test_static_babynames (name VARCHAR(500))',
],
dag=self.dag)
op.clear(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
raw_select_name_query = mock_hive_hook.get_records.call_args_list[0][0][0]
actual_select_name_query = re.sub(r'\s{2,}', ' ', raw_select_name_query).strip()
expected_select_name_query = 'SELECT name FROM airflow.static_babynames LIMIT 100'
self.assertEqual(expected_select_name_query, actual_select_name_query)
actual_hive_conf = mock_hive_hook.get_records.call_args_list[0][1]['hive_conf']
expected_hive_conf = {'airflow.ctx.dag_owner': 'airflow',
'airflow.ctx.dag_id': 'test_dag_id',
'airflow.ctx.task_id': 'hive_to_mysql_check',
'airflow.ctx.execution_date': '2015-01-01T00:00:00+00:00'}
self.assertEqual(expected_hive_conf, actual_hive_conf)
expected_mysql_preoperator = ['DROP TABLE IF EXISTS test_static_babynames;',
'CREATE TABLE test_static_babynames (name VARCHAR(500))']
mock_mysql_hook.run.assert_called_with(expected_mysql_preoperator)
mock_mysql_hook.insert_rows.assert_called_with(table='test_static_babynames', rows=test_hive_results)
|
the-stack_0_7802 | import scrapy
class SpiderSauraus(scrapy.Spider):
name = 'spidersauraus'
start_urls = ['https://en.wikipedia.org/wiki/List_of_dinosaur_genera']
def parse(self, response):
filename = 'dinosaurs.txt'
dinos = set()
count = 0
with open(filename, 'w') as f:
for dino in response.css('ul>li'):
dino_name = dino.css('i > a ::text').extract_first()
if dino_name != None:
dinos.add(dino_name)
if (count+1) == len(dinos):
f.write(dino_name)
f.write('\n')
count += 1
print ('{} Dinosaurs found!'.format(count))
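# Hedged local-run sketch: normally this spider is launched with
# `scrapy crawl spidersauraus` from a Scrapy project; CrawlerProcess lets the
# script run standalone for a quick check.
if __name__ == '__main__':
    from scrapy.crawler import CrawlerProcess
    process = CrawlerProcess()
    process.crawl(SpiderSauraus)
    process.start()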
|
the-stack_0_7803 | ##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
"""
import os
import shutil
import sys
import tempfile
from optparse import OptionParser
tmpeggs = tempfile.mkdtemp()
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --find-links to point to local resources, you can keep
this script from going over the network.
'''
parser = OptionParser(usage=usage)
parser.add_option("-v", "--version", help="use a specific zc.buildout version")
parser.add_option("-t", "--accept-buildout-test-releases",
dest='accept_buildout_test_releases',
action="store_true", default=False,
help=("Normally, if you do not specify a --version, the "
"bootstrap script and buildout gets the newest "
"*final* versions of zc.buildout and its recipes and "
"extensions for you. If you use this flag, "
"bootstrap and buildout will get the newest releases "
"even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
help=("Specify the path to the buildout configuration "
"file to be used."))
parser.add_option("-f", "--find-links",
help=("Specify a URL to search for buildout releases"))
options, args = parser.parse_args()
######################################################################
# load/install setuptools
to_reload = False
try:
import pkg_resources
import setuptools
except ImportError:
ez = {}
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
# XXX use a more permanent ez_setup.py URL when available.
exec(urlopen('http://bitbucket.org/pypa/setuptools/raw/0.7.2/ez_setup.py'
).read(), ez)
setup_args = dict(to_dir=tmpeggs, download_delay=0)
ez['use_setuptools'](**setup_args)
if to_reload:
reload(pkg_resources)
import pkg_resources
# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
if path not in pkg_resources.working_set.entries:
pkg_resources.working_set.add_entry(path)
######################################################################
# Install buildout
ws = pkg_resources.working_set
cmd = [sys.executable, '-c',
'from setuptools.command.easy_install import main; main()',
'-mZqNxd', tmpeggs]
find_links = os.environ.get(
'bootstrap-testing-find-links',
options.find_links or
('http://downloads.buildout.org/'
if options.accept_buildout_test_releases else None)
)
if find_links:
cmd.extend(['-f', find_links])
setuptools_path = ws.find(
pkg_resources.Requirement.parse('setuptools')).location
requirement = 'zc.buildout'
version = options.version
if version is None and not options.accept_buildout_test_releases:
# Figure out the most recent final version of zc.buildout.
import setuptools.package_index
_final_parts = '*final-', '*final'
def _final_version(parsed_version):
for part in parsed_version:
if (part[:1] == '*') and (part not in _final_parts):
return False
return True
index = setuptools.package_index.PackageIndex(
search_path=[setuptools_path])
if find_links:
index.add_find_links((find_links,))
req = pkg_resources.Requirement.parse(requirement)
if index.obtain(req) is not None:
best = []
bestv = None
for dist in index[req.project_name]:
distv = dist.parsed_version
if _final_version(distv):
if bestv is None or distv > bestv:
best = [dist]
bestv = distv
elif distv == bestv:
best.append(dist)
if best:
best.sort()
version = best[-1].version
if version:
requirement = '=='.join((requirement, version))
cmd.append(requirement)
import subprocess
if subprocess.call(cmd, env=dict(os.environ, PYTHONPATH=setuptools_path)) != 0:
raise Exception(
"Failed to execute command:\n%s",
repr(cmd)[1:-1])
######################################################################
# Import and run buildout
ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout
if not [a for a in args if '=' not in a]:
args.append('bootstrap')
# if -c was provided, we push it back into args for buildout' main function
if options.config_file is not None:
args[0:0] = ['-c', options.config_file]
zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs)
|
the-stack_0_7805 | """
Tests for exam models
"""
from unittest import TestCase
from exams.models import (
ExamAuthorization,
ExamProfile,
)
class ExamProfileTest(TestCase):
"""Tests for ExamProfiles"""
def test_exam_profile_str(self):
"""
Test method ExamProfile.__str__ prints correctly
"""
ep = ExamProfile(id=1, status=ExamProfile.PROFILE_IN_PROGRESS)
assert str(ep) == 'Exam Profile "1" with status "in-progress"'
class ExamAuthorizationTest(TestCase):
"""Tests for ExamAuthorizationss"""
def test_exam_authorization_str(self):
"""
Test method ExamAuthorization.__str__ prints correctly
"""
auth = ExamAuthorization(id=1, status=ExamProfile.PROFILE_IN_PROGRESS, user_id=2)
assert str(auth) == 'Exam Authorization "1" with status "in-progress" for user 2'
|
the-stack_0_7808 | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builds the CIFAR-10 network with additional variables to support pruning.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow.compat.v1 as tf
from model_pruning.examples.cifar10 import cifar10_input
from model_pruning.python import pruning_interface
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN # pylint: disable=line-too-long
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
BATCH_SIZE = 128
DATA_DIR = '/tmp/cifar10_data'
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.summary.histogram(tensor_name + '/activations', x)
tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
dtype = tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight decay
is not added for this Variable.
Returns:
Variable Tensor
"""
dtype = tf.float32
var = _variable_on_cpu(
name, shape, tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def distorted_inputs():
"""Construct distorted input for CIFAR training using the Reader ops.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not DATA_DIR:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(DATA_DIR, 'cifar-10-batches-bin')
images, labels = cifar10_input.distorted_inputs(
data_dir=data_dir, batch_size=BATCH_SIZE)
return images, labels
def inputs(eval_data):
"""Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not DATA_DIR:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(DATA_DIR, 'cifar-10-batches-bin')
images, labels = cifar10_input.inputs(
eval_data=eval_data, data_dir=data_dir, batch_size=BATCH_SIZE)
return images, labels
def inference(images, matrix_compression_obj):
"""Build the CIFAR-10 model.
Args:
images: Images returned from distorted_inputs() or inputs().
matrix_compression_obj: A Pruning or
compression_lib.compression_op.ApplyCompression object.
Returns:
Logits.
"""
  # We instantiate all variables using tf.get_variable() instead of
  # tf.Variable() in order to share variables across multiple GPU training
  # runs. If we only ran this model on a single GPU, we could simplify this
  # function by replacing all instances of tf.get_variable() with
  # tf.Variable().
#
# While instantiating conv and local layers, we add mask and threshold
# variables to the layer by calling the
# pruning_interface.apply_matrix_compression function.
# Note that the masks are applied only to the weight tensors
# conv1
with tf.variable_scope('conv1') as scope:
kernel = _variable_with_weight_decay(
'weights', shape=[5, 5, 3, 64], stddev=5e-2, wd=0.0)
conv = tf.nn.conv2d(
images, kernel,
[1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
pre_activation = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv1)
# pool1
pool1 = tf.nn.max_pool(
conv1,
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding='SAME',
name='pool1')
# norm1
norm1 = tf.nn.lrn(
pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')
# conv2
with tf.variable_scope('conv2') as scope:
kernel = _variable_with_weight_decay(
'weights', shape=[5, 5, 64, 64], stddev=5e-2, wd=0.0)
conv = tf.nn.conv2d(
norm1, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
pre_activation = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv2)
# norm2
norm2 = tf.nn.lrn(
conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
# pool2
pool2 = tf.nn.max_pool(
norm2,
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding='SAME',
name='pool2')
# local3
with tf.variable_scope('local3') as scope:
# Move everything into depth so we can perform a single matrix multiply.
reshape = tf.reshape(pool2, [BATCH_SIZE, -1])
dim = reshape.get_shape()[1].value
weights = _variable_with_weight_decay(
'weights', shape=[dim, 384], stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
local3 = tf.nn.relu(
tf.matmul(reshape, pruning_interface.apply_matrix_compression(
matrix_compression_obj, weights, scope)) + biases,
name=scope.name)
_activation_summary(local3)
# local4
with tf.variable_scope('local4') as scope:
weights = _variable_with_weight_decay(
'weights', shape=[384, 192], stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
local4 = tf.nn.relu(
tf.matmul(local3, pruning_interface.apply_matrix_compression(
matrix_compression_obj, weights, scope)) + biases,
name=scope.name)
_activation_summary(local4)
  # Linear layer (WX + b). We don't apply softmax here because
  # tf.nn.sparse_softmax_cross_entropy_with_logits accepts unscaled logits
  # and performs the softmax internally for efficiency.
with tf.variable_scope('softmax_linear') as scope:
weights = _variable_with_weight_decay(
'weights', [192, NUM_CLASSES], stddev=1 / 192.0, wd=0.0)
biases = _variable_on_cpu('biases', [NUM_CLASSES],
tf.constant_initializer(0.0))
softmax_linear = tf.add(
tf.matmul(local4, pruning_interface.apply_matrix_compression(
matrix_compression_obj, weights, scope)),
biases,
name=scope.name)
_activation_summary(softmax_linear)
return softmax_linear
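# Illustrative sketch added for clarity; not part of the original file. It
# shows one hypothetical way to wire inputs() and inference() into an
# evaluation graph; the top-k check mirrors the usual CIFAR-10 eval pattern,
# and the compression object is assumed to be constructed elsewhere.
def _example_build_eval_graph(matrix_compression_obj):
  """Hypothetical evaluation-graph sketch (illustrative only)."""
  images, labels = inputs(eval_data=True)
  logits = inference(images, matrix_compression_obj)
  # True for each example whose top prediction matches its label.
  top_k_op = tf.nn.in_top_k(logits, labels, 1)
  return top_k_op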
def loss(logits, labels):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor of shape
[batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
"""Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
    # Name the raw loss summary with a ' (raw)' suffix and give the
    # moving-average version the original loss name.
tf.summary.scalar(l.op.name + ' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l))
return loss_averages_op
def train(total_loss, global_step):
"""Train CIFAR-10 model.
Create an optimizer and apply to all trainable variables. Add moving
average for all trainable variables.
Args:
total_loss: Total loss from loss().
global_step: Integer Variable counting the number of training steps
processed.
Returns:
train_op: op for training.
"""
# Variables that affect learning rate.
num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / BATCH_SIZE
decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(
INITIAL_LEARNING_RATE,
global_step,
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
tf.summary.scalar('learning_rate', lr)
# Generate moving averages of all losses and associated summaries.
loss_averages_op = _add_loss_summaries(total_loss)
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.GradientDescentOptimizer(lr)
grads = opt.compute_gradients(total_loss)
# Apply gradients.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
tf.summary.histogram(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY,
global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
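# Illustrative sketch added for clarity; not part of the original file. A
# minimal single-process training loop built from the helpers above; the
# session type, step count, and global-step handling are assumptions, and the
# project's real training script may differ.
def _example_train_loop(matrix_compression_obj, max_steps=100):
  """Hypothetical end-to-end training loop (illustrative only)."""
  global_step = tf.train.get_or_create_global_step()
  images, labels = distorted_inputs()
  logits = inference(images, matrix_compression_obj)
  total_loss = loss(logits, labels)
  train_op = train(total_loss, global_step)
  # MonitoredTrainingSession initializes variables and starts queue runners.
  with tf.train.MonitoredTrainingSession() as sess:
    for _ in range(max_steps):
      sess.run(train_op)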
def maybe_download_and_extract():
"""Download and extract the tarball from Alex's website."""
dest_directory = DATA_DIR
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write(
'\r>> Downloading %s %.1f%%' %
(filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
|
the-stack_0_7809 | #!/usr/bin/python3 -OO
# Copyright 2007-2019 The SABnzbd-Team <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
tests.test_newsunpack - Tests of various functions in newsunpack
"""
import pytest
from sabnzbd.newsunpack import *
class TestNewsUnpack:
@pytest.mark.parametrize(
"test_input, expected_output",
[
(["cmd1", 9, "cmd3"], '"cmd1" "9" "cmd3"'), # sending all commands as valid string
(["", "cmd1", "5"], '"" "cmd1" "5"'), # sending blank string
(["cmd1", None, "cmd3", "tail -f"], '"cmd1" "" "cmd3" "tail -f"'), # sending None in command
(["cmd1", 0, "ps ux"], '"cmd1" "" "ps ux"'), # sending 0
],
)
def test_list_to_cmd(self, test_input, expected_output):
""" Test to convert list to a cmd.exe-compatible command string """
res = list2cmdline(test_input)
# Make sure the output is cmd.exe-compatible
assert res == expected_output
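# Illustrative note added for clarity; not part of the original test suite.
# Assuming sabnzbd.newsunpack.list2cmdline quotes each element as the cases
# above expect, a direct call would look like:
#
#   >>> list2cmdline(["unrar", "x", "-y", "archive.rar"])
#   '"unrar" "x" "-y" "archive.rar"'
#
# This module can be run on its own with, for example:
#   pytest tests/test_newsunpack.py -v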
|