"""
A demo of GMM (eventually versus DPMM) clustering of hand drawn digits data
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import metrics
from sklearn.mixture import GMM
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
def make_ellipses(gmm, ax, n_components):
    for n in range(n_components):
v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2])
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v *= 9
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=plt.cm.spectral(n / 10.))
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('init         time  log-prob    homo   compl  v-meas     ARI     AMI  silhouette')
def bench_gmm(estimator, name, data):
t0 = time()
estimator.fit(data)
logprob = estimator.score(data)
predicted_labels = estimator.predict(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), logprob.sum(),
metrics.homogeneity_score(labels, predicted_labels),
metrics.completeness_score(labels, predicted_labels),
metrics.v_measure_score(labels, predicted_labels),
metrics.adjusted_rand_score(labels, predicted_labels),
metrics.adjusted_mutual_info_score(labels, predicted_labels),
metrics.silhouette_score(data, predicted_labels,
metric='euclidean',
sample_size=sample_size)))
#bench_gmm(GMM(n_components=n_digits, covariance_type='tied', init_params='wmc', n_init=20, n_iter=100),
#name="GMM tied", data=data)
#bench_gmm(GMM(n_components=n_digits, covariance_type='full', init_params='wmc', n_init=20, n_iter=100),
#name="GMM full", data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data using colours and ellipses
reduced_data = PCA(n_components=2).fit_transform(data)
gmm = GMM(n_components=n_digits, covariance_type='full', init_params='wc', n_init=20, n_iter=100)
# Cheat by initializing the means to the means of the labeled data points
gmm.means_ = np.array([reduced_data[digits.target == i].mean(axis=0)
                       for i in range(n_digits)])
t0 = time()
gmm.fit(reduced_data)
print("Model fitting done in %.3f" % (time() - t0))
plt.figure(1)
plt.clf()
h = plt.subplot(1,1,1)
make_ellipses(gmm,h,n_digits)
for n in range(n_digits):
digit_data = reduced_data[digits.target == n]
plt.scatter(digit_data[:, 0], digit_data[:, 1], 0.8, color=plt.cm.spectral(n / 10.),
label=digits.target_names[n])
# Plot the means as a white X
centroids = gmm.means_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('Gaussian Mixture Model clustering on the digits dataset (PCA-reduced data)\n'
'Means are marked with white cross')
plt.xticks(())
plt.yticks(())
plt.show()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from hypothesis import given
from hypothesis import strategies as st
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import numpy as np
import unittest
class TestMathOps(serial.SerializedTestCase):
@given(X=hu.tensor(),
exponent=st.floats(min_value=2.0, max_value=3.0),
**hu.gcs)
def test_elementwise_power(self, X, exponent, gc, dc):
def powf(X):
return (X ** exponent,)
def powf_grad(g_out, outputs, fwd_inputs):
return (exponent * (fwd_inputs[0] ** (exponent - 1)) * g_out,)
op = core.CreateOperator(
"Pow", ["X"], ["Y"], exponent=exponent)
        self.assertReferenceChecks(gc, op, [X], powf,
                                   output_to_grad="Y",
                                   grad_reference=powf_grad)
@serial.given(X=hu.tensor(),
exponent=st.floats(min_value=-3.0, max_value=3.0),
**hu.gcs)
def test_sign(self, X, exponent, gc, dc):
def signf(X):
return [np.sign(X)]
op = core.CreateOperator(
"Sign", ["X"], ["Y"])
        self.assertReferenceChecks(gc, op, [X], signf)
self.assertDeviceChecks(dc, op, [X], [0])
if __name__ == "__main__":
unittest.main()
|
class Solution:
"""
Clarifications:
    1. The division (/) corresponds to floor division (//)
    2. The integers can have multiple digits
    3. Operator precedence: (*, /) first, then (+, -), evaluated from left to right
    Intuition:
    1. Extract the expression's terms one by one
    2. Compute (*, /) first, then (+, -)
    3. To compute the * and / operators, use a stack
    4. To compute the + and - operators, use a queue (because of the left-to-right evaluation)
    E.g. 4 * 3 + 20 / 2:
    _Operation: * /|__Stack of Nbrs__|__Queue of Operators__
    1.Extract: 4   | None            | None
    2.Push(4)      | 4               | None
    3.Extract: *   | 4               | None
    4.Extract: 3   | 4               | None
    5.Pop: 4       | None            | None
    6.Push: 4 * 3  | 12              | None
    7.Extract: +   | 12              | None
    8.Push: +      | 12              | +
    9.Extract: 20  | 12              | +
    10.Push: 20    | 12 20           | +
    11.Extract: /  | 12 20           | +
    12.Extract: 2  | 12 20           | +
    13.Pop: 20     | 12              | +
    14.Push: 20 / 2| 12 10           | +
    _Operation: + -|__Stack of Nbrs__|__Queue of Operators__
    15.Dequeue a=12| 10              | +
    16.Dequeue +   | 10              | None
    17.Dequeue 10  | None            | None
    18.a += 10     | None            | None
    19.Return a=22 | None            | None
    E.g. 1-1+1:
    _Operation: * /|__Stack of Nbrs__|__Queue of Operators__
    1.Extract: 1   | None            | None
    2.Push(1)      | 1               | None
    3.Extract: -   | 1               | None
    4.Push(-)      | 1               | -
    5.Extract: 1   | 1               | -
    6.Push(1)      | 1 1             | -
    7.Extract: +   | 1 1             | -
    8.Push(+)      | 1 1             | - +
    9.Extract: 1   | 1 1             | - +
    10.Push(1)     | 1 1 1           | - +
    _Operation: + -|__Stack of Nbrs__|__Queue of Operators__
    11.Dequeue a=1 | 1 1             | - +
    12.Dequeue -   | 1 1             | +
    13.Dequeue 1   | 1               | +
    14.a -= 1 = 0  | 1               | +
    15.Dequeue +   | 1               | None
    16.Dequeue 1   | None            | None
    17.a += 1      | None            | None
    18.Return a=1  | None            | None
Time and Space Analysis:
Time Complexity: O(|s|)
Space Complexity: O(|s|)
"""
def calculate(self, s: str) -> int:
# 1. Computes * and /
nbr_stack = []
opr_stack = []
len_s = len(s)
i, a = self._extract_term(s, 0)
nbr_stack.append(int(a))
while i < len_s:
i, opr = self._extract_term(s, i)
if opr in '-+':
opr_stack.append(opr)
i, a = self._extract_term(s, i)
nbr_stack.append(int(a))
else:
a = nbr_stack.pop()
i, b = self._extract_term(s, i)
if opr == '/':
nbr_stack.append(a //int(b))
else:
nbr_stack.append(a * int(b))
# 2. Compute + and -
opr_idx = 0
nbr_idx = 1
a = nbr_stack[0]
while nbr_idx < len(nbr_stack) and opr_idx < len(opr_stack):
if opr_stack[opr_idx] == '+':
a += nbr_stack[nbr_idx]
else:
a -= nbr_stack[nbr_idx]
nbr_idx += 1
opr_idx += 1
return a
def _extract_term(self, s: str, start: int) -> (int, str):
s_len = len(s)
# 1. Skip spaces
while start < s_len and s[start] == ' ':
start += 1
if start == s_len:
return (start, '')
# 2. Extract an operator
if s[start] in '-+*/':
return (start + 1, s[start])
# 3. Extract a number
i = start
while i < s_len and not s[i] in '-+*/':
i += 1
        return (i, s[start:i])
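# Quick self-check of the Solution above against the two worked examples from the docstring
# (a hypothetical driver block, not part of the original solution):
if __name__ == "__main__":
    solver = Solution()
    assert solver.calculate("4 * 3 + 20 / 2") == 22
    assert solver.calculate("1-1+1") == 1
|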
from pydantic import BaseModel
class ExampleOut(BaseModel):
name: str
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: Ampel-ZTF/ampel/template/ZTFLegacyChannelTemplate.py
# License: BSD-3-Clause
# Author: valery brinnel <[email protected]>
# Date: 16.10.2019
# Last Modified Date: 30.05.2021
# Last Modified By: valery brinnel <[email protected]>
from typing import Any, ClassVar
from ampel.log.AmpelLogger import AmpelLogger
from ampel.config.builder.FirstPassConfig import FirstPassConfig
from ampel.template.AbsEasyChannelTemplate import AbsEasyChannelTemplate
from ampel.model.ingest.T2Compute import T2Compute
class ZTFLegacyChannelTemplate(AbsEasyChannelTemplate):
"""
    Channel template for ZTF. Each of the named variants consumes a different
    alert stream from IPAC and produces stocks with a different set of tags:
============== ============== ========================
Template ZTF programids Tags
============== ============== ========================
ztf_uw_private 1, 2, 3_public ZTF, ZTF_PUB, ZTF_PRIV
ztf_uw_public 1, 3_public ZTF, ZTF_PUB
============== ============== ========================
"""
# static variables (ClassVar type) are ignored by pydantic
_access: ClassVar[dict[str, list[str]]] = {
"ztf_uw_private": ["ZTF", "ZTF_PUB", "ZTF_PRIV"],
"ztf_uw_public": ["ZTF", "ZTF_PUB"],
"ztf_uw_caltech": ["ZTF", "ZTF_PUB"],
}
auto_complete: Any = False
#: include all previously ingested photopoints in emitted states
live_history: bool = True
#: include X days of archival datapoints in emitted states
archive_history: None | int = None
# Mandatory implementation
def get_channel(self, logger: AmpelLogger) -> dict[str, Any]:
assert self.template is not None
return {
**super().get_channel(logger),
"access": self.__class__._access[self.template],
}
# Mandatory implementation
def get_processes(
self, logger: AmpelLogger, first_pass_config: FirstPassConfig
) -> list[dict[str, Any]]:
# T3 processes
ret: list[dict[str, Any]] = []
for index, el in enumerate(self.t3_supervise):
# populate name and tier if unset
name = el.get("name", f"summary_{index:02d}")
process_name = f"{self.channel}|T3|{name}"
ret.append(
self.transfer_channel_parameters(el | {"name": process_name, "tier": 3})
)
if not any(model.unit == "T2LightCurveSummary" for model in self.t2_compute):
self.t2_compute.append(T2Compute(unit="T2LightCurveSummary"))
mongo_muxer = {"unit": "ZiMongoMuxer"} if self.live_history else None
archive_muxer = (
{"unit": "ZiArchiveMuxer", "config": {"history_days": self.archive_history}}
if self.archive_history is not None
else None
)
if mongo_muxer and archive_muxer:
muxer: None | dict[str,Any] = {
"unit": "ChainedT0Muxer",
"config": {"muxers": [mongo_muxer, archive_muxer]},
}
elif mongo_muxer:
muxer = mongo_muxer
elif archive_muxer:
muxer = archive_muxer
else:
muxer = None
supplier = {
"unit": "ZiAlertSupplier",
"config": {
"loader": {
"unit": "UWAlertLoader",
"config": {
**first_pass_config["resource"]["ampel-ztf/kafka"],
**{"stream": self.template},
},
}
}
}
ret.insert(
0,
self.craft_t0_process(
first_pass_config,
controller="ZTFAlertStreamController",
supplier=supplier,
shaper="ZiDataPointShaper",
muxer=muxer,
combiner={"unit": "ZiT1Combiner", "config": {"access": self.access, "policy": self.policy}},
),
)
return ret
|
# For each test case: b = number of beats counted, p = measurement period in seconds.
n = int(input())
for i in range(n):
    b, p = input().split()
    b, p = int(b), float(p)
    # The count of b beats may be off by one either way, so bound the true rate with b - 1 and b + 1 beats.
    max_time = p / (b - 1)   # longest plausible time per beat
    min_time = p / (b + 1)   # shortest plausible time per beat
    min_abpm = 60 / max_time
    max_abpm = 60 / min_time
    bpm = (60 * b) / p       # point estimate in beats per minute
    print(min_abpm, bpm, max_abpm)
|
import builtins
from infra.userportal.functions.topology import RivUserPortalFunctionSet
from infra.userportal.states.interfaces import RivStateMachineConstruct
from infra.interfaces import IVpcRivStack
from constructs import Construct
from aws_cdk import (
aws_stepfunctions as sf,
aws_stepfunctions_tasks as sft,
)
class RegisterStateMachine(RivStateMachineConstruct):
def __init__(self, scope: Construct, id: builtins.str, riv_stack: IVpcRivStack, functions: RivUserPortalFunctionSet, state_machine_type:sf.StateMachineType) -> None:
super().__init__(scope, id, riv_stack, functions, state_machine_type=state_machine_type)
'''
Check if this is a valid image...
'''
detect = sft.LambdaInvoke(self,'Check-ImageQuality',
lambda_function=functions.detect_faces.function,
input_path='$.inputRequest',
result_path='$.detection',
output_path='$',
invocation_type= sft.LambdaInvocationType.REQUEST_RESPONSE)
'''
Check if the user already exists...
'''
search = sft.LambdaInvoke(self,'Search-ExistingFaces',
lambda_function=functions.search_faces_by_image.function,
input_path='$.inputRequest',
result_path='$.search',
output_path='$',
invocation_type= sft.LambdaInvocationType.REQUEST_RESPONSE)
detect.next(search)
'''
Index the user and complete the operation...
'''
index = sft.LambdaInvoke(self,'Index-FaceInfo',
lambda_function=functions.index_faces.function,
input_path='$.inputRequest',
output_path='$',
result_path='$.index',
invocation_type= sft.LambdaInvocationType.REQUEST_RESPONSE)
'''
Stitch everything together...
'''
user_exists = sf.Choice(self,'Check-SearchResults')
user_exists.when(
condition= sf.Condition.string_equals('$.search.Payload.TopMatch.Face.ExternalImageId',"Special:RIV_NO_FACE_MATCH"),
next=index)
user_exists.when(
condition= sf.Condition.boolean_equals('$.search.Payload.TopMatch.Face.IsCallerUser',True),
next=index)
user_exists.otherwise(
sf.Fail(self,'UserAlreadyExistsError',
error='UserAlreadyExists',
                cause='Cannot register the same face twice in the same collection.'))
search.next(user_exists)
# Format the message into API Gateway Model
index.next(sf.Pass(self,'Registration-Complete',
parameters={
'UserId.$': '$.inputRequest.UserId',
'ImageId.$': '$.index.Payload.FaceRecord.Face.ImageId',
'Status': 'Registered'
}))
self.set_state_machine(
state_machine_name='{}-UserPortal-Register_User'.format(self.riv_stack.riv_stack_name),
definition=detect)
|
from aiohttp import web
from aiohttp_jsonrpc import handler
class JSONRPCExample(handler.JSONRPCView):
def rpc_test(self):
return None
def rpc_args(self, *args):
return len(args)
def rpc_kwargs(self, **kwargs):
return len(kwargs)
def rpc_args_kwargs(self, *args, **kwargs):
return len(args) + len(kwargs)
def rpc_exception(self):
raise Exception("YEEEEEE!!!")
app = web.Application()
app.router.add_route('*', '/', JSONRPCExample)
if __name__ == "__main__":
import logging
logging.basicConfig(level=logging.INFO)
web.run_app(app, print=logging.info)
|
"""
Author: Daniele Linguagossa
Heap CTF binary fuzzing made easy
"""
from pwn import *
import random
import struct
import re
import os
class Vulnerability():
vulns = {
'1': 'HEAP WRITE OOB',
'2': 'HEAP READ OOB',
'3': 'FREE NON ALLOC',
'4': 'DOUBLE FREE',
'5': 'USE_AFTER_FREE',
'6': 'SEGMENTATION FAULT'
}
def __init__(self, data):
data = data.split("-")
self.kind = data[0]
self.addr = data[1]
self.orgsize = data[2]
self.newsize = data[3]
def __str__(self):
return "Found {} on {} size: {} new size: {}".format(self.vulns[self.kind], self.addr, self.orgsize,
self.newsize)
class SELF():
pass
class InputType():
STRING = 1
NUMBER = 2
FORMAT = 3
CHOICE = 4
class ProcessRestart():
pass
class Input():
format_re = re.compile('(%[a-z])')
string_charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
def __init__(self, kind=None, choice=None, min=1, max=10, send_after=">", format=None, newline=True, end="\n",
after=None, map_choice=None):
self.type = kind
self.choice = choice
self.send_after = send_after
self.format = format
self.newline = newline
self.end = end
self.after = after
self.map_choice = map_choice
self.max = max
self.min = min
def _send(self, data, newline, callback):
callback(data, newline)
try:
if newline:
self.process.sendline(data)
else:
self.process.send_raw(data)
except:
pass
def _read_until(self):
try:
self.process.readuntil(self.send_after)
except:
pass
def _apply_post_hook(self, data):
if not self.newline:
data += self.end
return data
def _random_string(self, post_hook=True):
s = ""
l = random.randint(self.min, self.max)
for i in range(0, l):
s += self.string_charset[random.randint(0, len(self.string_charset) - 1)]
if post_hook:
return self._apply_post_hook(s)
else:
return s
def _random_int(self, post_hook=True):
n = random.randint(self.min, int(self.max))
if post_hook:
return self._apply_post_hook(str(n))
else:
return str(n)
def _random_format(self):
matches = self.format_re.findall(self.format)
data = self.format
for match in matches:
if match == "%s":
data = data.replace(match, self._random_string(post_hook=False), 1)
else:
data = str(data).replace(match, self._random_int(post_hook=False), 1)
return self._apply_post_hook(data)
def add_map_choice(self, map_choice):
self.map_choice = map_choice
def add_after(self, after):
self.after = after
def run(self, process, callback):
poll = process.poll()
        if poll is not None:
process.close()
return ProcessRestart()
self.process = process
self._read_until()
if self.type == InputType.STRING:
self._send(self._random_string(), self.newline, callback)
return self.after
elif self.type == InputType.CHOICE:
if self.choice is not None:
idx = random.randint(0, len(self.choice) - 1)
self._send(self.choice[idx], self.newline, callback)
if isinstance(self.map_choice[idx], SELF):
return self
else:
return self.map_choice[idx]
elif self.type == InputType.NUMBER:
self._send(self._random_int(), self.newline, callback)
return self.after
elif self.type == InputType.FORMAT:
self._send(self._random_format(), self.newline, callback)
return self.after
class HeapFuzz():
def __init__(self, bin, pipe="/tmp/heapfuzz", preload_lib="./heapfuzz.so"):
self.preload_lib = preload_lib
self._configure()
self.process = process(bin)
self.pipe_name = pipe
self.bin = bin
self._open_pipe()
self.vulnerabilities = {}
self.trigger = []
def _configure(self):
with open('/proc/sys/kernel/randomize_va_space', 'r') as aslr:
enabled = int(aslr.read())
if enabled:
log.warn("Please disable ASLR with 'echo 0 | sudo tee /proc/sys/kernel/randomize_va_space'!")
sys.exit(0)
aslr.close()
context.log_level = "warn"
os.environ["LD_PRELOAD"] = self.preload_lib
os.environ["USE_HEAPFUZZ"] = "1"
def _open_pipe(self):
self.pipe_fd = os.open(self.pipe_name, os.O_RDONLY | os.O_NONBLOCK)
def _restart(self):
try:
self.process.close()
except: pass
self.process = process(self.bin)
os.close(self.pipe_fd)
self._open_pipe()
self.trigger = []
def _read_from_pipe(self):
try:
l = os.read(self.pipe_fd, 4)
length = struct.unpack("<I", l)[0]
data = os.read(self.pipe_fd, length)
return data
except:
pass
def _parse_vulnerability(self, data):
if data:
l = str(Vulnerability(data))
try:
self.vulnerabilities[hash(l)]
except KeyError:
self.vulnerabilities.update({hash(l): self.trigger})
log.warn(Vulnerability(data))
log.warn("Triggered with:\n"+"\t"+"\n\t".join(self.trigger)+"\n")
def _send_callback(self, data, newline):
self.trigger.append("Sending '{}' {}".format(data, "with newline" if newline else "without newline"))
def start(self, init):
ret = init.run(self.process, self._send_callback)
while True:
try:
if ret is None:
break
elif isinstance(ret, ProcessRestart):
self._restart()
ret = init.run(self.process, self._send_callback)
else:
self._parse_vulnerability(self._read_from_pipe())
ret = ret.run(self.process, self._send_callback)
except KeyboardInterrupt:
self.process.close()
exit(0)
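# Minimal usage sketch, assuming a hypothetical menu-driven target binary ("./target") whose prompt is ">",
# with option 1 taking a size, option 2 taking a string and option 3 quitting; heapfuzz.so and the
# /tmp/heapfuzz pipe must exist. Adapt the Input chain to the binary actually under test.
if __name__ == "__main__":
    menu = Input(kind=InputType.CHOICE, choice=["1", "2", "3"], send_after=">")
    alloc = Input(kind=InputType.NUMBER, min=1, max=64, after=menu)
    write = Input(kind=InputType.STRING, min=1, max=32, after=menu)
    menu.add_map_choice([alloc, write, None])  # choosing "3" ends the fuzzing loop
    fuzz = HeapFuzz("./target", preload_lib="./heapfuzz.so")
    fuzz.start(menu)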
|
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
import os
import sys
import numpy as np
import cv2
currentfile = '/'.join(__file__.split('/')[:-1])
img_path = os.path.join(currentfile,'images')
def put_chess(l_img,s_img,cord):
l_img.flags.writeable = True
x_offset, y_offset = cord
y1, y2 = y_offset, y_offset + s_img.shape[0]
x1, x2 = x_offset, x_offset + s_img.shape[1]
alpha_s = s_img[:, :, 3] / 255.0
alpha_l = 1.0 - alpha_s
for c in range(0, 3):
l_img[y1:y2, x1:x2, c] = (alpha_s * s_img[:, :, c] +
alpha_l * l_img[y1:y2, x1:x2, c])
def piece2img(piece):
if piece in 'RNBAKRPC'.lower():
return "B{}.GIF".format(piece.upper())
else:
return "R{}.GIF".format(piece.upper())
imgdic = {}
SEL = plt.imread(os.path.join(img_path,'OOS.GIF'))
for i in 'RNBAKRPC':
picname = piece2img(i)
picurl = os.path.join(img_path,picname)
imgdic[i] = plt.imread(picurl)
for i in 'RNBAKRPC'.lower():
picname = piece2img(i)
picurl = os.path.join(img_path,picname)
imgdic[i] = plt.imread(picurl)
def get_board_img(board,action=None):
board_im = plt.imread(os.path.join(img_path,'WHITE.GIF'))
def string_reverse(string):
# return ''.join(string[len(string) - i] for i in range(1, len(string)+1))
return ''.join(string[i] for i in range(len(string) - 1, -1, -1))
x_trans = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6, 'h': 7, 'i': 8}
if(action != None):
src = action[0:2]
to = action[2:]
src_x = int(x_trans[src[0]])
src_y = int(src[1])
to_x = int(x_trans[to[0]])
to_y = int(to[1])
# board = string_reverse(board)
board = board.replace("1", " ")
board = board.replace("2", " ")
board = board.replace("3", " ")
board = board.replace("4", " ")
board = board.replace("5", " ")
board = board.replace("6", " ")
board = board.replace("7", " ")
board = board.replace("8", " ")
board = board.replace("9", " ")
board = board.split('/')
# board = board.replace("/", "\n")
for i in range(9):
cv2.putText(board_im,'abcdefghi'[i],(20 + 40 * i,17),cv2.FONT_HERSHEY_COMPLEX,0.7,(0,0,0,255),1)
cv2.putText(board_im,'abcdefghi'[i],(20 + 40 * i,410),cv2.FONT_HERSHEY_COMPLEX,0.7,(0,0,0,255),1)
for i in range(10):
cv2.putText(board_im,'9876543210'[i],(5,33 + 40 * i),cv2.FONT_HERSHEY_COMPLEX,0.7,(0,0,0,255),1)
cv2.putText(board_im,'9876543210'[i],(355,33 + 40 * i),cv2.FONT_HERSHEY_COMPLEX,0.7,(0,0,0,255),1)
for i in range(9):
#put_chess(board,king,(10 + 40 * i,10))
for j in range(10):
piece = board[9 - j][i]
if piece.strip():
put_chess(board_im,imgdic[piece],(8 + 40 * i,10 + 40 * j))
if(action != None):
put_chess(board_im,SEL,(8 + 40 * src_x,10 + 40 * (9 - src_y)))
put_chess(board_im,SEL,(8 + 40 * to_x,10 + 40 * (9 - to_y)))
return board_im
#print(" abcdefghi")
#for i,line in enumerate(board):
# if (action != None):
# if(i == src_y):
# s = list(line)
# s[src_x] = 'x'
# line = ''.join(s)
#    print(i,line)
|
#!/usr/bin/env python
import curses
import time
import rospy
import sys
import signal
import numpy as np
import math
import datetime
from tr2py.tr2 import TR2
tr2 = TR2()
tr2.setMode(tr2.mode_rotate)
tr2.release()
jointSelected = 0
joints = ["b0", "a0", "a1", "a2", "a3", "a4", "g0", "h0", "h1"]
modeSelected = 0
modes = ["ROTATE","BACKDRIVE","SERVO"]
stop = "FALSE"
def program():
global jointSelected, joints, modeSelected, modes, stop, tr2
rospy.init_node('tr2_keyboard_teleop', anonymous=True)
screen = curses.initscr()
curses.noecho()
curses.cbreak()
screen.keypad(True)
screen.nodelay(1)
cmd = 0
prevMode = modeSelected
try:
while True:
char = screen.getch()
if char == ord(' '): # emergency stop
if stop == "FALSE":
stop = "TRUE"
tr2.stop()
else:
stop = "FALSE"
tr2.release()
elif char == ord('q'): break
            elif char == ord('d'): modeSelected = (modeSelected + 1) % len(modes)
            elif char == ord('a'): modeSelected = (modeSelected - 1) % len(modes)
            elif char == curses.KEY_UP: jointSelected = (jointSelected + 1) % len(joints)
            elif char == curses.KEY_DOWN: jointSelected = (jointSelected - 1) % len(joints)
elif char == curses.KEY_LEFT: cmd = -1
elif char == curses.KEY_RIGHT: cmd = 1
else:
cmd = 0
aid = joints[jointSelected]
            if modeSelected != prevMode:
                if modeSelected == 0:
                    getattr(tr2, aid).setMode(tr2.mode_rotate)
                elif modeSelected == 1:
                    getattr(tr2, aid).setMode(tr2.mode_backdrive)
                elif modeSelected == 2:
                    getattr(tr2, aid).setMode(tr2.mode_servo)
                prevMode = modeSelected
if modeSelected == 0:
if (cmd != 0):
getattr(tr2, aid).actuate(cmd, 100)
elif modeSelected == 2:
getattr(tr2, aid).setPosition(cmd, 100)
tr2.step()
screen.clrtobot()
screen.addstr(0,0,"Welcome. Use arrow keys to control. Press 'Q' to exit, SPACE to emergency stop the robot.")
screen.addstr(1,0,"ACTUATOR: " + aid)
screen.addstr(2,0,"MODE: " + modes[modeSelected])
screen.addstr(3,0,"CMD: " + str(cmd) + " ")
screen.addstr(4,0,"STOP: " + stop)
screen.addstr(5,0,"INPUT: " + str(char))
screen.refresh()
curses.flushinp()
time.sleep(0.05)
finally:
screen.addstr(0,0,"exit")
curses.nocbreak()
screen.keypad(0)
curses.echo()
curses.endwin()
if __name__ == '__main__':
program()
|
"""
PEP 287: reStructuredText Docstring Format
https://www.python.org/dev/peps/pep-0287/
"""
from __future__ import absolute_import, unicode_literals
import os
import re
import astroid
from pylint import checkers
from pylint import interfaces
from pylint.checkers import utils
class PEP287Checker(checkers.BaseChecker):
"""
Enforce PEP287 reStructuredText docstring format.
"""
__implements__ = interfaces.IAstroidChecker
param_keywords = ["return", "returns", "param", "raises"]
name = "PEP287"
msgs = {
"E8010": (
"'%s' has no return mentioned", "PEP287-no-return",
"Please document return statement"),
"E8011": (
"'%s' has undocumented return statement", "PEP287-no-doc-return",
"Please specify what return statement is going to return back"),
"E8012": (
"Variable arguments are not described in the docstring of '%s'", "PEP287-no-varargs",
"Please write documentation of the arguments"),
"E8013": (
"'%s' has undocumented varargs", "PEP287-no-doc-varargs",
"Please write documentation for the varargs"),
"E8014": (
"Keyword arguments are not mentioned and not described in the docstring of '%s'", "PEP287-no-kwargs",
"Please describe kwargs what they are for"),
"E8015": (
"Parameter '%s' is missing explanation in %s", "PEP287-undocumented-param",
"Please add a short explanation about this parameter: what it does and/or what accepts"),
"E8016": (
"Parameter '%s' is not mentioned in the docstring of %s at all", "PEP287-doc-missing-param",
"Please document this parameter"),
"E8017": (
"Parameter '%s' is mentioned in the docstring, but is not in the function signature ('%s')",
"PEP287-excessive-param", "Please document this parameter"),
"E8018": (
"Docstring of '%s' does not contain main explanation.",
"PEP287-main-explanation-missing", "Please document this function what it does"),
"E8019": (
"One line expected between main explanation and parameters block in '%s'",
"PEP287-line-after-main-explanation",
"Before :param or :return one line is needed after the main explanation"),
"E8020": (
"Parameters block in '%s' is not the last one",
"PEP287-params-block-last",
"Parameters block should be the last one"),
"E8021": (
"Docstring in '%s' contains tabs instead of four spaces.",
"PEP287-tabs",
"Please do not use tabs, but four spaces instead."),
"E8022": (
"Code raises %s but the docstring doesn't mention that.",
"PEP287-raises-missing",
"Add to the docstring the info about what exceptions are being raised."),
"E8023": (
"Code does not raises %s as docstring describes.",
"PEP287-superfluous-raises",
"Please remove from the docstring superfluous data."),
"E8024": (
"Docstring is missing explanation why %s is raised.",
"PEP287-doc-why-raised-missing",
"Please explain why this explanation is raised."),
"E8025": (
"The syntax is ':raises %s:', i.e. it should end with the semi-colon, when describing the exception.",
"PEP287-doc-raised-wrong-syntax",
"Please add a semi-colon."),
"E8026": (
"Got E8019 as well? Just use ':raises' instead of '%s' in function '%s'.",
"PEP287-doc-raises-instead-raise",
"Although 'raise' is valid keyword, still please use 'raises'."),
}
def _cleanup_spaces(self, data):
"""
        Collapse two or more consecutive whitespace characters into one space.
:param data:
:return:
"""
return re.sub(r"\s+", " ", data)
def _parse_param(self, line):
"""
Parse one param.
:param line:
:return:
"""
tokens = self._cleanup_spaces(line).split(" ", 2)
_, arg, doc = tokens + ["" for _ in range(3 - len(tokens))]
return arg.strip(":"), doc
def _parse_return(self, line):
"""
Parse return.
:param line:
:return:
"""
ret = self._cleanup_spaces(line).split(" ", 1)
return (ret + ['' for _ in range(2 - len(ret))])[-1]
def _check_raises_described(self, node, raised):
"""
Check if 'raises' is properly documented.
        :param node: function node
        :param raised: names of the exceptions raised by the code
        :return: None
"""
for line in node.doc.strip().split(os.linesep):
line = line.strip()
if line.startswith(":raises "):
exc_name = line.split(" ", 1)[-1].split(" ", 1)
if len(exc_name) == 1:
self.add_message("PEP287-doc-why-raised-missing", node=node,
args=('"{}"'.format(exc_name[0].replace(":", "")),))
elif not exc_name[0].endswith(":"):
self.add_message("PEP287-doc-raised-wrong-syntax", node=node,
args=('"{}"'.format(exc_name[0]),))
def _get_doc_params(self, doc):
"""
Get documentation parameters.
:param doc:
:return:
"""
params = {}
for line in doc.split(os.linesep):
line = line.strip()
if not line:
continue
if line.startswith(":param "):
arg, doc = self._parse_param(line)
params[arg] = doc
if line.startswith(":return"):
params["return"] = self._parse_return(line)
return params
def _get_ident_len(self, line):
"""
        Get the indentation length of the line.
:param line:
:return: int
"""
return len([True for elm in line.split(" ") if not bool(elm)])
def _check_tabs(self, node):
"""
There shall be no tabs. Ever.
:param node: function node
:return: None
"""
if len(node.doc.split("\t")) > 1:
self.add_message("PEP287-tabs", node=node, args=(node.name, ))
def _check_explanation_block(self, node):
"""
Docstring should contain explanation block.
:param node: function node
:return: None
"""
docmap = []
kw_ident = -1
for idx, line in enumerate(node.doc.rstrip().split(os.linesep)):
if not idx:
continue # Skip newline after triple-quotes
s_line = line.strip()
if not s_line:
docmap.append("-")
elif s_line.startswith(":") and s_line.split(" ", 1)[0].strip(":") in self.param_keywords:
docmap.append(":")
kw_ident = max(self._get_ident_len(line), kw_ident)
else:
# a = self._get_ident_len(line)
docmap.append(":" if kw_ident > -1 and self._get_ident_len(line) > kw_ident else "#")
docmap = ''.join(docmap)
if "#:" in docmap or "--:" in docmap:
self.add_message("PEP287-line-after-main-explanation", node=node, args=(node.name,))
if "#" not in docmap:
self.add_message("PEP287-main-explanation-missing", node=node, args=(node.name,))
if not (docmap.strip(":") + ":").endswith("-:"):
self.add_message("PEP287-params-block-last", node=node, args=(node.name,))
def _compare_signature(self, node, d_pars, n_args):
"""
Find out what is missing.
        :param node: function node
        :param d_pars: Documentation parameters.
:param n_args: Node arguments.
:return:
"""
signature_names = []
# Varargs
if n_args.vararg:
signature_names.append(n_args.vararg)
if n_args.vararg and n_args.vararg not in d_pars:
self.add_message("PEP287-no-varargs", node=node, args=(node.name,))
# kwarg
if n_args.kwarg:
signature_names.append(n_args.kwarg)
if n_args.kwarg and n_args.kwarg not in d_pars:
self.add_message("PEP287-no-kwargs", node=node, args=(node.name,))
# other arguments
for idx, arg in enumerate(n_args.args):
signature_names.append(arg.name)
if idx == 0 and arg.name in ["cls", "self"] or arg.name.startswith("_"):
continue
if arg.name not in d_pars:
self.add_message("PEP287-doc-missing-param", node=node, args=(arg.name, node.name))
else:
if not d_pars[arg.name]:
self.add_message("PEP287-undocumented-param", node=node, args=(arg.name, node.name,))
for arg in d_pars:
if arg not in signature_names and arg not in ["return"]:
self.add_message("PEP287-excessive-param", node=node, args=(arg, node.name))
# returns
if "return" not in d_pars:
self.add_message("PEP287-no-return", node=node, args=(node.name,))
elif not d_pars["return"]:
self.add_message("PEP287-no-doc-return", node=node, args=(node.name,))
def what_raises(self, node, raises=None):
"""
        Return the names of the exceptions explicitly raised in the code.
:param node: function node
:param raises: Reserved for the internal data transfer
:return: List of explicitly raised exception class names
"""
if raises is None:
raises = []
for element in node.get_children():
if isinstance(element, astroid.node_classes.Raise):
if isinstance(element.exc, astroid.node_classes.Name):
raises.append('-') # skipper
elif element.exc is None and element.cause is None:
raises.append("-")
elif hasattr(element.exc.func, "name"):
raises.append(element.exc.func.name)
elif hasattr(element.exc.func, "attrname"):
raises.append(element.exc.func.attrname)
else:
raises.append("undetected exception")
else:
raises = self.what_raises(element, raises=raises)
return raises
def _check_raises(self, node):
"""
        Find out if a function raises something but does not
        document it, or vice versa.
:param node: function node
:return: None
"""
exceptions = list(set(self.what_raises(node)))
documented = 0
self._check_raises_described(node, raised=exceptions)
for line in node.doc.strip().split(os.linesep):
line = line.strip()
if line.startswith(":rais"):
keyword = line.split(" ", 1)[0]
if keyword == ":raise": # This is actually an error
self.add_message("PEP287-doc-raises-instead-raise", node=node, args=(keyword, node.name,))
elif keyword != ":raises":
self.add_message("PEP287-doc-raises-instead-raise", node=node, args=(keyword, node.name,))
exc_name = line.replace(":raises ", ":raise ").split(" ", 1)[-1].replace(":", "").split(" ")[0]
if exc_name not in exceptions and '-' not in exceptions:
self.add_message("PEP287-superfluous-raises", node=node, args=(exc_name,))
else:
documented += 1
if exc_name in exceptions:
exceptions.pop(exceptions.index(exc_name))
for exc_name in exceptions:
if exc_name.startswith("current exception") and documented:
continue
elif exc_name == "-":
continue
if not exc_name.startswith("current"):
exc_name = '"{}"'.format(exc_name)
self.add_message("PEP287-raises-missing", node=node, args=(exc_name,))
# Here we check if there are only skippers, i.e. something is re-raised but never documented:
if len([skp for skp in exceptions if skp == '-']) > documented:
self.add_message("PEP287-raises-missing", node=node,
args=("an exception in the function '{}'".format(node.name),))
@utils.check_messages('docstring-triple-quotes')
def visit_functiondef(self, node):
"""
        Validate a function's docstring: check the documented raises, tabs,
        the explanation block and parameter/signature consistency.
"""
if not node.name.startswith("__") and node.doc is not None:
self._check_raises(node)
if not node.name.startswith("_") and node.doc:
self._check_tabs(node)
self._check_explanation_block(node)
self._compare_signature(node, self._get_doc_params(node.doc), node.args)
def register(linter):
"""
Required method to auto register this checker
"""
linter.register_checker(PEP287Checker(linter))
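# Illustrative only (a hypothetical function, not part of the checker): a docstring laid out the way
# PEP287Checker expects it -- main explanation first, one blank line, then the parameters block last.
def _pep287_example(data, retries=3):
    """
    Frobnicate the payload and return the transformed result.

    :param data: payload to transform
    :param retries: how many times to retry on transient failures
    :raises ValueError: if the payload is empty
    :return: the transformed payload
    """
    if not data:
        raise ValueError("empty payload")
    return data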
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
_____________________________________________________________________________
This file contains code for utilities
_____________________________________________________________________________
"""
from pathlib import Path
import logging
from collections import OrderedDict
from config import _C
def get_cfg_defaults():
"""Get a yacs CfgNode object with default values for my_project."""
# Return a clone so that the defaults will not be altered
# This is for the "local variable" use pattern
return _C.clone()
def initial_logger():
logger = logging.getLogger(__name__)
if logger.hasHandlers():
logger.handlers.clear()
logger.setLevel(logging.INFO)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setFormatter(CustomFormatter())
logger.addHandler(ch)
return logger
class CustomFormatter(logging.Formatter):
"""Logging Formatter to add colors and count warning / errors"""
grey = "\x1b[38;21m"
yellow = "\x1b[33;21m"
red = "\x1b[31;21m"
bold_red = "\x1b[31;1m"
reset = "\x1b[0m"
# format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s (%(filename)s:%(lineno)d)"
# format = "%(asctime)s - %(levelname)s - %(message)s (%(filename)s:%(lineno)d)"
format = "%(levelname)s - %(message)s (%(filename)s:%(lineno)d)"
FORMATS = {
logging.DEBUG: grey + format + reset,
logging.INFO: grey + format + reset,
logging.WARNING: yellow + format + reset,
logging.ERROR: red + format + reset,
logging.CRITICAL: bold_red + format + reset
}
def format(self, record):
log_fmt = self.FORMATS.get(record.levelno)
formatter = logging.Formatter(log_fmt)
return formatter.format(record)
def experiment_loader(model_format='pth', data_path='./weight'):
    data_path = Path(data_path)
    if not data_path.exists():
        raise Exception(f"No experiment folder at {data_path}")
    if model_format == 'pth':
        saved_model = sorted(data_path.glob('*.pth'))
    elif model_format == 'onnx':
        saved_model = sorted(data_path.glob('*.onnx'))
    elif model_format == 'pt':
        saved_model = sorted(data_path.glob('*.pt'))
    elif model_format == 'trt':
        saved_model = sorted(data_path.glob('*.engine'))
    else:
        raise ValueError(f"Unsupported model format: {model_format}")
    saved_config = sorted(data_path.glob('*.yaml'))
    if len(saved_model) < 1:
        raise Exception(f"No model with format {model_format} in {data_path}")
    if len(saved_config) < 1:
        raise Exception(f"No config for model format {model_format} in {data_path}")
    return str(saved_model[0]), str(saved_config[0])
def copyStateDict(state_dict):
if list(state_dict.keys())[0].startswith("module"):
start_idx = 1
else:
start_idx = 0
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = ".".join(k.split(".")[start_idx:])
new_state_dict[name] = v
    return new_state_dict
|
from setuptools import setup, find_packages
setup(
name='zeit.magazin',
version='1.5.3.dev0',
author='gocept, Zeit Online',
author_email='[email protected]',
url='http://www.zeit.de/',
description="vivi ZMO Content-Type extensions",
packages=find_packages('src'),
package_dir={'': 'src'},
include_package_data=True,
zip_safe=False,
license='BSD',
namespace_packages=['zeit'],
install_requires=[
'gocept.httpserverlayer',
'gocept.selenium',
'gocept.testing>=1.4.0.dev0',
'grokcore.component',
'plone.testing',
'setuptools',
'zc.form',
'zeit.cms>=2.90.0.dev0',
'zeit.content.article>=3.25.0.dev0',
'zeit.content.gallery',
'zeit.content.link',
'zeit.content.portraitbox',
'zeit.edit',
'zeit.push>=1.12.0.dev0',
'zope.interface',
'zope.component',
],
)
|
import app
import logging
import os
from spacy.language import Language
from spacy.tokens import Doc
# import gpt_2_simple as gpt2
log = logging.getLogger(__name__)
from app.models import PIPELINE_STAGES as STAGE
class StoryGenerator(object):
"""
    Generates a human-readable "story" from our NLP analysis, preserving meaning and key points.
    Work in progress: may use something like GPT-2, paraphrasing or manual "templating".
"""
name = STAGE.STORY_GENERATOR
nlp: Language = None
# Which model to use. 124M = 500 MB !
# gpt2_model_name: str = "124M"
def __init__(self, nlp):
self.nlp = nlp
""" if not os.path.isdir(os.path.join("models", self.gpt2_model_name)):
log.info(f"Downloading {self.gpt2_model_name} model...")
gpt2.download_gpt2(model_name=self.gpt2_model_name)
# model is saved into current directory under /models/124M/ """
def __call__(self, doc):
if not doc.has_extension(STAGE.STORY_GENERATOR):
doc.set_extension(STAGE.STORY_GENERATOR, getter=self._generate_story)
return doc
def _generate_story(self, doc):
single_text = (
"Generate a story talking about key concepts, results, summary and ..."
)
""" sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess, model_name=self.gpt2_model_name)
# gpt2.finetune(sess,
# file_name,
# model_name=self.gpt2_model_name,
# steps=1000) # steps is max number of training steps
single_text = gpt2.generate(
sess, model_name=self.gpt2_model_name, return_as_list=True
)[0]
"""
return single_text
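# Hedged usage sketch (assumes a spaCy v2-style pipeline where component instances are passed to
# add_pipe, and that an English model such as "en_core_web_sm" is installed -- neither is confirmed here):
#
#   import spacy
#   nlp = spacy.load("en_core_web_sm")
#   nlp.add_pipe(StoryGenerator(nlp), last=True)
#   doc = nlp("Some analysed text about key concepts and results.")
#   print(doc._.get(STAGE.STORY_GENERATOR))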
|
import gym
import pybullet_envs
import numpy as np
from ppo.agent import Agent
if __name__ == '__main__':
env = gym.make('AntBulletEnv-v0')
learn_interval = 100
batch_size = 5000
n_epochs = 1000
learning_rate = 0.0003
observation_space = env.observation_space.shape[0]
action_space = env.action_space.shape[0]
agent = Agent(n_actions=action_space, batch_size=batch_size,
learning_rate=learning_rate, n_epochs=n_epochs, input_dims=observation_space)
n_games = 300
best_score = env.reward_range[0]
score_history = []
learn_iters = 0
avg_score = 0
n_steps = 0
for i in range(n_games):
observation = env.reset()
done = False
score = 0
while not done:
action, prob, val = agent.choose_action(observation)
observation_, reward, done, info = env.step(action)
n_steps += 1
score += reward
agent.push(observation, action, prob, val, reward, done)
if n_steps % learn_interval == 0:
agent.learn()
observation = observation_
score_history.append(score)
avg_score = np.mean(score_history[-100:])
if avg_score > best_score:
best_score = avg_score
        print(f'Episode: {i} / Score: {score} / AVG Score (100): {avg_score}')
|
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"label": _("Property Management"),
"items": [
{
"type": "doctype",
"description": "Property",
"name": "Property",
},
{
"type": "doctype",
"description": "Property Unit",
"name": "Property Unit",
},
{
"type": "doctype",
"description": "Property Type",
"name": "Property Type",
},
]
}
]
|
import os
import re
from app import db, config, socketio, app
from app.library.formatters import formatted_file_data
from app.models.file import File
from app.models.package import Package
from app.modules.mod_process.file_repository import FileRepository
from app.modules.mod_process.status_map import StatusMap
class ProcessRepository:
# this dict holds all the currently active processes as id-instance pairs
# example: {1: <...>, 2: <...>, ...}
processes = {}
# this controls whether or not the encoding processing is active
# notice: do not modify this directly, but use set_encoding_active()
encoding_active = False
@staticmethod
def set_encoding_active(new_state):
"""
change the state of whether encoding should be active or not to a new state
:param new_state: should the encoding be active now
"""
ProcessRepository.encoding_active = new_state
# notify client
socketio.emit("active_changed", {"active": new_state})
# check if it's necessary to start new processes
ProcessRepository.check_and_start_processes()
@staticmethod
def cancel_all_processes():
"""
cancel all currently running Processes
"""
# iterate over a copy of processes because cancel_process modifies the dictionary
# while we are iterating over it
for file_id in ProcessRepository.processes.copy():
ProcessRepository.cancel_process(file_id)
@staticmethod
def is_running(file_id):
return file_id in ProcessRepository.processes
@staticmethod
def cancel_process(file_id):
"""
cancel a specific Process
:param file_id: the id of the file corresponding to the Process
"""
# stop thread
ProcessRepository.processes[file_id].stop()
# update status
file = File.query.filter_by(id=file_id).first()
file.status = StatusMap.failed.value
file.clear()
db.session.commit()
# emit file_done event
socketio.emit("file_done", {"data": formatted_file_data(file)})
# remove from processes dict
ProcessRepository.processes.pop(file_id)
@staticmethod
def check_and_start_processes():
"""
check if it's required to start new Processes and do so if needed
"""
while ProcessRepository.encoding_active:
# grab next potential file to process
file = FileRepository.get_queued_query().order_by(Package.position.asc(), File.position.asc()).first()
if file is None or ProcessRepository.count_processes_active() >= config["general"].getint(
"parallel_processes"):
break
# update file.status in DB
file.status = StatusMap.processing.value
db.session.commit()
# start the Process
from app.modules.mod_process.process import Process
process = Process(file)
process.daemon = True
# todo debug
# file.status = 0
# db.session.commit()
# ProcessRepository.encoding_active = False
# add to "processes" dict
ProcessRepository.processes[file.id] = process
process.start()
# emit file_started event
data = formatted_file_data(file)
data["count_active"] = ProcessRepository.count_processes_active()
data["count_queued"] = ProcessRepository.count_processes_queued()
socketio.emit("file_started", {"data": data})
@staticmethod
def count_processes_active():
"""
:return: the amount of processes currently active
"""
return len(ProcessRepository.processes)
@staticmethod
def count_processes_queued():
"""
:return: the amount of Files currently queued
"""
return FileRepository.get_queued_query().count()
@staticmethod
def count_processes_total():
"""
:return: count of all Files that are in packages that are queued
"""
# return ProcessRepository.count_processes_active() + ProcessRepository.count_processes_queued()
return Package.query.filter_by(queue=True).join(File).count()
# TODO
@staticmethod
def file_done(file):
"""
will be called whenever a Process is finished
:param file: the File object of the File that is done
"""
# delete from "processes"
ProcessRepository.processes.pop(file.id)
# remove original file from disk if desired
if config.getboolean("encoding", "delete_old_file"):
os.remove(file.filename)
# rename file if desired
if config.getboolean("encoding", "rename_enabled"):
rename_search = config.get("encoding", "rename_search")
rename_replace = config.get("encoding", "rename_replace")
# get pathinfo
pathinfo = os.path.split(file.filename)
path = pathinfo[0]
old_filename = pathinfo[1]
# only rename if match occurs
if re.match(rename_search, old_filename):
new_filename = re.sub(rename_search, rename_replace, old_filename)
# rename output_filename (created by ffmpeg, see process.py) to new_filename
os.rename(path + os.sep + file.output_filename, path + os.sep + new_filename)
# update status to "finished"
db.session.query(File).filter_by(id=file.id).update(dict(status=StatusMap.finished.value))
db.session.commit()
# check if it's necessary to start new processes
ProcessRepository.check_and_start_processes()
# notify client
socketio.emit("file_done", {
"data": {
"id": file.id,
"count_active": ProcessRepository.count_processes_active(),
"count_queued": ProcessRepository.count_processes_queued(),
"count_total": ProcessRepository.count_processes_total(),
}
})
app.logger.debug("Done with encoding of %s" % file.filename)
@staticmethod
def file_failed(file):
"""
will be called whenever a File fails
:param file: the File object of the File that has failed
"""
# delete from "processes"
ProcessRepository.processes.pop(file.id)
# update status and set attributes to zero
file = db.session.query(File).filter_by(id=file.id).first()
file.status = StatusMap.failed.value
file.clear()
db.session.commit()
# check if it's necessary to start new processes
ProcessRepository.check_and_start_processes()
# notify client
socketio.emit("file_done", {
"data": {
"id": file.id,
"count_active": ProcessRepository.count_processes_active(),
"count_queued": ProcessRepository.count_processes_queued(),
"count_total": ProcessRepository.count_processes_total(),
}
})
@staticmethod
def file_progress(file):
"""
will be called whenever a file makes progress
:param file: the File object of the File that has made progress
"""
# format data
info = formatted_file_data(file)
socketio.emit("file_progress", {"data": info})
|
# coding: utf-8
# Fabo #902: automatic address reassignment of LidarLiteV3 units on the Kerberos board
import FaBoGPIO_PCAL6408
import time
import LidarLiteV3
class Kerberos():
def __init__(self):
pcal6408 = FaBoGPIO_PCAL6408.PCAL6408()
########################################
        # Change Lidar1's address: 0x62 -> 0x52
########################################
pcal6408.setDigital(1<<0, 1) # 0番目のLidarの電源を入れる
time.sleep(0.1)
lidar1 = LidarLiteV3.Connect(0x62)
lidar1.changeAddress(0x52)
########################################
        # Change Lidar2's address: 0x62 -> 0x54
########################################
pcal6408.setDigital(1<<1, 1) # 1番目のLidarの電源を入れる
time.sleep(0.1)
lidar2 = LidarLiteV3.Connect(0x62)
lidar2.changeAddress(0x54)
########################################
        # Change Lidar3's address: 0x62 -> 0x56
########################################
pcal6408.setDigital(1<<2, 1) # 3番目のLidarの電源を入れる
time.sleep(0.1)
lidar3 = LidarLiteV3.Connect(0x62)
lidar3.changeAddress(0x56)
self.pcal6408 = pcal6408
self.lidar1 = lidar1
self.lidar2 = lidar2
self.lidar3 = lidar3
return
def __del__(self):
        self.pcal6408.setAllClear()  # power off all Lidars
def get_distance(self):
        ########################################
        # Read the distances from all Lidars
########################################
distance1 = self.lidar1.getDistance()
distance2 = self.lidar2.getDistance()
distance3 = self.lidar3.getDistance()
return distance1,distance2,distance3
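# Minimal usage sketch (assumes the PCAL6408 expander and the three LidarLite v3 units are wired
# as expected by the class above):
if __name__ == "__main__":
    kerberos = Kerberos()
    d1, d2, d3 = kerberos.get_distance()
    print(d1, d2, d3)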
|
"""p2 users view"""
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.mixins import \
PermissionRequiredMixin as DjangoPermissionListMixin
from django.contrib.auth.models import User
from django.contrib.messages.views import SuccessMessageMixin
from django.shortcuts import reverse
from django.utils.translation import gettext as _
from django.views.generic import DeleteView, ListView, UpdateView
from guardian.mixins import PermissionListMixin, PermissionRequiredMixin
from guardian.shortcuts import get_anonymous_user
from p2.iam.forms import UserForm
from p2.lib.views import CreateAssignPermView
class UserListView(PermissionListMixin, LoginRequiredMixin, ListView):
"""List all Users the user has access to"""
model = User
permission_required = 'auth.view_user'
ordering = 'username'
paginate_by = 10
def get_queryset(self, *args, **kwargs):
return super().get_queryset(*args, **kwargs).exclude(pk=get_anonymous_user().pk)
class UserCreateView(SuccessMessageMixin, DjangoPermissionListMixin, CreateAssignPermView):
"""Create new User"""
model = User
form_class = UserForm
permission_required = 'auth.add_user'
template_name = 'generic/form.html'
success_message = _('Successfully created User')
permissions = [
'auth.view_user',
'auth.change_user',
'auth.delete_user',
]
def form_valid(self, form):
response = super().form_valid(form)
self.object.set_unusable_password()
self.object.save()
return response
def get_success_url(self):
return reverse('p2_ui:iam-users-list')
class UserUpdateView(SuccessMessageMixin, PermissionRequiredMixin, UpdateView):
"""Update existing User"""
model = User
form_class = UserForm
permission_required = 'auth.change_user'
template_name = 'generic/form.html'
success_message = _('Successfully updated User')
def get_success_url(self):
return reverse('p2_ui:iam-users-list')
class UserDeleteView(SuccessMessageMixin, PermissionRequiredMixin, DeleteView):
"""Delete User"""
model = User
permission_required = 'auth.delete_user'
template_name = 'generic/delete.html'
success_message = _('Successfully deleted User')
def get_success_url(self):
return reverse('p2_ui:iam-users-list')
def delete(self, request, *args, **kwargs):
obj = self.get_object()
messages.success(self.request, self.success_message % obj.__dict__)
return super().delete(request, *args, **kwargs)
|
from dbt.tests.util import AnyString, AnyFloat
def snowflake_stats():
return {
'has_stats': {
'id': 'has_stats',
'label': 'Has Stats?',
'value': True,
'description': 'Indicates whether there are statistics for this table',
'include': False,
},
'bytes': {
'id': 'bytes',
'label': 'Approximate Size',
'value': AnyFloat(),
'description': 'Approximate size of the table as reported by Snowflake',
'include': True,
},
'last_modified': {
'id': 'last_modified',
'label': 'Last Modified',
'value': AnyString(),
'description': 'The timestamp for last update/change',
'include': True,
},
'row_count': {
'id': 'row_count',
'label': 'Row Count',
'value': 1.0,
'description': 'An approximate count of rows in this table',
'include': True,
}
}
|
from recipe_compiler.recipe import Recipe
from recipe_compiler.recipe_category import RecipeCategory
def test_recipe_slug():
# Given
name = "Thomas Eckert"
residence = "Seattle, WA"
category = RecipeCategory("dessert")
recipe_name = '"Pie" Shell Script'
quote = "Hello, World"
ingredients = [""]
instructions = [""]
expected = "pie-shell-script"
# When
recipe = Recipe(
name, residence, category, recipe_name, quote, ingredients, instructions
)
# Then
assert expected == recipe.slug
|
# SPDX-License-Identifier: Apache-2.0
# Licensed to the Ed-Fi Alliance under one or more agreements.
# The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0.
# See the LICENSE and NOTICES files in the project root for more information.
from explorations_py import __version__
def test_version():
assert __version__ == '0.1.0'
|
answer = ""
while answer != "n":
answer = input("Would you like to continue? [Y/n] ")
print("do this stuff")
if answer == "african swallow":
print("You found the secret password!")
break
    if answer == "european":
print("good point")
continue
print("Prompting in the while again")
print("Exited the while loop")
|
from werkzeug.exceptions import HTTPException
from flask import Response, redirect
from flask_admin import BaseView, expose
from flask_admin.contrib.sqla import ModelView as DefaultModelView
from flask_login import login_required
from project.home.decorators import roles_required
class BasicAuthException(HTTPException):
def __init__(self, message):
super().__init__(message, Response(
"You could not be authenticated. Please refresh the page.", 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'}
))
class ModelView(DefaultModelView):
column_auto_select_related = True
def __init__(self, model, session, basic_auth, *args, **kwargs):
super(ModelView, self).__init__(model, session, *args, **kwargs)
self.basic_auth = basic_auth
def is_accessible(self):
if not self.basic_auth.authenticate():
raise BasicAuthException('Not authenticated.')
else:
return True
def inaccessible_callback(self, name, **kwargs):
return redirect(self.basic_auth.challenge())
class SuperSecretPage(BaseView):
@expose(url="/", methods=("GET", ))
@login_required
@roles_required('admin')
def secret(self):
return self.render('admin/super-secret-page.html')
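# Hedged wiring sketch (the Flask app, SQLAlchemy session, BasicAuth instance and model class are
# placeholders -- the real project configures these elsewhere):
#
#   from flask_admin import Admin
#   admin = Admin(app, name='project', template_mode='bootstrap3')
#   admin.add_view(ModelView(User, db.session, basic_auth))
#   admin.add_view(SuperSecretPage(name='Secret', endpoint='secret'))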
|
Import('env')
env.Prepend(CPPPATH=['/home/karl/.platformio/packages/framework-mbed/features/unsupported/rpc'])
|
from collections import deque
import os
import cv2
from .wrapper_base import Wrapper, ObservationWrapper
import gym
import gym.spaces as spaces
import numpy as np
os.environ.setdefault("PATH", "")
cv2.ocl.setUseOpenCL(False)
class FrameStack(Wrapper):
def __init__(self, env, k):
"""Stack k last frames."""
Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(
low=0,
high=255,
shape=(shp[:-1] + (shp[-1] * k,)),
dtype=env.observation_space.dtype,
)
# pylint: disable=method-hidden
def reset(self, **kwargs):
ob = self.env.reset(**kwargs)
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
# pylint: disable=method-hidden
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return np.concatenate(list(self.frames), axis=-1)
class ScaledFloatFrame(ObservationWrapper):
def __init__(self, env):
ObservationWrapper.__init__(self, env)
self.observation_space = gym.spaces.Box(
low=0, high=1, shape=env.observation_space.shape, dtype=np.float32
)
def observation(self, observation):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
img = np.array(observation, dtype=np.float32)
img /= 255.0
return img
class NormalizeActions(Wrapper):
"""Makes env expect actions that are zero-mean and unit variance """
def __init__(self, env):
Wrapper.__init__(self, env)
self._mask = np.logical_and(
np.isfinite(env.action_space.low),
np.isfinite(env.action_space.high))
self._low = np.where(self._mask, env.action_space.low, -1)
self._high = np.where(self._mask, env.action_space.high, 1)
low = np.where(self._mask, -np.ones_like(self._low), self._low)
high = np.where(self._mask, np.ones_like(self._low), self._high)
self.action_space = gym.spaces.Box(low, high, dtype=np.float32)
def step(self, action):
original = (action + 1) / 2 * (self._high - self._low) + self._low
original = np.where(self._mask, original, action)
return self.env.step(original)
class FrameSkip(Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
Wrapper.__init__(self, env)
        self._skip = max(1, skip)  # always step the env at least once
# pylint: disable=method-hidden
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
total_reward += reward
if done:
break
return obs, total_reward, done, info
# pylint: disable=method-hidden
def reset(self, *args, **kwargs):
return self.env.reset(*args, **kwargs)
class ObsDict(ObservationWrapper):
"""Transform the normal observation into an obs dict with a key given by arg"""
def __init__(self, env, default_key="obs_1d"):
super(ObsDict, self).__init__(env)
self.observation_space = gym.spaces.Dict({default_key: self.env.observation_space})
self._key = default_key
def observation(self, observation):
return {self._key: observation}
class ObsDictRenameKey(ObservationWrapper):
"""Renames a key for an obs dict"""
def __init__(self, env, old_name="observation",new_name="obs_1d"):
super(ObsDictRenameKey, self).__init__(env)
old_obs_space = env.observation_space
assert isinstance(old_obs_space,gym.spaces.Dict)
import copy
new_obs_space = copy.deepcopy(old_obs_space)
new_obs_space.spaces[new_name] = new_obs_space.spaces.pop(old_name)
self.observation_space = new_obs_space
self.old_name = old_name
self.new_name = new_name
def observation(self, observation:dict):
observation[self.new_name] = observation.pop(self.old_name)
return observation
class RewardObs(Wrapper):
"""Make the reward part """
def __init__(self, env):
Wrapper.__init__(self, env)
spaces = self.env.observation_space.spaces
if "obs_1d" in spaces:
assert isinstance(spaces["obs_1d"], gym.spaces.Box)
assert spaces["obs_1d"].dtype == np.float32
new_space = gym.spaces.Box(-np.inf, np.inf,
shape=tuple(np.array(spaces["obs_1d"].shape) + 1))
else:
new_space = gym.spaces.Box(-np.inf, np.inf, shape=(1,))
spaces["obs_1d"] = new_space
self.observation_space = gym.spaces.Dict(spaces)
def step(self, action):
obs, reward, done, info = self.env.step(action)
        obs['obs_1d'] = np.concatenate(
            (obs.get('obs_1d', ()), np.array([reward], dtype=np.float32)),
            axis=-1
        )
return obs, reward, done, info
def reset(self):
obs = self.env.reset()
        obs['obs_1d'] = np.concatenate(
            (obs.get('obs_1d', ()), np.array([0], dtype=np.float32)),
            axis=-1
        )
return obs
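# A rough sketch of how ObsDict and RewardObs compose (illustrative only; `base_env`
# stands for any env with a 1-D Box observation space):
#
#     env = RewardObs(ObsDict(base_env, default_key="obs_1d"))
#     obs = env.reset()
#     # obs is a dict whose "obs_1d" entry is the original observation with the
#     # reward (0 on reset) appended as the last element.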
|
# -*- coding: utf-8 -*-
import unittest
from pyramid import testing
class TestRedisSessionFactory(unittest.TestCase):
def _makeOne(self, request, secret='secret', **kw):
from .. import RedisSessionFactory
return RedisSessionFactory(secret, **kw)(request)
def _assert_is_a_header_to_set_cookie(self, header_value):
# The negative assertion below is the least complicated option for
# asserting that a Set-Cookie header sets a cookie rather than deletes
# a cookie. This helper method is to help make that intention clearer
# in the tests.
self.assertNotIn('Max-Age=0', header_value)
def _get_session_id(self, request):
from ..compat import cPickle
from ..util import get_unique_session_id
redis = request.registry._redis_sessions
session_id = get_unique_session_id(redis, timeout=100,
serialize=cPickle.dumps)
return session_id
def _serialize(self, session_id, secret='secret'):
from pyramid.session import signed_serialize
return signed_serialize(session_id, secret)
def _set_session_cookie(self, request, session_id, cookie_name='session',
secret='secret'):
cookieval = self._serialize(session_id, secret=secret)
request.cookies[cookie_name] = cookieval
def _make_request(self):
from . import DummyRedis
request = testing.DummyRequest()
request.registry._redis_sessions = DummyRedis()
request.exception = None
return request
def test_ctor_no_cookie(self):
request = self._make_request()
session = self._makeOne(request)
session_dict = session.from_redis()['managed_dict']
self.assertDictEqual(session_dict, {})
self.assertIs(session.new, True)
def test_ctor_with_cookie_still_valid(self):
request = self._make_request()
session_id_in_cookie = self._get_session_id(request)
self._set_session_cookie(request=request,
session_id=session_id_in_cookie)
session = self._makeOne(request)
self.assertEqual(session.session_id, session_id_in_cookie)
self.assertIs(session.new, False)
def test_ctor_with_bad_cookie(self):
request = self._make_request()
session_id_in_cookie = self._get_session_id(request)
invalid_secret = 'aaaaaa'
self._set_session_cookie(request=request,
session_id=session_id_in_cookie,
secret=invalid_secret)
session = self._makeOne(request)
self.assertNotEqual(session.session_id, session_id_in_cookie)
self.assertIs(session.new, True)
def test_session_id_not_in_redis(self):
request = self._make_request()
session_id_in_cookie = self._get_session_id(request)
self._set_session_cookie(request=request,
session_id=session_id_in_cookie)
redis = request.registry._redis_sessions
redis.store = {} # clears keys in DummyRedis
session = self._makeOne(request)
self.assertNotEqual(session.session_id, session_id_in_cookie)
self.assertIs(session.new, True)
def test_factory_parameters_used_to_set_cookie(self):
import re
import webob
cookie_name = 'testcookie'
cookie_max_age = 300
cookie_path = '/path'
cookie_domain = 'example.com'
cookie_secure = True
cookie_httponly = False
secret = 'test secret'
request = self._make_request()
session = request.session = self._makeOne(
request,
cookie_name=cookie_name,
cookie_max_age=cookie_max_age,
cookie_path=cookie_path,
cookie_domain=cookie_domain,
cookie_secure=cookie_secure,
cookie_httponly=cookie_httponly,
secret=secret,
)
session['key'] = 'value'
response = webob.Response()
request.response_callbacks[0](request, response)
set_cookie_headers = response.headers.getall('Set-Cookie')
self.assertEqual(len(set_cookie_headers), 1)
# Make another response and .set_cookie() using the same values and
# settings to get the expected header to compare against
response_to_check_against = webob.Response()
response_to_check_against.set_cookie(
key=cookie_name,
value=self._serialize(session_id=request.session.session_id,
secret=secret),
max_age=cookie_max_age,
path=cookie_path,
domain=cookie_domain,
secure=cookie_secure,
httponly=cookie_httponly,
)
expected_header = response_to_check_against.headers.getall(
'Set-Cookie')[0]
remove_expires_attribute = lambda s: re.sub('Expires ?=[^;]*;', '', s,
flags=re.IGNORECASE)
self.assertEqual(remove_expires_attribute(set_cookie_headers[0]),
remove_expires_attribute(expected_header))
# We have to remove the Expires attributes from each header before the
# assert comparison, as we cannot rely on their values to be the same
# (one is generated after the other, and may have a slightly later
# Expires time). The Expires value does not matter to us as it is
# calculated from Max-Age.
def test_factory_parameters_used_to_delete_cookie(self):
import webob
cookie_name = 'testcookie'
cookie_path = '/path'
cookie_domain = 'example.com'
request = self._make_request()
self._set_session_cookie(request=request,
cookie_name=cookie_name,
session_id=self._get_session_id(request))
session = request.session = self._makeOne(
request,
cookie_name=cookie_name,
cookie_path=cookie_path,
cookie_domain=cookie_domain,
)
session.invalidate()
response = webob.Response()
request.response_callbacks[0](request, response)
set_cookie_headers = response.headers.getall('Set-Cookie')
self.assertEqual(len(set_cookie_headers), 1)
# Make another response and .delete_cookie() using the same values and
# settings to get the expected header to compare against
response_to_check_against = webob.Response()
response_to_check_against.delete_cookie(
key=cookie_name,
path=cookie_path,
domain=cookie_domain,
)
        expected_header = response_to_check_against.headers.getall('Set-Cookie')[0]
self.assertEqual(set_cookie_headers[0], expected_header)
# The tests below with names beginning with test_new_session_ test cases
# where first access to request.session creates a new session, as in
# test_ctor_no_cookie, test_ctor_with_bad_cookie and
# test_session_id_not_in_redis.
def test_new_session_cookie_on_exception_true_no_exception(self):
# cookie_on_exception is True by default, no exception raised
import webob
request = self._make_request()
request.session = self._makeOne(request)
response = webob.Response()
request.response_callbacks[0](request, response)
set_cookie_headers = response.headers.getall('Set-Cookie')
self.assertEqual(len(set_cookie_headers), 1)
self._assert_is_a_header_to_set_cookie(set_cookie_headers[0])
def test_new_session_cookie_on_exception_true_exception(self):
# cookie_on_exception is True by default, exception raised
import webob
request = self._make_request()
request.session = self._makeOne(request)
request.exception = Exception()
response = webob.Response()
request.response_callbacks[0](request, response)
set_cookie_headers = response.headers.getall('Set-Cookie')
self.assertEqual(len(set_cookie_headers), 1)
self._assert_is_a_header_to_set_cookie(set_cookie_headers[0])
def test_new_session_cookie_on_exception_false_no_exception(self):
# cookie_on_exception is False, no exception raised
import webob
request = self._make_request()
request.session = self._makeOne(request, cookie_on_exception=False)
response = webob.Response()
request.response_callbacks[0](request, response)
set_cookie_headers = response.headers.getall('Set-Cookie')
self.assertEqual(len(set_cookie_headers), 1)
self._assert_is_a_header_to_set_cookie(set_cookie_headers[0])
def test_new_session_cookie_on_exception_false_exception(self):
# cookie_on_exception is False, exception raised
import webob
request = self._make_request()
request.session = self._makeOne(request, cookie_on_exception=False)
request.exception = Exception()
response = webob.Response()
request.response_callbacks[0](request, response)
self.assertNotIn('Set-Cookie', response.headers)
def test_new_session_invalidate(self):
# new session -> invalidate()
import webob
request = self._make_request()
request.session = self._makeOne(request)
request.session.invalidate()
response = webob.Response()
request.response_callbacks[0](request, response)
self.assertNotIn('Set-Cookie', response.headers)
def test_new_session_session_after_invalidate_coe_True_no_exception(self):
# new session -> invalidate() -> new session
# cookie_on_exception is True by default, no exception raised
import webob
request = self._make_request()
session = request.session = self._makeOne(request)
session.invalidate()
session['key'] = 'value'
response = webob.Response()
request.response_callbacks[0](request, response)
set_cookie_headers = response.headers.getall('Set-Cookie')
self.assertEqual(len(set_cookie_headers), 1)
self._assert_is_a_header_to_set_cookie(set_cookie_headers[0])
def test_new_session_session_after_invalidate_coe_True_exception(self):
# new session -> invalidate() -> new session
# cookie_on_exception is True by default, exception raised
import webob
request = self._make_request()
session = request.session = self._makeOne(request)
session.invalidate()
session['key'] = 'value'
request.exception = Exception()
response = webob.Response()
request.response_callbacks[0](request, response)
set_cookie_headers = response.headers.getall('Set-Cookie')
self.assertEqual(len(set_cookie_headers), 1)
self._assert_is_a_header_to_set_cookie(set_cookie_headers[0])
def test_new_session_session_after_invalidate_coe_False_no_exception(self):
# new session -> invalidate() -> new session
# cookie_on_exception is False, no exception raised
import webob
request = self._make_request()
session = request.session = self._makeOne(request,
cookie_on_exception=False)
session.invalidate()
session['key'] = 'value'
response = webob.Response()
request.response_callbacks[0](request, response)
set_cookie_headers = response.headers.getall('Set-Cookie')
self.assertEqual(len(set_cookie_headers), 1)
self._assert_is_a_header_to_set_cookie(set_cookie_headers[0])
def test_new_session_session_after_invalidate_coe_False_exception(self):
# new session -> invalidate() -> new session
# cookie_on_exception is False, exception raised
import webob
request = self._make_request()
session = request.session = self._makeOne(request,
cookie_on_exception=False)
session.invalidate()
session['key'] = 'value'
request.exception = Exception()
response = webob.Response()
request.response_callbacks[0](request, response)
self.assertNotIn('Set-Cookie', response.headers)
def test_new_session_multiple_invalidates(self):
# new session -> invalidate() -> new session -> invalidate()
# Invalidate more than once, no new session after last invalidate()
import webob
request = self._make_request()
session = request.session = self._makeOne(request)
session.invalidate()
session['key'] = 'value'
session.invalidate()
response = webob.Response()
request.response_callbacks[0](request, response)
self.assertNotIn('Set-Cookie', response.headers)
def test_new_session_multiple_invalidates_with_no_new_session_in_between(
self
):
# new session -> invalidate() -> invalidate()
# Invalidate more than once, no new session in between invalidate()s,
# no new session after last invalidate()
import webob
request = self._make_request()
session = request.session = self._makeOne(request)
session.invalidate()
session.invalidate()
response = webob.Response()
request.response_callbacks[0](request, response)
self.assertNotIn('Set-Cookie', response.headers)
# The tests below with names beginning with test_existing_session_ test
# cases where first access to request.session returns an existing session,
# as in test_ctor_with_cookie_still_valid.
def test_existing_session(self):
import webob
request = self._make_request()
self._set_session_cookie(
request=request,
session_id=self._get_session_id(request),
)
request.session = self._makeOne(request)
response = webob.Response()
request.response_callbacks[0](request, response)
self.assertNotIn('Set-Cookie', response.headers)
def test_existing_session_invalidate(self):
# existing session -> invalidate()
import webob
request = self._make_request()
self._set_session_cookie(request=request,
session_id=self._get_session_id(request))
request.session = self._makeOne(request)
request.session.invalidate()
response = webob.Response()
request.response_callbacks[0](request, response)
set_cookie_headers = response.headers.getall('Set-Cookie')
self.assertEqual(len(set_cookie_headers), 1)
self.assertIn('Max-Age=0', set_cookie_headers[0])
def test_existing_session_session_after_invalidate_coe_True_no_exception(
self
):
# existing session -> invalidate() -> new session
# cookie_on_exception is True by default, no exception raised
import webob
request = self._make_request()
self._set_session_cookie(request=request,
session_id=self._get_session_id(request))
session = request.session = self._makeOne(request)
session.invalidate()
session['key'] = 'value'
response = webob.Response()
request.response_callbacks[0](request, response)
set_cookie_headers = response.headers.getall('Set-Cookie')
self.assertEqual(len(set_cookie_headers), 1)
self._assert_is_a_header_to_set_cookie(set_cookie_headers[0])
def test_existing_session_session_after_invalidate_coe_True_exception(
self
):
# existing session -> invalidate() -> new session
# cookie_on_exception is True by default, exception raised
import webob
request = self._make_request()
self._set_session_cookie(request=request,
session_id=self._get_session_id(request))
session = request.session = self._makeOne(request)
session.invalidate()
session['key'] = 'value'
request.exception = Exception()
response = webob.Response()
request.response_callbacks[0](request, response)
set_cookie_headers = response.headers.getall('Set-Cookie')
self.assertEqual(len(set_cookie_headers), 1)
self._assert_is_a_header_to_set_cookie(set_cookie_headers[0])
def test_existing_session_session_after_invalidate_coe_False_no_exception(
self
):
# existing session -> invalidate() -> new session
# cookie_on_exception is False, no exception raised
import webob
request = self._make_request()
self._set_session_cookie(request=request,
session_id=self._get_session_id(request))
session = request.session = self._makeOne(request,
cookie_on_exception=False)
session.invalidate()
session['key'] = 'value'
response = webob.Response()
request.response_callbacks[0](request, response)
set_cookie_headers = response.headers.getall('Set-Cookie')
self.assertEqual(len(set_cookie_headers), 1)
self._assert_is_a_header_to_set_cookie(set_cookie_headers[0])
def test_existing_session_session_after_invalidate_coe_False_exception(
self
):
# existing session -> invalidate() -> new session
# cookie_on_exception is False, exception raised
import webob
request = self._make_request()
self._set_session_cookie(request=request,
session_id=self._get_session_id(request))
session = request.session = self._makeOne(request,
cookie_on_exception=False)
session.invalidate()
session['key'] = 'value'
request.exception = Exception()
response = webob.Response()
request.response_callbacks[0](request, response)
set_cookie_headers = response.headers.getall('Set-Cookie')
self.assertEqual(len(set_cookie_headers), 1)
self.assertIn('Max-Age=0', set_cookie_headers[0])
# Cancel setting of cookie for new session, but still delete cookie for
# the earlier invalidate().
def test_existing_session_multiple_invalidates(self):
# existing session -> invalidate() -> new session -> invalidate()
# Invalidate more than once, no new session after last invalidate()
import webob
request = self._make_request()
self._set_session_cookie(request=request,
session_id=self._get_session_id(request))
session = request.session = self._makeOne(request)
session.invalidate()
session['key'] = 'value'
session.invalidate()
response = webob.Response()
request.response_callbacks[0](request, response)
set_cookie_headers = response.headers.getall('Set-Cookie')
self.assertEqual(len(set_cookie_headers), 1)
self.assertIn('Max-Age=0', set_cookie_headers[0])
def test_existing_session_multiple_invalidates_no_new_session_in_between(
self
):
# existing session -> invalidate() -> invalidate()
# Invalidate more than once, no new session in between invalidate()s,
# no new session after last invalidate()
import webob
request = self._make_request()
self._set_session_cookie(request=request,
session_id=self._get_session_id(request))
session = request.session = self._makeOne(request)
session.invalidate()
session.invalidate()
response = webob.Response()
request.response_callbacks[0](request, response)
set_cookie_headers = response.headers.getall('Set-Cookie')
self.assertEqual(len(set_cookie_headers), 1)
self.assertIn('Max-Age=0', set_cookie_headers[0])
def test_instance_conforms(self):
from pyramid.interfaces import ISession
from zope.interface.verify import verifyObject
request = self._make_request()
inst = self._makeOne(request)
verifyObject(ISession, inst)
def test_adjusted_session_timeout_persists(self):
request = self._make_request()
inst = self._makeOne(request)
inst.adjust_timeout_for_session(555)
session_id = inst.session_id
cookieval = self._serialize(session_id)
request.cookies['session'] = cookieval
new_session = self._makeOne(request)
self.assertEqual(new_session.timeout, 555)
def test_client_callable(self):
from . import DummyRedis
request = self._make_request()
redis = DummyRedis()
client_callable = lambda req, **kw: redis
inst = self._makeOne(request, client_callable=client_callable)
self.assertEqual(inst.redis, redis)
def test_session_factory_from_settings(self):
from .. import session_factory_from_settings
request = self._make_request()
settings = {'redis.sessions.secret': 'secret',
'redis.sessions.timeout': '999'}
inst = session_factory_from_settings(settings)(request)
self.assertEqual(inst.timeout, 999)
|
#!/usr/bin/env python
import sys
from setuptools import setup, find_packages
if sys.version_info.major < 3 or (
sys.version_info.major == 3 and sys.version_info.minor < 6
):
sys.exit("Python 3.6 or newer is required")
VERSION = None
with open("elsie/version.py") as f:
exec(f.read())
if VERSION is None:
raise Exception("version.py executed but VERSION was not set")
with open("requirements.txt") as f:
dependencies = [line.strip() for line in f.readlines()]
setup(
name="elsie",
version=VERSION,
description="Framework for making slides",
long_description="""
    Elsie is a framework for making slides in Python.
    Check out its documentation at https://spirali.github.io/elsie.
""",
author="Stanislav Böhm",
author_email="[email protected]",
url="https://github.com/spirali/elsie",
packages=find_packages(),
install_requires=dependencies,
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
)
|
"""This re-indexes resources in SOLR to fix problems during SOLR builds.
* By default, prints errors on stdout.
* Optional argument --log: logs output to system log.
"""
from django.core.management.base import BaseCommand
from hs_core.models import BaseResource
from hs_core.hydroshare.utils import get_resource_by_shortkey
from haystack import connection_router, connections
from haystack.exceptions import NotHandled
import logging
def has_subfolders(resource):
for f in resource.files.all():
if '/' in f.short_path:
return True
return False
def repair_solr(short_id):
""" Repair SOLR index content for a resource """
logger = logging.getLogger(__name__)
try:
res = BaseResource.objects.get(short_id=short_id)
    except BaseResource.DoesNotExist:
        print("{} does not exist".format(short_id))
        return
# instance with proper type
instance = res.get_content_model()
assert instance, (res, res.content_model)
print("re-indexing {} in solr".format(short_id))
# instance of BaseResource matching real instance
baseinstance = BaseResource.objects.get(pk=instance.pk)
basesender = BaseResource
using_backends = connection_router.for_write(instance=baseinstance)
for using in using_backends:
# if object is public/discoverable or becoming public/discoverable, index it
if instance.raccess.public or instance.raccess.discoverable:
try:
index = connections[using].get_unified_index().get_index(basesender)
index.update_object(baseinstance, using=using)
except NotHandled:
logger.exception(
"Failure: changes to %s with short_id %s not added to Solr Index.",
str(type(instance)), baseinstance.short_id)
# if object is private or becoming private, delete from index
else:
try:
index = connections[using].get_unified_index().get_index(basesender)
index.remove_object(baseinstance, using=using)
except NotHandled:
logger.exception("Failure: delete of %s with short_id %s failed.",
str(type(instance)), baseinstance.short_id)
class Command(BaseCommand):
help = "Repair SOLR index for a set of resources"
def add_arguments(self, parser):
# a list of resource id's: none does nothing.
parser.add_argument('resource_ids', nargs='*', type=str)
# Named (optional) arguments
parser.add_argument(
'--log',
action='store_true', # True for presence, False for absence
dest='log', # value is options['log']
help='log errors to system log',
)
parser.add_argument(
'--type',
dest='type',
help='limit to resources of a particular type'
)
parser.add_argument(
'--storage',
dest='storage',
help='limit to specific storage medium (local, user, federated)'
)
parser.add_argument(
'--access',
dest='access',
help='limit to specific access class (public, discoverable, private)'
)
parser.add_argument(
'--has_subfolders',
action='store_true', # True for presence, False for absence
dest='has_subfolders', # value is options['has_subfolders']
help='limit to resources with subfolders',
)
def repair_filtered_solr(self, resource, options):
if (options['type'] is None or resource.resource_type == options['type']) and \
(options['storage'] is None or resource.storage_type == options['storage']) and \
(options['access'] != 'public' or resource.raccess.public) and \
(options['access'] != 'discoverable' or resource.raccess.discoverable) and \
(options['access'] != 'private' or not resource.raccess.discoverable) and \
(not options['has_subfolders'] or has_subfolders(resource)):
storage = resource.get_irods_storage()
if storage.exists(resource.root_path):
repair_solr(resource.short_id)
else:
print("{} does not exist in iRODS".format(resource.short_id))
def handle(self, *args, **options):
if len(options['resource_ids']) > 0: # an array of resource short_id to check.
for rid in options['resource_ids']:
resource = get_resource_by_shortkey(rid)
self.repair_filtered_solr(resource, options)
else:
for resource in BaseResource.objects.all():
self.repair_filtered_solr(resource, options)
|
import sys, os
import datetime
# a utility
from fasp.runner import FASPRunner
from fasp.search import DiscoverySearchClient
from fasp.workflow import DNAStackWESClient
from fasp.workflow import GCPLSsamtools
import pyega3.pyega3 as ega
class EGAhtsget():
def __init__(self, credentialsPath):
*credentials, self.key = ega.load_credential(os.path.expanduser(credentialsPath))
self.credentials = credentials
self.token = ega.get_token(credentials)
def getSize(self, identifier):
display_file_name, file_name, file_size, check_sum = ega.get_file_name_size_md5(self.token, identifier)
return file_size
def htsget(self, identifier, ref, start, end, type, saveTo ):
display_file_name, file_name, file_size, check_sum = ega.get_file_name_size_md5(self.token, identifier)
genomic_range_args = (ref, check_sum, start, end, type)
print(display_file_name)
ega.download_file_retry(self.credentials, identifier, display_file_name, file_name, file_size, check_sum, 3, self.key,
saveTo, genomic_range_args, -1, 10)
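# A standalone usage sketch for EGAhtsget (the credentials path and EGA file id below are
# placeholders, not values taken from this script):
#   client = EGAhtsget('~/.keys/ega.credentials')
#   size = client.getSize('EGAF00000000001')
#   client.htsget('EGAF00000000001', 'chr1', 100000, 102000, 'BAM', 'slice.bam')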
def main(argv):
faspRunner = FASPRunner(pauseSecs=0)
creditor = faspRunner.creditor
settings = faspRunner.settings
# Step 1 - Discovery
# query for relevant files
searchClient = DiscoverySearchClient('https://ga4gh-search-adapter-presto-public.prod.dnastack.com/')
query = "SELECT sample_submitter_id, fileid, filename FROM dbgap_demo.scr_ega.scr_egapancreatic_sample_multi p join dbgap_demo.scr_ega.scr_egapancreatic_files f on f.sample_primary_id = p.sample_primary_id where phenotype = 'pancreatic adenocarcinoma' limit 3"
query_job = searchClient.runQuery(query)
# Step 2 - Use htsget at EGA
htsgetClient = EGAhtsget('~/.keys/ega.credentials')
    # Step 3 - set up a class that runs a compute job for us
location = 'projects/{}/locations/{}'.format(settings['GCPProject'], settings['GCPPipelineRegion'])
wesClient = GCPLSsamtools(location, settings['GCPOutputBucket'])
# repeat steps 2 and 3 for each row of the query
for row in query_job:
print("sample={}, EGAFileID={}".format(row[0], row[1]))
        # Step 2 - Use the EGA htsget client to get the file size
fileSize = htsgetClient.getSize(row[1])
print(fileSize)
# we've predetermined we want to use the gs copy in this case
#url = drsClient.getAccessURL(row[1], 'gs')
#htsgetClient.htsget(row[1], 'chr1', 100000, 102000, 'BAM', row[2])
localfile = 'NA19377.unmapped.ILLUMINA.bwa.LWK.low_coverage.20120522.bam'
#row[2]
# Step 3 - Run a pipeline on the file at the drs url
outfile = "{}.txt".format(row[0])
pipeline_id = wesClient.runWorkflow(localfile, outfile)
#print('submitted:{}'.format(pipeline_id))
via = 'local'
note = 'samtools on htsget BAM'
time = datetime.datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
faspRunner.logRun(time, via, note, pipeline_id, outfile, str(fileSize),
searchClient, htsgetClient, wesClient)
if __name__ == "__main__":
main(sys.argv[1:])
|
from typing import Dict, Literal, Optional
from pydantic import BaseModel, Field
from common.runtimes import ParameterDefinition, RuntimeConfig, SSHConfig
_DEFAULT: Dict[str, ParameterDefinition] = {
"worker_count": ParameterDefinition(name="worker_count", type="int", readable_name="Max. amount of workers",
description="Max. amount of workers for parallel request handling.")
}
class DockerDeploymentParameterDefinitions(BaseModel):
worker_count: ParameterDefinition = Field(_DEFAULT["worker_count"],
description="Max. amount of workers for parallel request handling.")
def __init__(self, *args, **kwargs):
for k in kwargs.keys():
kwargs[k] = {"name": k, **_DEFAULT[k].dict(), **kwargs[k]}
super().__init__(*args, **kwargs)
class DockerDeploymentRuntimeConfig(RuntimeConfig[DockerDeploymentParameterDefinitions]):
type: Literal["docker"] = "docker"
    binding_ip: Optional[str] = Field(None, description="The fixed IP address that all container bindings will be bound to.")
ssh: SSHConfig = Field(default=None, description="Configuration for accessing Docker runtime remotely via ssh.")
    entrypoint: str = Field(default=None, description="The public endpoint used to reach the traefik entrypoint of the deployment.")
parameters: DockerDeploymentParameterDefinitions = Field(DockerDeploymentParameterDefinitions())
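# A minimal construction sketch (all field values below are made up): the custom
# __init__ on DockerDeploymentParameterDefinitions merges partial overrides into
# _DEFAULT, so only the fields being changed need to be supplied.
#   config = DockerDeploymentRuntimeConfig(
#       binding_ip="127.0.0.1",
#       parameters=DockerDeploymentParameterDefinitions(
#           worker_count={"description": "Workers allowed on this host."}))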
|
from pywr.nodes import InputNode, LinkNode, OutputNode
from pywr.model import Model
from pathlib import Path
import pytest
@pytest.fixture()
def test_dir() -> Path:
return Path(__file__).parent
@pytest.fixture()
def model_dir(test_dir: Path):
return test_dir / "models"
class TestSimple1:
def test_simple_schema(self):
"""Test simple model from data"""
data = {
"timestepper": {"start": "2020-01-01", "end": "2020-12-31", "timestep": 1},
"nodes": [
{"name": "input1", "type": "input"},
{"name": "link1", "type": "link"},
{
"name": "output1",
"type": "output",
"cost": -10.0,
"max_flow": "demand",
},
],
"edges": [
{"from_node": "input1", "to_node": "link1"},
{"from_node": "link1", "to_node": "output1"},
],
"parameters": [{"name": "demand", "type": "constant", "value": 10.0}],
}
model = Model(**data)
assert len(model.nodes) == 3
assert len(model.edges) == 2
assert len(model.parameters) == 1
assert isinstance(model.nodes["input1"], InputNode)
assert isinstance(model.nodes["link1"], LinkNode)
assert isinstance(model.nodes["output1"], OutputNode)
model.run()
# TODO test the outputs
@pytest.mark.parametrize("filename", ["simple1.json", "simple1.yml"])
def test_from_file(self, model_dir: Path, filename: str):
model = Model.from_file(model_dir / filename)
assert len(model.nodes) == 3
assert len(model.edges) == 2
assert len(model.parameters) == 1
assert isinstance(model.nodes["input1"], InputNode)
assert isinstance(model.nodes["link1"], LinkNode)
assert isinstance(model.nodes["output1"], OutputNode)
model.run()
# TODO test the outputs
def test_duplicate_node_name_error():
data = {
"nodes": [
{"name": "node1", "type": "input"},
{"name": "node1", "type": "link"},
],
"edges": [],
"parameters": [],
}
with pytest.raises(ValueError):
Model(**data)
|
# coding: utf-8
from __future__ import unicode_literals
from spacy.kb import KnowledgeBase
from spacy.util import ensure_path
from spacy.lang.en import English
from spacy.tests.util import make_tempdir
def test_issue4674():
"""Test that setting entities with overlapping identifiers does not mess up IO"""
nlp = English()
kb = KnowledgeBase(nlp.vocab, entity_vector_length=3)
vector1 = [0.9, 1.1, 1.01]
vector2 = [1.8, 2.25, 2.01]
kb.set_entities(entity_list=["Q1", "Q1"], freq_list=[32, 111], vector_list=[vector1, vector2])
assert kb.get_size_entities() == 1
# dumping to file & loading back in
with make_tempdir() as d:
dir_path = ensure_path(d)
if not dir_path.exists():
dir_path.mkdir()
file_path = dir_path / "kb"
kb.dump(str(file_path))
kb2 = KnowledgeBase(vocab=nlp.vocab, entity_vector_length=3)
kb2.load_bulk(str(file_path))
assert kb2.get_size_entities() == 1
|
import os
import subprocess
import requests
import shutil
import tempfile
from datetime import timedelta
from django.utils import timezone
from django.core.management.base import BaseCommand, CommandError
from core.models import Image, AnimatedGif
class Command(BaseCommand):
help = """
Generates animated gifs
If it has been 3 hours since the last 3 hourly gif was made it generates a new one
If it has been a day since the last daily gif it will generate another one
"""
GIF_PROFILES = (
{
'period': AnimatedGif.PERIOD_3_HOURLY,
'created__gte': timezone.now() - timedelta(hours=3),
'output_file_path': '/tmp/{}.gif'.format(AnimatedGif.PERIOD_3_HOURLY),
},
# {
# 'period': AnimatedGif.PERIOD_DAILY,
# 'created__gte': timezone.now() - timedelta(days=1),
# 'output_file_path': '/tmp/{}.gif'.format(AnimatedGif.PERIOD_DAILY),
# },
)
GIF_VERSIONS = (
{
'name': 'full size',
'thumb_size': None,
},
{
'name': 'preview',
'thumb_size': '200x200',
},
)
    def make_gif(self, snapshot_image_urls, output_file_path, delay_ms='30', resize_px=None):
if not snapshot_image_urls:
return
scratch_dir = tempfile.mkdtemp()
self.stdout.write('Using scratch dir {}'.format(scratch_dir))
snapshot_file_paths = []
for url in snapshot_image_urls:
# Download and save all the images to the scratch_dir so that convert can process
# them
self.stdout.write('Downloading {}'.format(url))
response = requests.get(url, stream=True)
if response.ok:
file_path = os.path.join(scratch_dir, os.path.basename(url))
with open(file_path, 'wb') as out_file:
self.stdout.write('Saving to {}'.format(file_path))
shutil.copyfileobj(response.raw, out_file)
snapshot_file_paths.append(file_path)
else:
self.stderr.write('Error {} when getting {}'.format(response.status_code, url))
# Construct the convert command
command = ['convert']
if delay_ms is not None:
command += ['-delay', delay_ms]
if resize_px is not None:
command += ['-resize', resize_px]
command += ['-loop', '0'] + snapshot_file_paths + [output_file_path]
# Generate the gif
result = subprocess.run(command)
# Delete the scratch dir
shutil.rmtree(scratch_dir)
if result.returncode:
raise CommandError(
'Received error code {} when running {}'.format(result.returncode, result.args))
def handle(self, *args, **options):
for gif_profile in self.GIF_PROFILES:
output_file_path = gif_profile.pop('output_file_path')
if not AnimatedGif.objects.filter(**gif_profile).exists():
                # No gif for the desired time period. Generate a new one based on any new snapshots
# uploaded in that time
snapshots_to_render = (
Image.objects
.filter(created__gte=gif_profile['created__gte'])
.order_by('created'))
snapshot_image_fields = [snapshot.image for snapshot in snapshots_to_render]
# Create the new AnimatedGif instance but don't save it yet
gif_instance = AnimatedGif(
title=os.path.basename(output_file_path), period=gif_profile['period'])
# Render the gif versions (ie full size and thumbnail)
for version in self.GIF_VERSIONS:
if version['thumb_size'] is None:
snapshot_urls = [field.url for field in snapshot_image_fields]
gif_path = output_file_path
self.make_gif(snapshot_urls, gif_path)
image_field = gif_instance.image
else:
snapshot_urls = [
field.thumbnail[version['thumb_size']].url
for field in snapshot_image_fields]
path_bits = output_file_path.rsplit('.', maxsplit=1)
path_bits[0] = '{}_{}'.format(path_bits[0], version['thumb_size'])
gif_path = '.'.join(path_bits)
self.make_gif(snapshot_urls, gif_path, resize_px=None)
image_field = getattr(gif_instance, 'image_{}'.format(version['name']))
# Read the gif data into a buffer and save it on the appropriate image field
with open(gif_path, 'rb') as image_data:
file_name = os.path.basename(gif_path)
image_field.save(file_name, image_data)
gif_instance.save()
# Now go ahead and delete the snapshots to conserve space
# They need to be individually deleted so the corresponding image file in S3
# storage is deleted as well
for snapshot in snapshots_to_render:
snapshot.delete()
self.stdout.write(self.style.SUCCESS('Process complete'))
|
"""
Setup file for automat
"""
from setuptools import setup, find_packages
try:
from m2r import parse_from_file
long_description = parse_from_file('README.md')
except(IOError, ImportError):
print("\n\n!!! m2r not found, long_description is bad, don't upload this to PyPI !!!\n\n")
import io
long_description = io.open('README.md', encoding="utf-8").read()
setup(
name='Automat',
use_scm_version=True,
url='https://github.com/glyph/Automat',
description="""
Self-service finite-state machines for the programmer on the go.
""".strip(),
long_description=long_description,
packages=find_packages(exclude=[]),
package_dir={'automat': 'automat'},
setup_requires=[
'setuptools-scm',
'm2r',
],
install_requires=[
"attrs>=16.1.0",
"six",
],
extras_require={
"visualize": ["graphviz>0.5.1",
"Twisted>=16.1.1"],
},
entry_points={
"console_scripts": [
"automat-visualize = automat._visualize:tool"
],
},
author='Glyph',
author_email='[email protected]',
include_package_data=True,
license="MIT",
keywords='fsm finite state machine automata',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
|
########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module maintains backwards compatibility with Compute node types
version < 3.3.
"""
from cloudify.decorators import operation
from cloudify_agent.installer import operations
@operation
def install(**kwargs):
_fix_winrm_port_for_old_windows_blueprints(kwargs)
operations.create(**kwargs)
operations.configure(**kwargs)
@operation
def start(**kwargs):
_fix_winrm_port_for_old_windows_blueprints(kwargs)
operations.start(**kwargs)
@operation
def stop(**kwargs):
_fix_winrm_port_for_old_windows_blueprints(kwargs)
operations.stop(**kwargs)
@operation
def restart(**kwargs):
_fix_winrm_port_for_old_windows_blueprints(kwargs)
operations.restart(**kwargs)
@operation
def uninstall(**kwargs):
_fix_winrm_port_for_old_windows_blueprints(kwargs)
operations.delete(**kwargs)
def _fix_winrm_port_for_old_windows_blueprints(kwargs):
cloudify_agent = kwargs.get('cloudify_agent') or {}
agent_config = kwargs.get('agent_config') or {}
cloudify_agent.update(agent_config)
cloudify_agent.setdefault('windows', True)
cloudify_agent.setdefault('port', 5985)
kwargs['cloudify_agent'] = cloudify_agent
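# A small illustration of the port fix (input values are hypothetical): given
#   kwargs = {'agent_config': {'user': 'Administrator'}}
# _fix_winrm_port_for_old_windows_blueprints leaves kwargs['cloudify_agent'] set to
#   {'user': 'Administrator', 'windows': True, 'port': 5985}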
|
import os
import matplotlib.pyplot as plt
from PIL import Image, ImageFilter
# Load the image from the source file
image_file = "C:/Repositories/Image_Analysis/data/voc/plane/002279.jpg"
image = Image.open(image_file)
""" #Common Filters
blurred_image = image.filter(ImageFilter.BLUR)
sharpened_image = image.filter(ImageFilter.SHARPEN)
# Display it
fig = plt.figure(figsize=(16, 12))
# Plot original image
a=fig.add_subplot(1, 3, 1)
image_plot_1 = plt.imshow(image)
a.set_title("Original")
# Plot blurred image
a=fig.add_subplot(1, 3, 2)
image_plot_2 = plt.imshow(blurred_image)
a.set_title("Blurred")
# Plot sharpened image
a=fig.add_subplot(1, 3, 3)
image_plot_3 = plt.imshow(sharpened_image)
a.set_title("Sharpened") """
# Custom filters
my_kernel = (100, 150, -50,
-100, 20, -100,
-10, 10, 20)
filtered_image = image.filter(ImageFilter.Kernel((3,3), my_kernel))
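# ImageFilter.Kernel also accepts explicit scale and offset arguments; when scale is
# omitted (as above) Pillow divides each result by the sum of the kernel weights.
# An equivalent explicit call would be (same kernel, shown only for illustration):
#   filtered_image = image.filter(ImageFilter.Kernel((3, 3), my_kernel, scale=sum(my_kernel), offset=0))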
# Display it
fig = plt.figure(figsize=(16, 12))
# Plot original image
a=fig.add_subplot(1, 2, 1)
image_plot_1 = plt.imshow(image)
a.set_title("Original")
# Plot filtered image
a=fig.add_subplot(1, 2, 2)
image_plot_2 = plt.imshow(filtered_image)
a.set_title("Custom Filter")
plt.show() |
import numpy as np
import matplotlib.pylab as plt
import cv2
from skimage.metrics import structural_similarity as ssim
from skimage.metrics import peak_signal_noise_ratio as psnr
import os
from os.path import join as opj
from os.path import dirname as opd
from tqdm import tqdm
def plot(img,title="",savename="",savedir=None):
plt.figure()
plt.title(title)
plt.imshow(img,vmax=img.max(),vmin=0)
if savedir!=None:
plt.savefig(opj(savedir,savename+'.png'),dpi=200)
else:
plt.show()
plt.close()
def plot12(img1,img2,title1="",title2="",title="",savename="",savedir=None):
fig = plt.figure()
fig.suptitle(title)
plt.subplot(121)
plt.title(title1)
plt.imshow(img1,vmax=img1.max(),vmin=0)
plt.subplot(122)
plt.title(title2)
plt.imshow(img2,vmax=img2.max(),vmin=0)
if savedir!=None:
plt.savefig(opj(savedir,savename+'.png'),dpi=200)
else:
plt.show()
plt.close()
def plot_hist(array,bins=None,title='',savename="",savedir=None):
plt.figure()
plt.title(title)
if bins!=None:
plt.hist(array,bins=bins)
else:
plt.hist(array)
if savedir!=None:
plt.savefig(opj(savedir,savename+'.png'),dpi=200)
else:
plt.show()
plt.close()
def plot_matrix(matrix,cmap='viridis_r',vmin=None,vmax=None,text=False,title='',savename="",savedir=None):
plt.figure(figsize=(20,20))
plt.title(title)
if vmin!=None and vmax!=None:
plt.imshow(matrix,cmap=cmap,vmin=vmin,vmax=vmax)
else:
plt.imshow(matrix,cmap=cmap)
plt.colorbar(shrink=0.8)
if text:
for i in range(matrix.shape[0]):
for j in range(matrix.shape[1]):
plt.text(j, i, "{:.2f}".format(matrix[i, j]),
ha="center", va="center", color="w",size=8)
if savedir!=None:
plt.savefig(opj(savedir,savename+'.png'),dpi=200)
else:
plt.show()
plt.close()
def plot_boxplot(array,showfliers=True,whis=1.5,flierprops=None,title='',savename="",savedir=None):
plt.figure()
plt.title(title)
plt.boxplot(array,showfliers=showfliers,whis=whis,flierprops=flierprops)
if savedir!=None:
plt.savefig(opj(savedir,savename+'.png'),dpi=200)
else:
plt.show()
plt.close()
def plot12_boxplot(array1,array2,showfliers=True,whis=1.5,flierprops=None,
title1="",title2="",title="",savename="",savedir=None):
fig = plt.figure()
fig.suptitle(title)
plt.subplot(121)
plt.title(title1)
plt.boxplot(array1,showfliers=showfliers,whis=whis,flierprops=flierprops)
plt.subplot(122)
plt.title(title2)
plt.boxplot(array2,showfliers=showfliers,whis=whis,flierprops=flierprops)
if savedir!=None:
plt.savefig(opj(savedir,savename+'.png'),dpi=200)
else:
plt.show()
plt.close() |
#!/usr/bin/env python
import argparse
import shelve
import sys
import time
import fresh_tomatoes
from media import Movie
from omdb import get_movie_info
from youtube_trailer import get_trailer_url
# default movies to search for and add to the movie trailer page
default_titles = [
'inception', 'braveheart', 'jason bourne', 'iron man',
'g.i. jane', 'good will hunting', 'ray', 'furious 7',
'san andreas', 'get shorty', 'lost in translation',
'her', 'adaptation'
]
# parse the arguments
parser = argparse.ArgumentParser(description='Create Fresh Tomatoes Web Page',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dbfile', default='movie_trailer_db',
help='The database for the movie trailers')
parser.add_argument('-f', '--file',
help='Input file to get movie titles from, with one title per line')
parser.add_argument('-l', '--list',
help='List of movies separated by separator')
parser.add_argument('-s', '--separator', default=',',
help='The separator used if a list of movies is provided')
parser.add_argument('-x', '--exclude', action='store_true',
help='Exclude the default titles. List or file must be specified.')
args = parser.parse_args()
# shortcut
err = sys.stderr.write
movie_titles = set()
if not args.exclude:
movie_titles.update(default_titles)
elif not args.file and not args.list:
err("Error: Must specify at least one of file or list\n")
parser.print_help()
# read titles from a file
if args.file:
for title in open(args.file).readlines():
title = title.strip().lower()
if title: movie_titles.add(title)
# process titles in list
if args.list:
titles = args.list.split(args.separator)
for title in titles:
title = title.strip().lower()
if title: movie_titles.add(title)
# open the database
db = shelve.open(args.dbfile, writeback=True)
# list of movies to include in trailer web page
movies = []
# process each movie title
for title in movie_titles:
print 'Searching for movie %s' % title
if db.has_key(title):
print 'Movie %s found in database via search title' % title
movies.append(db[title])
continue
response = get_movie_info(title)
time.sleep(1)
movie = Movie(response)
if not movie.found:
err('Error: Movie %s not found in OMDB\n' % title)
err('Error: Response: %s\n' % response)
elif not movie.url_poster:
err('Error: Movie %s does not contain a poster URL\n' % title)
err('Error: Response: %s\n' % response)
else:
print 'Found details for movie %s' % title
if db.has_key(movie.title):
print 'Movie %s found in database via OMDB title' % movie.title
movie = db[movie.title]
else:
movie.search_title = title
movie.url_trailer = get_trailer_url(movie.title)
db[movie.title] = movie
db[movie.search_title] = movie
movies.append(movie)
db.close()
if not movies: sys.exit(1)
print "%s movies added to Fresh Tomatoes Trailers Page" % len(movies)
fresh_tomatoes.open_movies_page(movies)
sys.exit(0)
|
from flask import Flask, render_template, request, redirect, url_for, flash, session
from database import Mysql
from datetime import timedelta
app = Flask(__name__)
app.secret_key = b"secret_key"
app.permanent_session_lifetime = timedelta(days=30)
@app.route("/login", methods=["POST", "GET"])
def login():
"""
Route of login and validations' users
:return: Page of login
"""
if request.method == "POST":
user = request.form.get("username")
password = request.form.get("password")
info_users = Mysql(email=user, user=user, password=password)
if info_users.conf_login():
            # Use sessions to validate access to the home page
if request.form.get('remember') == "remember":
session.permanent = True
else:
session.permanent = False
session["username"] = info_users.get_user_name()[0]
return redirect(url_for("home"))
else:
# Error of Login
flash("message_alert", "Seu usuário ou senha estão incorretos")
return redirect(url_for("login"))
else:
if "username" in session:
return redirect(url_for("home"))
return render_template("login.html")
@app.route("/home")
def home():
if "username" in session:
flash("message_home", session["username"])
return render_template("home.html")
return redirect(url_for("login.html"))
@app.route("/logout")
def logout():
session.pop("username", None)
return redirect(url_for("login"))
@app.route("/error")
def error():
"""
:return: Page error
"""
return redirect(url_for("login.html"))
@app.route("/register", methods=["POST", "GET"])
def register_user():
"""
:return:Page register user
"""
if request.method == "POST":
email = request.form.get("email")
user = request.form.get("username")
password = request.form.get("password")
info_users = Mysql(email=email, user=user, password=password)
if info_users.register() is True:
flash("message_alert", f"Usuário {user} cadastrado com sucesso")
return redirect(url_for("login"))
else:
flash("message_alert", "Erro ao cadastrar usuário")
return redirect(url_for("register_user"))
return render_template("register.html")
if __name__ == '__main__':
app.run(debug=True)
|
import scipy.io
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
import csv
from features import *
dataDirectory = "data/clips"
def setDataDirectory(dirName):
global dataDirectory
dataDirectory = dirName
return
def loadData(matlabFile,lat=0):
matlabDict = scipy.io.loadmat(matlabFile)
if matlabFile.count('_ictal_') > 0:
lat = matlabDict['latency'][0]
freq = len(matlabDict['data'][0])
data = pd.DataFrame({'time':np.arange(lat,1.0+lat,1.0/freq)})
for i in range(len(matlabDict['channels'][0][0])):
channelName = "chan%i"%i
data[channelName] = matlabDict['data'][i]
return data
def downSample(data,factor):
coarseData = data.groupby(lambda x: int(np.floor(x/factor))).mean()
return coarseData
def plotChannels(data,channels,plotOpts):
if len(channels) > len(plotOpts):
print 'ERROR: Must specify plot options for each channel'
return
for chan in range(len(channels)):
plt.plot(data['time'],data[channels[chan]],plotOpts[chan])
plt.xlabel('time (s)')
plt.ylabel('Electrode reading')
plt.legend(channels)
def loadClips(patient,clipType,clipNumbers,targetFrequency):
data = []
for clip in clipNumbers:
clipPath = "%s/%s/%s_%s_segment_%i.mat"%(dataDirectory,patient,patient,clipType,clip)
tmpData = loadData(clipPath,clip-1)
downFactor = float(len(tmpData['time'])) / targetFrequency
if downFactor > 1.0:
data.append(downSample(tmpData,downFactor))
else:
data.append(tmpData)
return pd.concat(data)
def convertToFeatureSeries(data,featureFunctions,isSeizure=False,latency=0,isTest=False,testFile=""):
#converts time series data into a set of features
#featureFunctions should be a list of the desired features, which must be defined in funcDict
#isSeizure and latency are used to add that information for training/validation
#when loading test samples, isTest should be set True and the file name specified so that this information is available when writing the submission file
global funcDict
data['time'] = data['time'] - latency
features = []
for func in featureFunctions:
features.append(funcDict[func](data))
data = pd.concat(features,axis=1)
if not isTest:
data['latency'] = latency
data['isSeizure'] = int(isSeizure)
data['isEarly'] = int(latency < 19 and isSeizure)
else:
data['testFile'] = testFile
return data
def loadTrainAndValidationSamples(dataSelector,featureFunctions,commonFrequency=-1):
#loads training samples and optionally splits off a chunk for validation
#dataSelector is a list of lists that have the form [patientName,fraction of seizure segments to use for validation,fraction of non-seizure segments for validation
#--for example [['Patient_2',0.5,0.5],['Dog_3',0.2,0.2]] would load the data for patient 2 and dog 3, putting half of the patient 2 data and 20% of the dog 3 data in the validation sample and the rest in the training sample
#featureFunctions specifies the list of features to use
#commonFrequency option is used to downsample the data to that frequency
entriesTrain = []
entriesValid = []
for patient in dataSelector:
files = os.listdir('%s/%s'%(dataDirectory,patient[0]))
ictal = []
interictal = []
for phil in files:
if phil.count('_inter') > 0:
interictal.append(phil)
elif phil.count('_ictal_') > 0:
ictal.append(phil)
for i in ictal:
tmpData = loadData("%s/%s/%s"%(dataDirectory,patient[0],i))
lat = tmpData['time'][0]
if commonFrequency > 0:
downFactor = float(len(tmpData['time'])) / commonFrequency
if downFactor > 1.0:
tmpData = downSample(tmpData,downFactor)
featureSet = convertToFeatureSeries(tmpData,featureFunctions,True,lat)
if np.random.random() > patient[1]:
entriesTrain.append(featureSet)
else:
entriesValid.append(featureSet)
for ii in interictal:
tmpData = loadData("%s/%s/%s"%(dataDirectory,patient[0],ii))
lat = tmpData['time'][0]
if commonFrequency > 0:
downFactor = float(len(tmpData['time'])) / commonFrequency
if downFactor > 1.0:
tmpData = downSample(tmpData,downFactor)
featureSet = convertToFeatureSeries(tmpData,featureFunctions,False,0)
if np.random.random() > patient[2]:
entriesTrain.append(featureSet)
else:
entriesValid.append(featureSet)
if len(entriesTrain) == 0:
print "ERROR: No entries in training sample"
return {'train':0,'validation':0}
trainSample = pd.concat(entriesTrain,ignore_index=True)
if len(entriesValid) == 0:
return {'train':trainSample,'validation':0}
validSample = pd.concat(entriesValid,ignore_index=True)
return {'train':trainSample,'validation':validSample}
def loadTestSample(featureFunctions,commonFrequency=-1):
#loads test data
#arguments same as corresponding arguments for loadTrainAndValidationSamples
patientList = ['Dog_1','Dog_2','Dog_3','Dog_4','Patient_1','Patient_2','Patient_3','Patient_4','Patient_5','Patient_6','Patient_7','Patient_8']
entries = []
for patient in patientList:
files = os.listdir('%s/%s'%(dataDirectory,patient))
for phil in files:
if phil.count('test') > 0:
tmpData = loadData("%s/%s/%s"%(dataDirectory,patient,phil))
if commonFrequency > 0:
downFactor = float(len(tmpData['time'])) / commonFrequency
if downFactor > 1.0:
tmpData = downSample(tmpData,downFactor)
featureSet = convertToFeatureSeries(tmpData,featureFunctions,isTest=True,testFile=phil)
entries.append(featureSet)
testSample = pd.concat(entries,ignore_index=True)
return testSample
def loadIndivTestSamples(dataSelector, featureFunctions,commonFrequency=-1):
#loads test data
#arguments same as corresponding arguments for loadTrainAndValidationSamples
#patientList = ['Dog_1','Dog_2','Dog_3','Dog_4','Patient_1','Patient_2','Patient_3','Patient_4','Patient_5','Patient_6','Patient_7','Patient_8']
entries = []
for patient in dataSelector:
files = os.listdir('%s/%s'%(dataDirectory,patient[0]))
for phil in files:
if phil.count('test') > 0:
tmpData = loadData("%s/%s/%s"%(dataDirectory,patient[0],phil))
if commonFrequency > 0:
downFactor = float(len(tmpData['time'])) / commonFrequency
if downFactor > 1.0:
tmpData = downSample(tmpData,downFactor)
featureSet = convertToFeatureSeries(tmpData,featureFunctions,isTest=True,testFile=phil)
entries.append(featureSet)
testSample = pd.concat(entries,ignore_index=True)
return testSample
def trainRandomForest(trainDF):
#trains a random forest on the training sample and returns the trained forest
trainArray = trainDF.values
forest = RandomForestClassifier(n_estimators=1000)
return forest.fit(trainArray[:,0:-3],trainArray[:,-2:])
def validateRandomForest(forest,validDF,latencyBinWidth=-1):
#prints efficiency and false positive metrics and plots efficiency vs. latency for a given forest using the validation sample
output = forest.predict(validDF.values[:,0:-3])
validDF['PiS'] = output[:,0].astype(int)
validDF['PiE'] = output[:,1].astype(int)
for key,group in validDF.groupby('isSeizure'):
if key:
print "Efficiency for seizure detection: ",group['PiS'].mean()
for k,g in group.groupby('isEarly'):
if k:
print "Efficiency for early seizure detection: ",g['PiE'].mean()
df = group.groupby('latency').mean()
if latencyBinWidth > 1.0:
df = downSample(df,latencyBinWidth)
plt.plot(np.array(df.index),df['PiS'],'b-')
plt.plot(np.array(df.index),df['PiE'],'r-')
plt.xlabel('latency')
plt.ylabel('efficiency')
plt.title('Detection efficiency vs. Latency')
plt.savefig('efficiencySeizure.png')
else:
print "False positive rate for seizure: ",group['PiS'].mean()
print "False positive rate for early seizure: ",group['PiE'].mean()
return validDF
def trainDoubleForest(trainDF):
#trains a random forest on the training sample and returns the trained forest
trainArray = trainDF.values
forestSeizure = ExtraTreesClassifier(n_estimators=1000, min_samples_split = 1)
forestEarly = ExtraTreesClassifier(n_estimators=1000, min_samples_split = 1)
return {'seizure':forestSeizure.fit(trainArray[:,0:-3],trainArray[:,-2]),'early':forestEarly.fit(trainArray[:,0:-3],trainArray[:,-1])}
def validateDoubleForest(forests,validDF,latencyBinWidth=-1):
#prints efficiency and false positive metrics and plots efficiency vs. latency for a given forest using the validation sample
seizure = forests['seizure'].predict(validDF.values[:,0:-3])
early = forests['early'].predict(validDF.values[:,0:-3])
validDF['PiS'] = seizure.astype(int)
validDF['PiE'] = early.astype(int)
for key,group in validDF.groupby('isSeizure'):
if key:
print "Efficiency for seizure detection: ",group['PiS'].mean()
for k,g in group.groupby('isEarly'):
if k:
print "Efficiency for early seizure detection: ",g['PiE'].mean()
df = group.groupby('latency').mean()
if latencyBinWidth > 1.0:
df = downSample(df,latencyBinWidth)
plt.plot(np.array(df.index),df['PiS'],'b-')
plt.plot(np.array(df.index),df['PiE'],'r-')
plt.xlabel('latency')
plt.ylabel('efficiency')
plt.title('Detection efficiency vs. Latency')
plt.savefig('efficiencySeizure.png')
else:
print "False positive rate for seizure: ",group['PiS'].mean()
print "False positive rate for early seizure: ",group['PiE'].mean()
return validDF
def testProbs(forestList,testDF):
#runs the forest on the test sample and returns output
output = []
for forest in forestList:
output.append(forest.predict_proba(testDF.values[:,0:-1])[:,1])
output = np.array(output).T
return output
def makeSubmit(output, testDF):
# writes submission file
outFile = open("submission.csv","wb")
csv_writer = csv.writer(outFile)
csv_writer.writerow(['clip','seizure','early'])
csv_writer.writerows(zip(testDF['testFile'].values,output[:,0].astype(float),output[:,1].astype(float)))
outFile.close()
return
def makeSubmission(forestList,testDF):
#runs the forest on the test sample and writes submission file
output = []
for forest in forestList:
output.append(forest.predict_proba(testDF.values[:,0:-1])[:,1])
output = np.array(output).T
outFile = open("submission.csv","wb")
csv_writer = csv.writer(outFile)
csv_writer.writerow(['clip','seizure','early'])
csv_writer.writerows(zip(testDF['testFile'].values,output[:,0].astype(float),output[:,1].astype(float)))
outFile.close()
return
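#A rough end-to-end usage sketch (the feature names and data selector below are examples
#only; feature names must exist in funcDict from features.py, and the patient folders in
#data/clips):
#  samples = loadTrainAndValidationSamples([['Dog_1',0.2,0.2]],['meanPower','variance'],commonFrequency=400)
#  forests = trainDoubleForest(samples['train'])
#  validateDoubleForest(forests,samples['validation'])
#  testDF = loadTestSample(['meanPower','variance'],commonFrequency=400)
#  makeSubmission([forests['seizure'],forests['early']],testDF)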
def plotFeatures(feat1,feat2,data):
m1 = np.mean(data[feat1])
s1 = np.std(data[feat1])
m2 = np.mean(data[feat2])
s2 = np.std(data[feat2])
plt.subplot(1,2,1)
for key,group in data.groupby('isSeizure'):
if key:
plt.plot((group[feat1]-m1)/s1,(group[feat2]-m2)/s2,'ro')
else:
plt.plot((group[feat1]-m1)/s1,(group[feat2]-m2)/s2,'bo')
plt.xlabel(feat1)
plt.ylabel(feat2)
plt.legend(( 'seizure' , 'non-seizure' ))
plt.subplot(1,2,2)
for key,group in data.groupby('isEarly'):
if key:
plt.plot((group[feat1]-m1)/s1,(group[feat2]-m2)/s2,'ro')
else:
plt.plot((group[feat1]-m1)/s1,(group[feat2]-m2)/s2,'bo')
plt.xlabel(feat1)
plt.ylabel(feat2)
plt.legend(( 'early' , 'non-early' ))
plt.savefig('%s_v_%s.png'%(feat2,feat1))
|
@bot.command(pass_context=True, name="태그")
async def 태그(ctx, *names):
name = ''
i = 0
for word in names:
i += 1
if word == names[-1] and len(names) == i:
name += str(word)
else:
name += str(word) + ' '
if name == '':
await bot.send_message(ctx.message.channel, "태그 기능은 디피 태그 (제목)을 이용해 만드실 수 있으며, 디피 t(제목)을 이용해 보실 수 있습니다.")
else:
await bot.send_message(ctx.message.channel, "제목: %s\n내용을 입력해 주세요." % name)
body = await bot.wait_for_message(timeout=100.0, author=ctx.message.author)
        if body is not None and body.content.strip() != '':
@bot.command(name="t" + name, pass_context=True)
async def tag(ctx):
await bot.send_message(ctx.message.channel, body.content)
print(name)
print(body.content)
await taginsert("tag", name, body.content)
await bot.send_message(ctx.message.channel, "등록되었습니다.")
else:
await bot.send_message(ctx.message.channel, "공백은 안돼요!")
@bot.event
async def taginit(name, message):
nameclone = copy.deepcopy(name)
messageclone = copy.deepcopy(message)
@bot.command(name="t" + nameclone, pass_context=True)
async def tag(ctx):
await bot.send_message(ctx.message.channel, messageclone)
print(nameclone)
print(messageclone)
@bot.command(pass_context=True)
async def 리브투어(ctx, *words):
plaintext = ''
i = 0
for word in words:
i += 1
if word == words[-1] and len(words) == i:
plaintext += str(word)
else:
plaintext += str(word) + ' '
answer = translator.sentence_split(plaintext)
await bot.send_message(ctx.message.channel, answer)
@bot.command(pass_context=True)
async def 이스텔어(ctx, *words):
    plaintext = ' '.join(str(word) for word in words)
answer = istelish.sentence_split(plaintext)
await bot.send_message(ctx.message.channel, answer)
@bot.command(pass_context=True)
async def 이스텔어입력(ctx, istelish, korean):
await bot.send_message(ctx.message.channel, "꼭 이스텔어-한국어의 순서대로 쓰세요!.")
await bot.send_message(ctx.message.channel, "이스텔어: {0}".format(istelish))
await bot.send_message(ctx.message.channel, "한국어: {0}".format(korean))
await istelishinsert("", istelish, korean)
await bot.send_message(ctx.message.channel, "입력되었습니다.")
@bot.command(pass_context=True)
async def 리브투어입력(ctx, leavetolanguage, korean):
translator.insert(korean, leavetolanguage)
await bot.send_message(ctx.message.channel, "입력되었습니다.")
@bot.command(pass_context=True)
async def 토론방로그(ctx):
if discord.utils.get(ctx.message.author.roles, name="호민관") is None:
await bot.send_message(ctx.message.channel, "권한이 없습니다. 호민관만 접근 가능합니다.")
else:
await bot.send_file(ctx.message.channel, "logadminchannel.txt")
def runinception(picture):
return simplepredict.findfaceanddetect(picture)
@bot.command(pass_context=True)
async def 그림추론(ctx):
await bot.send_message(ctx.message.channel, "사진을 올려주세요!\n기계학습 라이브러리 텐서플로우 & inception v3 모델 기반.\n현재 가능한 캐릭터: 뮤즈 9인, 츠시마 요시코, 하츠네 미쿠, 시부야 린, 와타나베 요우, 시마무라 우즈키, 타카미 치카, IWS-2000, 요네바야시 사이코.\n사진을 업로드하시면, 이용자는 업로드한 사진을 기계학습 목적을 위해서 제작자에게 제공하는 것에 동의하신걸로 간주됩니다.")
body = await bot.wait_for_message(author=ctx.message.author)
if body:
try:
url = body.attachments[0]['url']
async def get(url):
async with aiohttp.get(url) as r:
if r.status == 200:
return await r.read()
else:
return None
file = await get(url)
if file:
if ".jpg" or ".JPG" or ".jpeg" or ".JPEG" in url:
ext = "jpg"
elif ".png" or ".PNG" in url:
ext = "png"
else:
await bot.send_message(ctx.message.channel, "지원하지 않는 확장자입니다.")
with open("%s.%s" % (body.attachments[0]['id'], ext), 'wb') as picture:
picture.write(file)
prediction = runinception("%s.%s" % (body.attachments[0]['id'], ext))
if prediction is not None:
await bot.send_message(ctx.message.channel, "으음...\n제 생각에는 %s퍼센트로 %s일것 같네요!" % (prediction[2], prediction[1]))
else:
await bot.send_message(ctx.message.channel, "이런! 인식에 실패했어요!")
except:
await bot.send_message(ctx.message.channel, "사진이 없는듯 하네요?")
raise
async def Aloh(self, ctx):
with open("data/Alohbackup.txt", "r", encoding="utf-8") as a:
body = ''
while True:
sayd = a.readline()
if sayd == "0.5배라는 건가요" or sayd == "Speed 0.5는":
body += sayd + '\n'
else:
if sayd:
sayd = sayd.replace('\n', '')
checker = bool(re.match('[0-9]+[.]', sayd))
if checker is True:
if body == '':
body += sayd + '\n'
num = re.match(
'[0-9]+[.]', sayd).group().replace('.', '')
await alohinsert("alohsayd", int(num) - 1, "Alohsayd " + str(int(num) - 1), body)
body = sayd + '\n'
print(num)
print(sayd)
else:
body += sayd + '\n'
else:
body += '\n' + sayd + '\n'
await alohinsert("alohsayd", int(1172), "Alohsayd " + str(1172), body)
body = ''
print(num)
print(sayd)
break
|
from flask import Blueprint, request
from app.models import db, Photo
photo_routes = Blueprint('photos', __name__)
@photo_routes.route('/<int:id>', methods=["DELETE"])
def delete_photo(id):
    photo = Photo.query.get(id)
    if photo is None:
        return {"message": "Photo not found"}, 404
    db.session.delete(photo)
    db.session.commit()
    return {"message": "Delete Successful"} |
# -*- coding: UTF-8 -*-
from collections import defaultdict
from parsers.Parsers import StringParser, IntParser, DatetimeParser, LineParser
from utils import Exchange, Protocol
from parsers.Quotes import QuoteSnapshot
def test_head_rule_file():
rule_file = '../datas/head_rule.conf'
ex = Exchange.SH
protocol = Protocol.FILE
line_type = LineParser.HEAD
head_parser = LineParser(ex, protocol, rule_file, line_type)
assert head_parser.parse_rules[0]['Seq'] == '1'
assert head_parser.parse_rules[1]['Seq'] == '2'
assert head_parser.parse_rules[2]['Seq'] == '3'
assert head_parser.parse_rules[3]['Seq'] == '4'
assert head_parser.parse_rules[4]['Seq'] == '5'
assert head_parser.parse_rules[5]['Seq'] == '6'
assert head_parser.parse_rules[6]['Seq'] == '7'
assert head_parser.parse_rules[7]['Seq'] == '8'
assert head_parser.parse_rules[8]['Seq'] == '9'
d = defaultdict(dict)
d['Seq'] = '1'
d['Name'] = 'BeginString'
d['Desc'] = 'Start identify'
d['Rule_String'] = 'C6'
d['Value'] = 'HEADER'
assert head_parser.parse_rules[0]['Seq'] == d['Seq']
assert head_parser.parse_rules[0]['Name'] == d['Name']
assert head_parser.parse_rules[0]['Desc'] == d['Desc']
assert head_parser.parse_rules[0]['Rule_String'] == d['Rule_String']
assert head_parser.parse_rules[0]['Value'] == d['Value']
assert isinstance(head_parser.parse_rules[0]['Parser'], StringParser)
assert isinstance(head_parser.parse_rules[1]['Parser'], StringParser)
assert isinstance(head_parser.parse_rules[2]['Parser'], IntParser)
assert isinstance(head_parser.parse_rules[3]['Parser'], IntParser)
assert isinstance(head_parser.parse_rules[4]['Parser'], IntParser)
assert isinstance(head_parser.parse_rules[5]['Parser'], StringParser)
assert isinstance(head_parser.parse_rules[6]['Parser'], DatetimeParser)
assert isinstance(head_parser.parse_rules[7]['Parser'], IntParser)
assert isinstance(head_parser.parse_rules[8]['Parser'], StringParser)
assert head_parser.parse_rules[0]['Value'] == 'HEADER'
assert head_parser.parse_rules[1]['Value'] == 'MTP1.00'
    assert head_parser.parse_rules[2]['Value'] == ''
    assert head_parser.parse_rules[3]['Value'] == ''
    assert head_parser.parse_rules[4]['Value'] == ''
    assert head_parser.parse_rules[5]['Value'] == ''
    assert head_parser.parse_rules[6]['Value'] == ''
    assert head_parser.parse_rules[7]['Value'] == '0'
    assert head_parser.parse_rules[8]['Value'] == ''
|
from socket import * # Contains everything you need to set up sockets
from Prime import is_prime
# Create server port address and socket
server_port = 400
server_socket = socket(AF_INET, SOCK_DGRAM) # Uses IPV4. SOCK_DGRAM indicates UDP
# Assign the port number server_port to the server's socket
server_socket.bind(('', server_port))
print "The server is ready to receive..."
# While loop to continuously to constantly be available for messages
while 1:
# Store message and client address
message, client_address = server_socket.recvfrom(2048)
# Check if message is prime
num_to_check = int(message) # Cast data to integer
if is_prime(num_to_check):
result = "Number is prime! :-)"
else:
result = "Number is NOT prime. :-("
# Send message back to client using client address
server_socket.sendto(result, client_address)
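# --- Example client (a sketch, not part of the original file) ---
# The server above reads a number from a UDP datagram and replies with a string,
# so a matching Python 2 client could look like this (same port, 400, assumed):
#
#   client_socket = socket(AF_INET, SOCK_DGRAM)
#   client_socket.sendto("17", ("localhost", 400))
#   reply, _ = client_socket.recvfrom(2048)
#   print reply
#   client_socket.close()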
|
from itertools import count
from typing import Optional
import numpy as np
from matplotlib import patches
from ..lattice import Lattice
from .base import BaseElement
from .utils import straight_element
class Drift(BaseElement):
"""Drift element.
Args:
l: Drift length in meters.
name (optional): Element name.
Attributes:
l: Element length in meters.
length: Element length in meters.
m: Element phase space transfer matrix.
name: Element name.
"""
_instance_count = count(0)
def __init__(self, l: float, name: Optional[str] = None):
super().__init__("l", "name")
self.l = l
if name is None:
name = f"drift_{next(self._instance_count)}"
self.name = name
def _get_length(self) -> float:
return self.l
def _get_transfer_matrix(self) -> np.ndarray:
out = np.identity(5)
out[0, 1] = self.length
out[2, 3] = self.length
return out
def slice(self, n_drifts: int) -> Lattice:
"""Slice the element into a many smaller elements.
Args:
n_drifts: Number of :py:class:`Drift` elements.
Returns:
:py:class:`~accelerator.lattice.Lattice` of sliced :py:class:`Drift`
elements.
"""
out = [
Drift(self.length / n_drifts, name=f"{self.name}_slice_{i}")
for i in range(n_drifts)
]
return Lattice(out)
def _get_patch(self, s: float) -> patches.Patch:
return patches.Rectangle(
(s, -0.5), self.length, 1, facecolor="tab:gray", alpha=0.5, label="Drift"
)
@staticmethod
def _dxztheta_ds(theta: float, d_s: float) -> np.ndarray:
return straight_element(theta, d_s)
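# Example usage (an illustrative sketch, not part of the original module):
#
#   drift = Drift(2.0)          # a 2 m drift
#   sliced = drift.slice(4)     # Lattice of 4 Drift elements, 0.5 m each
#
# Slicing is handy when the lattice needs to be sampled at a finer resolution
# than the element boundaries.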
|
import comments
from django.shortcuts import redirect, render, HttpResponse, get_object_or_404
from comments.models import Page, UserComment
from django.contrib.auth.models import User
from comments.templatetags import extras
from .serializers import commentSerializer
from django.http import HttpResponseRedirect, Http404, HttpResponse
from rest_framework.renderers import JSONRenderer
from django.contrib.auth.decorators import login_required
from django.contrib.contenttypes.models import ContentType
#for basic html
def home(request):
pages = Page.objects.all()
context = {'pages': pages}
return render(request,'index.html',context)
def getpage(request , slug):
page = Page.objects.filter(slug=slug).first()
comments = UserComment.objects.filter(page=page , parent=None)
replies= UserComment.objects.filter(page=page).exclude(parent=None)
replyDict={}
for reply in replies:
if reply.parent.id not in replyDict.keys():
replyDict[reply.parent.id]=[reply]
else:
replyDict[reply.parent.id].append(reply)
context = {'page': page ,'comments':comments ,'user': request.user ,'replyDict': replyDict}
return render(request,'page.html',context)
#function for posting new comments
def post_comment(request):
if request.method == "POST":
comment=request.POST.get("comment")
user = request.user
pageid =request.POST.get('pageid')
page = Page.objects.get(id=pageid)
parentid= request.POST.get('parentid')
if parentid=="":
comment=UserComment(comment= comment, user=user, page=page)
comment.save()
else:
parent= UserComment.objects.get(id=parentid)
comment=UserComment(comment= comment, user=user, page=page , parent=parent)
comment.save()
return redirect(f"/{page.slug}")
#function to get the list of all comments on a page
@login_required #API URL = '/comment/'
def comment_list(request):
try:
all_comments = UserComment.objects.all()
except:
raise Http404
serializer = commentSerializer(all_comments, many=True)
json_data = JSONRenderer().render(serializer.data)
return HttpResponse(json_data,content_type='application/json')
#function to delete a comment or thread of comments
def comment_delete(request, id):
try:
obj = UserComment.objects.get(id=id)
except:
raise Http404
if request.method == "POST":
parent_obj_url = obj.content_object.get_absolute_url()
obj.delete()
return HttpResponseRedirect(parent_obj_url)
context = {"object": obj }
return render(request, "confirm_delete.html", context)
|
import tensorflow as tf
import numpy as np
import os
from .settings import settings
#Architectural Notes:
#1. Generator: GRU Units
# - Many to Many
# - L1: Number units: 128, Activation = ELU
# - L2: Number units: 32, Activation = ELU
# - Dense: on each head and use tf.gather to get each output, and calculate avg loss in each step
# and minimize that
# - softmax activation, probablity sampling
# - Softmax Cross Entropy
#2. Discriminator: GRU Units
# - Many to One
# - L1: Number of Units: 32 Activation = ELU
# - Dense: 128: Activation: sigmoid
# - Dense: 128: Activation: sigmoid
# - Loss: Sigmoid Cross Entropy
class Generator:
def __init__(self,**kwargs):
'''
Params:
L1(int): Number of GRU Units in Layer 1
L2(int): Number of GRU UNits in Layer 2
noise: Tensor having Noise Component
Shape: [Batch_Size, Timesteps, Size]
keep: Keep Probability for Dropout Layers
disc: Discriminator Class Object
real: Tensor of Real Data
'''
self.l1 = kwargs.get("L1",128)
self.l2 = kwargs.get("L2",32)
self.z = kwargs["noise"]
self.kp = kwargs.get("keep",0.7)
self.disc = kwargs["disc"]
self.real = kwargs["real"]
self.batch_size = tf.shape(self.z)[0]
def dense(self,X,out):
with tf.name_scope("dense"):
w = tf.Variable(tf.random_normal([X.get_shape()[-1].value,out]))
b = tf.Variable(tf.random_normal([out]))
return tf.matmul(X,w)+b
def use_disc(self,input_):
disc_out = self.disc.set_input(input_).build(reuse = True) # include variable reuse in build method of disc
return disc_out
def optimize(self,learning_rate = 0.01):
return tf.train.AdagradOptimizer(learning_rate = learning_rate).minimize(self.loss,var_list = self.train_vars)
def build(self):
self.z = tf.transpose(self.z,[1,0,2])
with tf.variable_scope("GEN"):
with tf.name_scope("GRU"):
cells = [tf.nn.rnn_cell.GRUCell(self.l1),tf.nn.rnn_cell.GRUCell(self.l2)]
cells = list(map(lambda x:tf.nn.rnn_cell.DropoutWrapper(x,output_keep_prob = self.kp),cells))
rnn_cell = tf.nn.rnn_cell.MultiRNNCell(cells)
out,state = tf.nn.dynamic_rnn(rnn_cell,self.z,dtype = tf.float32,time_major = True)
out = tf.reshape(out,[-1,self.l2])
out = tf.nn.relu(self.dense(out,self.l2))#),[-1,self.z.get_shape()[1].value,self.l2])
self.z = tf.transpose(self.z,[1,0,2])
targetFeature = self.z.get_shape()[-1].value#self.z.get_shape()[1:].num_elements()
out = tf.reshape(self.dense(out,targetFeature),[-1,self.z.get_shape()[1].value,self.z.get_shape()[-1].value])
out = tf.nn.softmax(out,axis = 1,name = "output") # To Find Probability Distribution over each batch, which starts from axis=1
#out = tf.transpose(out,[1,0,2])
self.train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,scope="GEN")
self.output = out
disc_out = self.use_disc(out)
self.adv_out = disc_out
self.loss = -tf.reduce_mean(disc_out)
return self
class Discriminator:
def __init__(self,**kwargs):
'''
Params:
L1(int): Number of GRU Units in Layer 1
L2(int): Number of GRU UNits in Layer 2
input: Tensor having inputs to Discriminator
Shape: [Batch_Size, Timesteps, Size]
keep: Keep Probability for Dropout Layers
'''
self.L1 = kwargs.get("L1",128)
self.L2 = kwargs.get("L2",32)
self.input = kwargs["input"]
self.kp = kwargs.get("keep",0.8)
self.batch_size = tf.shape(self.input)[0]
def set_input(self,tensor):
assert self.input.get_shape()[1:] == tensor.get_shape()[1:]
self.input_ = tensor
return self
def dense(self,X,out,name):
w = tf.get_variable("w_"+name,[X.get_shape()[-1].value,out])
b = tf.get_variable("b_"+name,[out])
return tf.matmul(X,w)+b
def optimize(self,lossTensor,learning_rate = 0.01):
return tf.train.AdagradOptimizer(learning_rate = learning_rate).minimize(lossTensor,var_list = self.train_vars)
def build(self,reuse = False):
if not reuse:
inp = self.input
else:
inp = self.input_
with tf.variable_scope("DISC",reuse = reuse):
with tf.name_scope("GRU"):
cells = [tf.nn.rnn_cell.GRUCell(self.L1,reuse = reuse),tf.nn.rnn_cell.GRUCell(self.L2,reuse = reuse)]
cells = list(map(lambda x:tf.nn.rnn_cell.DropoutWrapper(x,output_keep_prob = self.kp),cells))
rnn_cell = tf.nn.rnn_cell.MultiRNNCell(cells)
inp = tf.transpose(inp,[1,0,2])
out,state = tf.nn.static_rnn(rnn_cell,tf.unstack(inp),dtype = tf.float32)
out = out[-1]
out = tf.nn.relu(self.dense(out,128,"h1"))
out = tf.nn.sigmoid(self.dense(out,1,"h2"))
inp = tf.transpose(inp,[1,0,2])
if not reuse:
self.input = inp
self.train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,scope = "DISC")
self.output = out
return self
return out
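# --- Wiring sketch (illustrative only, not part of the original module) ---
# One possible way to connect the two classes above for adversarial training;
# the placeholder shapes [batch, timesteps, features] and the discriminator loss
# below are assumptions, not taken from the original code:
#
#   real = tf.placeholder(tf.float32, [None, 16, 8])
#   noise = tf.placeholder(tf.float32, [None, 16, 8])
#   disc = Discriminator(input=real, L1=128, L2=32).build()
#   gen = Generator(noise=noise, disc=disc, real=real).build()
#   d_loss = -tf.reduce_mean(tf.log(disc.output) + tf.log(1. - gen.adv_out))
#   d_step = disc.optimize(d_loss)
#   g_step = gen.optimize()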
|
from django.shortcuts import render_to_response
from django.template import RequestContext
from wouso.core.game import get_games
from wouso.core.game.models import Game
from wouso.interface import logger
def games(request):
""" List of games """
wgs = []
for model in get_games():
wgs.append({'link': model._meta.app_label,
'name': model._meta.verbose_name}
)
return render_to_response('interface/games.html',
{'games': wgs},
context_instance=RequestContext(request))
|
from __future__ import unicode_literals
from frappe import _
import frappe
COMMENT_ADDED_FROM_TEMPLATE = '''<span class="text-muted">comment from %s</span>'''
def before_save(doc, method=None):
if doc.reference_doctype in ("Back Order", "Sales Invoice", "Delivery Note", "Packing Slip"):
doc_reference = frappe.get_doc(_(doc.reference_doctype), doc.reference_name)
if frappe.db.exists('Sales Order', doc_reference.get('sales_order')):
doc_sales_order = frappe.get_doc('Sales Order', doc_reference.get('sales_order'))
comment = doc.content + COMMENT_ADDED_FROM_TEMPLATE % doc.reference_name
doc_sales_order.add_comment(doc.communication_medium, text=comment, comment_by=doc.user)
frappe.clear_cache()
|
from subprocess import Popen
import os
import time
from marmot.representations.representation_generator import RepresentationGenerator
from marmot.experiment.import_utils import mk_tmp_dir
class POSRepresentationGenerator(RepresentationGenerator):
def _get_random_name(self, suffix=''):
return 'tmp_'+suffix+str(time.time())
def _get_pos_tagging(self, src, tagger, par_file, tmp_dir):
# tokenize and add the sentence end marker
# tokenization is done with nltk
tmp_tokenized_name = os.path.join(tmp_dir, self._get_random_name('tok'))
        tmp_tok = open(tmp_tokenized_name, 'w+')
for words in src:
tmp_tok.write('%s\nSentenceEndMarker\n' % '\n'.join([w.encode('utf-8') for w in words]))
tmp_tok.seek(0)
# pass to tree-tagger
tmp_tagged_name = os.path.join(tmp_dir, self._get_random_name('tag'))
        tmp_tagged = open(tmp_tagged_name, 'w+')
tagger_call = Popen([tagger, '-token', par_file], stdin=tmp_tok, stdout=tmp_tagged)
tagger_call.wait()
tmp_tagged.seek(0)
# remove sentence markers, restore sentence structure
output = []
cur_sentence = []
for line in tmp_tagged:
word_tag = line[:-1].decode('utf-8').strip().split('\t')
# each string has to be <word>\t<tag>
# TODO: if it's not of this format, it could be the end of sequence (empty string) or an error
if len(word_tag) != 2:
continue
if word_tag[0] == 'SentenceEndMarker':
output.append(cur_sentence)
cur_sentence = []
else:
cur_sentence.append(word_tag[1])
tmp_tok.close()
tmp_tagged.close()
# delete all temporary files
os.remove(tmp_tokenized_name)
os.remove(tmp_tagged_name)
return output
# <tagger> -- path to tree-tagger
# <parameters> -- parameters of tree-tagger
# <data_label> -- which data should be tagged ('source' or 'target')
def __init__(self, tagger, parameters, data_label, tmp_dir=None):
self.tmp_dir = mk_tmp_dir(tmp_dir)
self.tagger = tagger
self.parameters = parameters
self.data = data_label
def generate(self, data_obj):
data_obj[self.data+'_pos'] = self._get_pos_tagging(data_obj[self.data], self.tagger, self.parameters, self.tmp_dir)
return data_obj
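# Example usage (an illustrative sketch; the tree-tagger and parameter-file paths
# below are placeholders, not taken from the original code):
#
#   gen = POSRepresentationGenerator('/opt/tree-tagger/bin/tree-tagger',
#                                    '/opt/tree-tagger/lib/english.par',
#                                    'target', tmp_dir='tmp')
#   data_obj = gen.generate({'target': [[u'a', u'small', u'test']]})
#   # data_obj now also holds a 'target_pos' list with one tag sequence per sentence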
|
import cherrypy
from wsmeext.tg1 import wsexpose, wsvalidate
import wsmeext.tg1
__all__ = ['adapt', 'wsexpose', 'wsvalidate']
def scan_api(root=None):
for baseurl, instance in cherrypy.tree.apps.items():
path = [token for token in baseurl.split('/') if token]
for i in wsmeext.tg1._scan_api(instance.root, path):
yield i
def adapt(wsroot):
wsroot._scan_api = scan_api
controller = wsmeext.tg1.Controller(wsroot)
return controller
|
# 21/11/02 = Tue
# 191. Number of 1 Bits [Easy]
# Write a function that takes an unsigned integer and returns the number of '1'
# bits it has (also known as the Hamming weight).
# Note:
# Note that in some languages, such as Java, there is no unsigned integer type.
# In this case, the input will be given as a signed integer type. It should not
# affect your implementation, as the integer's internal binary representation
# is the same, whether it is signed or unsigned. In Java, the compiler
# represents the signed integers using 2's complement notation. Therefore, in
# Example 3, the input represents the signed integer -3.
# Example 1:
# Input: n = 00000000000000000000000000001011
# Output: 3
# Explanation: The input binary string 00000000000000000000000000001011 has a
# total of three '1' bits.
# Example 2:
# Input: n = 00000000000000000000000010000000
# Output: 1
# Explanation: The input binary string 00000000000000000000000010000000 has a
# total of one '1' bit.
# Example 3:
# Input: n = 11111111111111111111111111111101
# Output: 31
# Explanation: The input binary string 11111111111111111111111111111101 has a
# total of thirty one '1' bits.
# Constraints:
# The input must be a binary string of length 32.
# Follow up: If this function is called many times, how would you optimize it?
class Solution:
def hammingWeight(self, n: int) -> int:
ret = 0
for i in range(32):
ret += (n >> i) & 0x1
return ret
class Solution:
def hammingWeight(self, n: int) -> int:
return format(n, "b").count("1")
|
from .task import NotebookTask, record_outputs
|
from flask import Flask, request, jsonify, g
from datetime import datetime, timedelta, timezone
from collections import Counter
import os, sqlite3
app = Flask(__name__, static_url_path="", static_folder="static/dist")
# timeStart
# timeEnd
# returns => [{ x, y, magnitude }]
@app.get("/api/v1/locations")
def locations():
now = datetime.now(timezone.utc)
return locations_query(now - timedelta(minutes=30), now)
@app.get("/api/v1/locations/<int:start>/<int:end>")
def locations_interval(start, end):
return locations_query(datetime.fromtimestamp(start, timezone.utc), datetime.fromtimestamp(end, timezone.utc))
@app.get("/api/v1/simulated/locations/<int:start>/<int:end>")
def simulated_locations_interval(start, end):
return locations_query(datetime.fromtimestamp(start, timezone.utc), datetime.fromtimestamp(end, timezone.utc), simulated=True)
@app.get("/api/v1/simulated/locations")
def simulated_locations():
now = datetime.now(timezone.utc)
return locations_query(now - timedelta(minutes=30), now, simulated=True)
@app.get("/api/v1/data")
def data():
now = datetime.now(timezone.utc)
return locations_data(now - timedelta(minutes=30), now)
@app.get("/api/v1/data/<int:start>/<int:end>")
def data_interval(start, end):
return locations_data(datetime.fromtimestamp(start, timezone.utc), datetime.fromtimestamp(end, timezone.utc))
@app.get("/api/v1/simulated/data/<int:start>/<int:end>")
def simulated_data_interval(start, end):
return locations_data(datetime.fromtimestamp(start, timezone.utc), datetime.fromtimestamp(end, timezone.utc), simulated=True)
def normalize_point(lat, lon):
return round(lat, 6), round(lon, 6)
def locations_query(start, end, simulated=False):
cur = get_db().cursor()
table = "simulated" if simulated else "locations"
cur.execute(f"SELECT id, lat, lon, time FROM {table} WHERE ? <= time AND time <= ?", (start, end))
data = {}
for id, lat, lon, time in cur:
if id not in data or time > data[id][2]:
data[id] = lat, lon, time
return create_response(normalize_point(lat, lon) for lat, lon, time in data.values())
def locations_data(start, end, simulated=False):
cur = get_db().cursor()
table = "simulated" if simulated else "locations"
cur.execute(f"SELECT lat, lon FROM {table} WHERE ? <= time AND time <= ?", (start, end))
return create_response(normalize_point(lat, lon) for lat, lon in cur)
def create_response(data):
bins = Counter(data)
resp = jsonify([{"lat": lat, "lon": lon, "value": val} for (lat, lon), val in bins.items()])
resp.headers["Access-Control-Allow-Origin"] = "*"
return resp
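# Example (illustrative only): querying the last 30 minutes of locations
#
#   GET /api/v1/locations
#   -> [{"lat": 40.7128, "lon": -74.006, "value": 3}, ...]
#
# where "value" counts how many points fell into the same rounded (lat, lon)
# bin; the interval routes take Unix timestamps, e.g.
# /api/v1/locations/1600000000/1600001800.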
# @app.get("/api/v1/debug")
# def debug():
# cur = get_db().cursor()
# cur.execute("SELECT * FROM locations")
# return jsonify(cur.fetchall())
#
# # POST
# # id {DEPRECATED}
# # location: {x, y, z}
# @app.post("/api/v1/upload")
# def upload():
# conn = get_db()
# # cur_time = datetime.datetime.now()
# conn.execute("INSERT INTO locations (id, lat, lon, alt) VALUES (:id, :lat, :lon, :alt)", request.json)
# conn.commit()
# return jsonify(request.json)
#
# # POST
# # id
# # location: {x, y, z}
# @app.post("/api/v1/simulated/upload")
# def simulated_upload():
# conn = get_db()
# # cur_time = datetime.datetime.now()
# conn.execute("INSERT INTO simulated (id, lat, lon, alt) VALUES (:id, :lat, :lon, :alt)", request.json)
# conn.commit()
# return jsonify(request.json)
#
# @app.get("/api/v1/simulated/debug")
# def simulated_debug():
# cur = get_db().cursor()
# cur.execute("SELECT * FROM simulated")
# return jsonify(cur.fetchall())
#
# @app.post("/api/v1/simulated/dropall")
# def simulated_dropall():
# conn = get_db()
# # cur_time = datetime.datetime.now()
# conn.execute("DELETE FROM simulated", request.json)
# conn.commit()
# return "dropped"
# DB CODE
DATABASE = os.environ.get("DATABASE_PATH", "./database.db")
def get_db():
db = getattr(g, "_database", None)
if db is None:
db = g._database = sqlite3.connect(DATABASE, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
return db
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, "_database", None)
if db is not None:
db.close() |
import os, sys, email, re, base64, traceback
from datetime import datetime
def safe_b64decode(data):
    # pad the base64 string to a multiple of 4 characters before decoding
    remainder = len(data) % 4
    if remainder != 0:
        data = data + '=' * (4 - remainder)
    return base64.b64decode(data)
class MailExtactor:
"""
"""
def __init__(self):
self.dest_dir = r'./'
self.processd_file_count = 0
self.saved_file_count = 0
self.failed_file_count = 0
self.file_count_in_current_mail = 0
def set_dest_dir(self, dest):
self.dest_dir = dest
def save_file(self, content, dest_file):
if os.path.exists(dest_file):
print "[WARN] find exist file, name = " + dest_file
root, ext = os.path.splitext(dest_file)
dest_file = root + '_' + datetime.utcnow().strftime('%Y%m%d%H%M%S%f')[:-3] + ext
with open(dest_file, 'wb') as output:
output.write(content)
print "[INFO] save file into: " + dest_file
self.file_count_in_current_mail += 1
def save_attachment(self, filename, mime_type, content, encoding):
if 'image' in mime_type:
return
dest_file = ''
if encoding == 'base64':
if 'word' in mime_type:
dest_dir = os.path.join(self.dest_dir, 'office')
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
dest_file = os.path.join(dest_dir, filename)
else:
dest_file = os.path.join(self.dest_dir, filename)
b64_list = content.split('\n')
if len(b64_list[-1]) != len(b64_list[0]) and len(b64_list[-2]) != len(b64_list[0]):
del b64_list[-1]
# self.save_file(base64.b64decode(''.join(b64_list)), dest_file) # raise TypeError(msg) TypeError: Incorrect padding
self.save_file(safe_b64decode(''.join(b64_list)),dest_file)
elif encoding == 'quoted-printable' and '.wsf' in filename:
dest_dir = os.path.join(self.dest_dir, 'wsf')
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
dest_file = os.path.join(dest_dir, filename)
self.save_file(content, dest_file)
else:
print "[ERROR] Not supported encoding"
def analyze_mail_message(self, msg):
if msg.is_multipart():
msg_list = msg.get_payload()
for msg in msg_list:
self.analyze_mail_message(msg)
else:
            find_filename_in_content_type = False
            find_attachment = False
            find_filename_in_content_dispositioin = False
            # defaults so the checks below don't hit unbound locals when a header is missing
            filename = ''
            mime_type = ''
            content_transfer_encoding = ''
if msg.has_key("Content-Type"):
content_type = msg["Content-Type"].strip().lower()
# print content_type
mime_type = content_type.split(';')[0]
# print mime_type
if 'name' in content_type:
mo = re.search(r'name=[\"\']?(?P<filename>[^\"\';\r\n]*)[\"\']?', content_type)
if None == mo:
print "[REGEX ERROR] cannot find filename in content type"
else:
filename = mo.group("filename")
find_filename_in_content_type = True
if msg.has_key("Content-Transfer-Encoding"):
content_transfer_encoding = msg["Content-Transfer-Encoding"].strip().lower()
if msg.has_key("Content-Disposition"):
content_disposition = msg["Content-Disposition"].strip().lower()
# print content_disposition
if 'attachment' in content_disposition:
find_attachment = True
if not find_filename_in_content_type and 'filename' in content_disposition:
mo = re.search(r'filename=\s*[\"\']?(?P<filename>[^\"\';\r\n]*)[\"\']?', content_disposition)
if None == mo:
print "[REGEX ERROR] cannot find filename in content disposition"
else:
filename = mo.group("filename")
find_filename_in_content_dispositioin = True
if find_attachment or ((find_filename_in_content_type or find_filename_in_content_dispositioin) and ('base64' == content_transfer_encoding)):
filename = filename.replace('?', '_')
filename = filename.replace('/', '_')
filename = filename.replace('\\', '_')
self.save_attachment(filename, mime_type, msg.get_payload(), content_transfer_encoding)
return True
else:
return False
def analyze_mail_structure(self, mail_content):
try:
msg = email.message_from_string(mail_content)
self.analyze_mail_message(msg)
except Exception, e:
print "[ERROR] Cannot analyze email structure"
print e
print traceback.print_exc()
def extract_from_mail(self, mail_path):
if not os.path.exists(mail_path):
print "[ERROR] Cannot find mail path, " + mail_path
return
root, ext = os.path.splitext(mail_path)
if ext != '.eml':
print "[WARN] It's not .eml format, " + mail_path
return
print "Now process " + mail_path
self.processd_file_count += 1
self.file_count_in_current_mail = 0
with open(mail_path, 'r') as fh:
self.analyze_mail_structure(fh.read())
self.saved_file_count += self.file_count_in_current_mail
if self.file_count_in_current_mail == 0:
print 'ERROR!!!!!!!!!!!!!!!\n' * 10
self.failed_file_count += 1
def process_single_mail(self, file_path):
if not os.path.exists(self.dest_dir):
os.makedirs(self.dest_dir)
self.extract_from_mail(file_path)
def process_multiple_mail(self, folder_path):
if not os.path.exists(self.dest_dir):
os.makedirs(self.dest_dir)
        # os.walk already recurses into subdirectories, so a single pass is enough
        for root, dirs, files in os.walk(folder_path):
            for name in files:
                self.extract_from_mail(os.path.join(root, name))
def print_usage():
print """
Usage:
python extract_from_mail.py --[file|dir] input_path output_path
"""
if __name__ == '__main__':
if len(sys.argv) != 4:
print_usage()
exit(-1)
mail_extractor = MailExtactor()
mail_extractor.set_dest_dir(sys.argv[3])
if 'file' in sys.argv[1]:
mail_extractor.process_single_mail(sys.argv[2])
elif 'dir' in sys.argv[1]:
mail_extractor.process_multiple_mail(sys.argv[2])
else:
print "Unsupported argument!"
print "\n\n*************************************"
print "Processd file count: " + str(mail_extractor.processd_file_count)
print "Saved file count: " + str(mail_extractor.saved_file_count)
print "Failed file count: " + str(mail_extractor.failed_file_count)
|
import cv2
from tensorflow.python.keras.models import load_model
from pre_process import scale_and_center
def display_img(img):
cv2.imshow('sudoku', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
def extract_number(img):
temp = [[0 for i in range(9)] for j in range(9)]
for i in range(9):
for j in range(9):
image = img[i][j]
image = cv2.resize(image, (28,28))
thresh = 128
gray = cv2.threshold(image, thresh, 255, cv2.THRESH_BINARY)[1]
conts = cv2.findContours(gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
conts = conts[0] if len(conts)==2 else conts[1]
for c in conts:
x, y, w, h = cv2.boundingRect(c)
if(x <3 or y < 3 or h < 3 or w < 3):
continue
ROI = gray[y:y + h, x:x + w]
ROI = scale_and_center(ROI, 120)
cv2.imwrite("CleanedCells/cell{}{}.jpg".format(i, j), ROI)
temp[i][j] = predict(ROI)
return temp
def predict(img):
image = img.copy()
image = cv2.resize(image, (28, 28))
image = image.astype('float32')
image = image.reshape(1, 28, 28, 1)
image /= 255
model = load_model('model.hdf5')
pred = model.predict_classes(image.reshape(1, 28, 28, 1), batch_size=1)
#print(pred[0])
return pred[0]
|
"""
Universidad del Valle de Guatemala
Redes
CAtedrático: Vinicio Paz
Estuardo Ureta - Oliver Mazariegos - Pablo Viana
-> Logica del juego Old Maid
"""
import itertools, random
class OldMaid(object):
def __init__(self,players,copy):
        # List of players. Each player is a dictionary
        self.players = players
        # Global turn counter
        self.turn = 1
        # Index of the player whose turn it is; can only be 0, 1, 2
        self.playerTurn = 0
        # All pairs laid down by the players end up here;
        # pairs are stored as tuples
        self.board = []
        if not copy:
            print('Se llamo el shuffle')
            self.shuffle()
    # Function that builds a deck, shuffles it and deals cards to each player (see shuffle below)
def getPlayerTurn(self):
return self.playerTurn
def shuffle(self):
        # create the deck
        deck = list(itertools.product(range(1,5),['\u2660', '\u2661', '\u2662', '\u2663']))
        # shuffle the deck
        random.shuffle(deck)
        # remove the queens (rank 12)
        """
        commented out for testing
        deck.remove((12,'Heart'))
        deck.remove((12,'Club'))
        deck.remove((12,'Diamond'))
        """
        player = 0
        # dealing the cards
        for card in deck:
            # give a card to a player
self.players[player]['hand'].append(card)
if player == 2: player = 0
else: player += 1
def nextTurn(self):
print('cambio de turno')
if self.playerTurn == 2:
self.playerTurn = 0
else: self.playerTurn += 1
self.turn += 1
    # Function that determines who the winners are
def winners(self):
ganadores = []
for player in self.players:
if not player['hand']: ganadores.append(player['username'])
return ganadores
    # Function that checks whether the game is over; if so, it returns the winners
def isOver(self):
if len(self.board) == 24: return self.winners()
        else: return False
    # Function that checks whether a player has a pair in hand
def hasPair(self, playerIndex):
hand = self.players[playerIndex]['hand']
cont = 0
for i in range(len(hand)):
current = hand[i]
for j in range(i+1,len(hand)):
possible = hand[j]
                # check whether there is a pair
if current[0] == possible[0]:
cont += 1
if cont > 0:
return True
else:
return False
    # Function that checks whether the player has a pair with a specific card and removes it from their hand if so
def isPair(self, playerIndex, key):
hand = self.players[playerIndex]['hand']
cont = 0
for i in range(len(hand)):
current = hand[i][0]
if current == key:
cont +=1
if cont == 2:
break
cards = []
if cont == 2:
for i in range(len(hand)):
current = hand[i][0]
if current == key:
cards.append(i)
cont -=1
if cont == 0:
#st = self.getStatus()
#print(st)
#self.oponent[playerIndex]['hand'].pop(cards[1])
#self.oponent[playerIndex]['hand'].pop(cards[0])
self.board.append((self.players[playerIndex]['hand'].pop(cards[1]),self.players[playerIndex]['hand'].pop(cards[0])))
#self.oponent[playerIndex]['hand'].pop(cards[1])
#self.oponent[playerIndex]['hand'].pop(cards[0])
#self.players[playerIndex]['hand'].pop(cards[1])
#self.players[playerIndex]['hand'].pop(cards[0])
#stat = self.getStatus()
#print(stat)
break
#return self.players[playerIndex]['hand']
return True
else:
return False
    # Function that lists the possible moves
def listMoves(self):
if self.playerTurn == 0:
oponent = 2
else:
oponent = self.playerTurn - 1
return list(range(len(self.players[oponent]['hand'])))
    # Function that performs a move. Receives the index of the card the player wants to draw
def move(self, cardPicked):
oponent = self.oponent()
print(f"hand before move {self.players[self.playerTurn]['hand']}")
        # Take a card from the player to the right and add it to the current player's hand
self.players[self.playerTurn]['hand'].append(self.players[oponent]['hand'].pop(cardPicked))
print(f"hand after move {self.players[self.playerTurn]['hand']}")
        # The player's turn is updated
        # The pair that was found is returned
return True
    # This function works out who the opponent of the player currently in turn is
def oponent(self):
if self.playerTurn == 0:
oponent = 2
else:
oponent = self.playerTurn - 1
return oponent
def getStatus(self):
return {
'turn': self.turn,
'board': self.board,
'players': self.players,
'player_in_turn':self.players[self.playerTurn],
'index_of_player_in_turn': self.playerTurn,
'oponent': self.players[self.oponent()]
}
    # Function that checks each player's hand for pairs and removes them
def removePairs(self):
for index, player in enumerate(self.players):
hand = player['hand']
for i in range(len(hand)):
current = hand[i]
for j in range(i+1,len(hand)):
possible = hand[j]
                    # check whether there is a pair
if current[0] == possible[0]:
self.players[index]['hand'].pop(j)
self.players[index]['hand'].pop(i)
break
def getPlayers(self):
return self.players
def setHand(self,username,hand):
for index, pl in enumerate(self.players):
if self.players[index]['username'] == username:
print(f"HAND BEFORE SET: {self.players[index]['hand']}")
self.players[index]['hand'] = hand
print(f"HAND AFTER SET: {self.players[index]['hand']}") |
# -*- coding: utf-8 -*-
from liao_xue_feng._6_oop.human import Human
class Student(Human):
def __init__(self, name, age, job):
super().__init__(name, age)
self.__job = job
def speak(self):
super().speak()
print('My job is %s.' % self.__job)
|
import asyncio
import aiohttp
import aiosqlite
from async_timeout import timeout
import os
import sqlite3
import json
from datetime import datetime
from urllib.parse import urlencode
from .settings import MAX_CACHE_AGE, SQLITE_PATH
async def check_existing_query(db, url):
""" Checks local SQLite3 DB to see if requested URL is stored.
If a table isn't found, one is created.
If a query is found, checks max cache age in settings to see if it should be returned.
"""
try:
sql = f"SELECT * FROM query_cache WHERE query = '{url}'"
cursor = await db.execute(sql)
except Exception as e:
print(sql)
raise e
query = await cursor.fetchone()
if query:
query_date = datetime.strptime(query[1], "%Y-%m-%d %H:%M:%S.%f")
if ((datetime.now() - query_date).days < MAX_CACHE_AGE):
return query
else:
return None
else:
return None
async def store_response(db, url, response):
""" Store response in SQLite3 DB. """
    payload = {
        "query": url,
        "date": str(datetime.now()),
        "response": response
    }
    # parameterized insert avoids breaking on quotes in the response
    sql = "INSERT INTO query_cache(query, date, response) VALUES(?, ?, ?)"
    await db.execute(sql, (payload["query"], payload["date"], payload["response"]))
await db.commit()
async def get_url(url, session, db, proxy=None, errors=[]):
""" Gets URL with a random proxy if one has been passed, else None is used.
TODO: Need to add rotating user agent probably.
"""
response = None
query = await check_existing_query(db, url)
headers = {
"host": "stats.nba.com",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
"connection": "keep-alive",
"user-agent": "Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:67.0) Gecko/20100101 Firefox/67.0",
}
if not query:
try:
async with timeout(5):
proxy_str = None
if proxy:
proxy_str = "http://" + proxy
response = await session.get(url, proxy=proxy_str, headers=headers)
status = response.status
print(status, url)
response = await response.json()
await store_response(db, url, json.dumps(response))
if status != 200 or response == []:
print("ERROR:", status, url)
errors.append((url, proxy))
elif status == 200 and response != []:
return response
except (asyncio.TimeoutError, TimeoutError):
pass
else:
return json.loads(query[-1])
def pop_urls(urls, n):
if n > len(urls):
n = len(urls)
return [urls.pop() for _ in range(0, n)]
async def proxy_check_gather(session, db, errors, urls, proxies=None):
if proxies:
response = await asyncio.gather(*[get_url(url=j, session=session, db=db, proxy=proxies[i], errors=errors) for i, j in enumerate(urls)])
else:
response = await asyncio.gather(*[get_url(url=i, session=session, db=db, proxy=None, errors=errors) for i in urls])
return response
async def fetch_urls(urls, proxies=None, len_arr=1, responses=None):
    """ Check if the URL is cached first via check_existing_query().
    If no cached entry is found, fetch the URL and store the response.
    Otherwise, return the cached response.
    """
    # avoid a mutable default argument: the old shared default list double-counted
    # responses when fetch_urls called itself recursively on failed urls
    if responses is None:
        responses = []
    if type(urls) is not list:
        urls = [urls]
if not os.path.isfile(SQLITE_PATH):
""" Check if SQLite3 database exists already.
If not, create one and create the relevant table.
"""
cur = sqlite3.connect(SQLITE_PATH).cursor()
cur.execute("CREATE TABLE query_cache(query VARCHAR, date DATETIME, response VARCHAR);")
async with aiosqlite.connect(SQLITE_PATH) as db:
async with aiohttp.ClientSession() as session:
assert type(urls) is list, "Input urls are not a list"
while len(urls) > 0:
errors = []
if not proxies:
                # Default chunk size used if no proxies are passed through.
# TODO: Need to confirm optimal size here to avoid throttling.
chunk_size = 10
else:
chunk_size = len(proxies)
urls_chunk = pop_urls(urls, chunk_size)
response = await proxy_check_gather(session=session, db=db, errors=errors, urls=urls_chunk, proxies=proxies)
responses += response
if len(errors) > 0:
print(errors)
print(f"{len(errors)} occured, retrying those urls...")
print("Sleeping for 5 seconds before retrying.")
await asyncio.sleep(5)
error_urls = []
for url, proxy in errors:
if proxies:
proxies.remove(proxy)
error_urls.append(url)
responses += await fetch_urls(urls=error_urls, proxies=proxies)
responses = [i for i in responses if i is not None]
print(f"{len(response)} of {len(urls_chunk)} urls processed, sleeping for 5 seconds.")
await asyncio.sleep(5)
return responses
def construct_url(endpoint, params=None):
""" Construct URL based on endpoint name.
https://github.com/seemethere/nba_py/wiki/stats.nba.com-Endpoint-Documentation
Documentation states https://stat.nba.com/stats/<endpoint>/?<params>
"""
if params:
params = urlencode(params)
url = f"https://stats.nba.com/stats/{endpoint}?{params}"
return url
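# Example usage (an illustrative sketch; the endpoint and parameters below are
# assumptions, not taken from the original code):
#
#   import asyncio
#   url = construct_url("commonallplayers",
#                       {"LeagueID": "00", "Season": "2018-19", "IsOnlyCurrentSeason": "1"})
#   responses = asyncio.get_event_loop().run_until_complete(fetch_urls([url]))
#   # each entry is the parsed JSON payload; repeat calls within MAX_CACHE_AGE
#   # days are served from the SQLite cache instead of hitting stats.nba.com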
|
# The lists/dictionaries in this file let vendors build/link custom libraries
# paths are relative to platform/crosvm dir
DLLS = [
]
VS_PROJECTS_FROM_CMAKE = {
# Format of this dictionary is:
# "dll_path": { "src": "source_code_path", "cmake_flags": "flags", "cmake_flags_for_features": {"feature": "flags"}}
}
WINDOWS_BUILDABLE_DLLS = {
# Format of this dictionary is:
# dll_path: (proj_path/sln_path, build_flags)
}
BINARIES = [
# List of binaries to include.
]
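# Example of what a vendor might fill in (purely illustrative paths, not defaults):
#
# DLLS = ["third_party/libfoo/foo.dll"]
# WINDOWS_BUILDABLE_DLLS = {
#     "third_party/libfoo/foo.dll": ("third_party/libfoo/foo.sln", "/p:Configuration=Release"),
# }
# BINARIES = ["third_party/tools/foo_tool.exe"]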
|
import os
import sys
# sys.path.append('./')
from transformers import BertTokenizer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
import numpy as np
from utils.arguments_parse import args
import json
import torch
from torch import nn
from torch.utils.data import DataLoader, Dataset
import unicodedata, re
from data_preprocessing import tools
tokenizer=tools.get_tokenizer()
def load_data(file_path):
event_type_dict=tools.load_schema()
with open(file_path,'r',encoding='utf8') as f:
lines=f.readlines()
sentences=[]
for line in lines:
data=json.loads(line)
text=data['text']
title=data['title']
if 'event_list' in data.keys() and data['event_list'] !=[]:
for event in data['event_list']:
event_type = event['event_type']
if event_type !='无事件':
role_list = event_type_dict[event_type]
for role in role_list:
sent = event_type+'[unused1]'+role+'[SEP]'+text
sentences.append(sent)
return sentences
def encoder(sentences):
encode_sent_list=[]
token_type_ids_list=[]
attention_mask_list=[]
for sent in sentences:
encode_dict=tokenizer.encode_plus(sent,max_length=args.max_length,pad_to_max_length=True)
encode_sent_list.append(encode_dict['input_ids'])
token_type_ids_list.append(encode_dict['token_type_ids'])
attention_mask_list.append(encode_dict['attention_mask'])
return encode_sent_list,token_type_ids_list,attention_mask_list
|
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\objects\lighting\lighting_object_interactions.py
# Compiled at: 2020-04-17 02:47:04
# Size of source mod 2**32: 1327 bytes
from objects.lighting.lighting_interactions import SwitchLightImmediateInteraction
from objects.object_state_utils import ObjectStateHelper
import sims4
logger = sims4.log.Logger('LightingAndObjectState', default_owner='mkartika')
class SwitchLightAndStateImmediateInteraction(SwitchLightImmediateInteraction):
INSTANCE_TUNABLES = {'state_settings': ObjectStateHelper.TunableFactory(description='\n Find objects in the same room or lot based on the tags and \n set state to the desired state.\n ')}
def _run_interaction_gen(self, timeline):
yield from super()._run_interaction_gen(timeline)
self.state_settings.execute_helper(self.target)
if False:
yield None |
from kivy.uix.relativelayout import RelativeLayout
from kivy.properties import ListProperty, StringProperty
class AdventureMap(RelativeLayout):
player_icon = StringProperty()
player_pos = ListProperty()
def __init__(self, **kw):
super(AdventureMap, self).__init__(**kw)
self.map_size = kw['map_size'] if 'map_size' in kw else (3, 3)
self.room_size = (1./self.map_size[0], 1./self.map_size[1])
self.rooms = dict()
self.center_room = None # TODO set coords to offset drawing of rooms
def draw_rooms(self):
self.clear_widgets()
for k, v in self.rooms.iteritems():
x = k[0] * self.room_size[0]
y = k[1] * self.room_size[1]
v.pos_hint = {'x': x, 'y': y}
v.size_hint = self.room_size
self.add_widget(v)
v.draw()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import rnn
class BidirectionalRNN:
def __init__(self, name, rnn_size, data_type=tf.float32):
self.data_type = data_type
self.name = name
with tf.variable_scope(self.name):
self.forward_rnn = rnn.MultiRNNCell([rnn.GRUCell(rnn_size[i]) for i in range(len(rnn_size))])
self.backward_rnn = rnn.MultiRNNCell([rnn.GRUCell(rnn_size[i]) for i in range(len(rnn_size))])
def run(self, data, reuse=False, time_major=False, pooling=False):
time_axis = 0 if time_major else 1
with tf.variable_scope(self.name):
with tf.variable_scope("ForwardRNN", reuse=reuse) as scope:
forward_output, state = tf.nn.dynamic_rnn(self.forward_rnn, data, dtype=self.data_type, time_major=time_major, scope=scope)
if pooling == 'mean':
forward_output = tf.reduce_mean(forward_output, time_axis)
else:
forward_output = forward_output[-1, :, :] if time_major else forward_output[:, -1, :]
with tf.variable_scope("BackwardRNN", reuse=reuse) as scope:
data = tf.reverse(data, axis=[time_axis])
backward_output, state = tf.nn.dynamic_rnn(self.backward_rnn, data, dtype=self.data_type, time_major=time_major, scope=scope)
if pooling == 'mean':
backward_output = tf.reduce_mean(backward_output, time_axis)
else:
backward_output = backward_output[-1, :, :] if time_major else backward_output[:, -1, :]
tf.summary.histogram('forward_rnn_output', forward_output)
tf.summary.histogram('backward_rnn_output', backward_output)
return (forward_output + backward_output) / 2
|
"""Module for testing Coding DNA Insertion Validator."""
import unittest
from variation.validators import CodingDNAInsertion
from variation.classifiers import CodingDNAInsertionClassifier
from .validator_base import ValidatorBase
from variation.tokenizers import GeneSymbol
from variation.data_sources import TranscriptMappings, SeqRepoAccess, \
MANETranscriptMappings, UTA
from variation.mane_transcript import MANETranscript
from ga4gh.vrs.dataproxy import SeqRepoDataProxy
from ga4gh.vrs.extras.translator import Translator
from gene.query import QueryHandler as GeneQueryHandler
class TestCodingDNAInsertionValidator(ValidatorBase, unittest.TestCase):
"""A class to test the CodingDNAInsertion Validator."""
def validator_instance(self):
"""Return coding dna insertion instance."""
seqrepo_access = SeqRepoAccess()
transcript_mappings = TranscriptMappings()
uta = UTA()
dp = SeqRepoDataProxy(seqrepo_access.seq_repo_client)
tlr = Translator(data_proxy=dp)
return CodingDNAInsertion(
seqrepo_access, transcript_mappings,
GeneSymbol(GeneQueryHandler()),
MANETranscript(seqrepo_access, transcript_mappings,
MANETranscriptMappings(), uta),
uta, dp, tlr
)
def classifier_instance(self):
"""Return the coding dna insertion classifier instance."""
return CodingDNAInsertionClassifier()
def fixture_name(self):
"""Return the fixture name for coding dna insertion."""
return 'coding_dna_insertion'
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 15 12:57:44 2018
@author: wolfpack
"""
import pandas as pd
from scipy.cluster import hierarchy as hc
import matplotlib.pyplot as plt
mean_all = pd.read_csv('marks_mean.csv')
mean_all = mean_all.drop("Unnamed: 0",axis=1)
#correlation matrices
corr_mat_pearson = mean_all.corr(method='pearson')
corr_mat_spearman = mean_all.corr(method='spearman')
corr_mat_kendall = mean_all.corr(method='kendall')
#corr_condensed = hc.distance.squareform(corr_mat_pearson) # convert to condensed
# hierarchical clustering dendrograms for each correlation matrix and linkage method
for corr_name, corr_mat in [('pearson', corr_mat_pearson),
                            ('spearman', corr_mat_spearman),
                            ('kendall', corr_mat_kendall)]:
    for method in ('average', 'weighted'):
        z = hc.linkage(corr_mat, method=method, metric='cosine')
        dendrogram = hc.dendrogram(z, labels=corr_mat.columns, orientation='right')
        plt.xlabel('cosine distance')
        plt.ylabel('subjects')
        plt.title('Hierarchical Cluster Dendrogram for %s correlation matrix '
                  'with cosine distance & %s cluster method (PCA)' % (corr_name, method))
        plt.show()
|
import whois
from random import choice
from requests import get, exceptions
def components():
pre = [
'Hi', 'Oxyge', 'Tita', 'Nitro', 'Xeno', 'Ura', 'Vanda', 'Hype',
'Nexu', 'Alpha', 'Ze', 'Zen', 'Force', 'Neo', 'Ne', 'Xe',
'Veno', 'Ze', 'Argo', 'Xe', 'Auro', 'Nebula', 'Cryp', 'Lumi', 'Ve', 'Turbo',
'Zenu', 'Fire', 'Phoe', 'Fo', 'Ve', 'Za', 'Mysti', 'Evo', 'Mythi', 'Micro'
]
mid = [
''
]
end = [
'nix', 'y', 'xy', 'nus', 'vy', 'vex', 'Z', 'X',
'vus', 'nit', 'nox', 'xie', 'xus', 'vos', 'vas', 'tic', 'neo',
'nity', 'nium', 'phix', 'nia', 'vis', 'tix',
'Side', 'Planet', 'World', 'Live', 'One', 'Net', 'lix', 'las', 'mi', 'Q', 'V', 'nion',
'rious', 'ntic', 'ntix', 'night', 'rion', 'tical', 'no', 'tics'
]
return (pre, mid, end)
def build():
(pre, mid, end) = components()
return ''.join([choice(pre), choice(mid), choice(end)])
def check_website(url: str) -> bool:
try:
if whois.whois(url):
return True
return False
except Exception as e:
return False
# # thanks https://stackoverflow.com/a/55799417/14345173
# try:
# response = get(url, timeout=0.75, allow_redirects=False)
# try:
# if response.status_code < 400:
# return True
# except exceptions.HTTPError:
# return False
# except (exceptions.ConnectionError, exceptions.ReadTimeout):
# return False
def generate(check_tld='com'):
generated = build()
if check_tld:
url = f'http://{generated.lower()}.{check_tld}'
        available = not check_website(url)
        if available:
generated += ' ✔'
return generated
def generate_list(length=5, check_tld=False):
generated = []
for _ in range(length):
generated.append(generate(check_tld=check_tld))
return generated
if __name__ == '__main__':
print(generate_list()) |
import labrad
import numpy as np
import time
import datetime as datetime
from EGGS_labrad.servers.script_scanner.experiment import experiment
class vibration_measurement_ss(experiment):
'''
Records scope trace and its FFT, then converts them to csv
'''
name = 'Vibration Measurement SS'
exp_parameters = []
@classmethod
def all_required_parameters(cls):
return cls.exp_parameters
def initialize(self, cxn, context, ident):
try:
#properties
self.ident = ident
self.cxn = labrad.connect(name = self.name)
#servers
self.dv = self.cxn.data_vault
self.grapher = self.cxn.grapher
self.oscope = self.cxn.oscilloscope_server
self.oscope.select_device()
#dataset context
self.c_oscope = self.cxn.context()
#set up data vault
self.set_up_datavault()
#data: tmp
self.dataDJ = None
except Exception as e:
print(e)
def run(self, cxn, context, replacement_parameters={}):
try:
trace = self.oscope.get_trace(1)
trace = np.array([trace[0], trace[1]]).transpose()
self.dv.add_ex(trace, context = self.c_oscope)
self.dataDJ = trace
except Exception as e:
print(e)
raise
def finalize(self, cxn, context):
np.savetxt('data.csv', self.dataDJ, delimiter = ',')
#todo: fft
self.cxn.disconnect()
def set_up_datavault(self):
#set up folder
date = datetime.datetime.now()
year = str(date.year)
month = '%02d' % date.month # Padded with a zero if one digit
day = '%02d' % date.day # Padded with a zero if one digit
hour = '%02d' % date.hour # Padded with a zero if one digit
minute = '%02d' % date.minute # Padded with a zero if one digit
trunk1 = year + '_' + month + '_' + day
trunk2 = self.name + '_' + hour + ':' + minute
self.filename = trunk2
#create datasets
#oscope
self.dv.cd(['','Experiments', year, month, trunk1, trunk2], True, context=self.c_oscope)
dataset_oscope = self.dv.new('Oscilloscope Trace',[('Time', 's')], [('Scope Trace', 'Scope Trace', '1')], context = self.c_oscope)
#set live plotting
#self.grapher.plot(dataset_oscope, 'bright/dark', False)
if __name__ == '__main__':
cxn = labrad.connect()
scanner = cxn.script_scanner
exprt = vibration_measurement_ss(cxn = cxn)
ident = scanner.register_external_launch(exprt.name)
exprt.execute(ident) |
# Copyright (c) 2019 Graphcore Ltd. All rights reserved.
import numpy as np
import numpy.random as npr
import popart
import onnx
import os
import pytest
import test_util as tu
batch_size = 12
# the number of classes, and the number of features at every depth of the net
hidden_size = 16
num_ipus = 2
# all input and weight values are initialized to 1 - U where U ~ Uniform[0,xi].
xi = 0.1
# learning rate
lr = 0.12
#weight decay
wd = 0.08
# beta 1 (Adam)
b1 = 0.9
# beta 2 (Adam)
b2 = 0.999
# if this is set to, say, 100, the test fails; see T24563
testTilesPerIPU = 1216
sgd_optimizer = popart.SGD({
"defaultLearningRate": (lr, False),
"defaultWeightDecay": (wd, False)
})
adam_optimizer = popart.Adam({
"defaultLearningRate": (lr, False),
"defaultBeta1": (b1, False),
"defaultBeta2": (b2, False),
"defaultWeightDecay": (wd, False),
"defaultEps": (1e-6, True),
})
grad_accl_prefix = popart.reservedAcclPrefix()
def get_micro_batch_size(accum_factor):
"""
    no data replication, so micro batch size = batch size / accumulation factor
"""
    if batch_size % accum_factor != 0:
raise RuntimeError("accum_factor is not a factor of batch_size")
micro_batch_size = batch_size // accum_factor
return micro_batch_size
def get_mm_model(accum_factor, enable_multi_ipu):
"""
out = mm(mm(mm(mm(mm(mm(in)))))) all mm are hidden_size x hidden_size
"""
np.random.seed(1234)
micro_batch_size = get_micro_batch_size(accum_factor)
builder = popart.Builder()
input_shape = [micro_batch_size, hidden_size]
input_tensor_name = builder.addInputTensor(
popart.TensorInfo("FLOAT", input_shape))
x = input_tensor_name
# Put 50% on each IPU if we are using multi IPU
for i in range(6):
w = builder.addInitializedInputTensor(
(1.0 - xi * npr.rand(hidden_size, hidden_size)).astype(np.float32),
"weight")
x = builder.aiOnnx.matmul([x, w])
if enable_multi_ipu:
if i > 3:
builder.virtualGraph(x, 1)
else:
builder.virtualGraph(x, 0)
label_tensor_name = builder.addInputTensor("INT32", [micro_batch_size])
x = builder.aiGraphcore.nllloss([x, label_tensor_name],
reduction=popart.ReductionType.Sum)
if enable_multi_ipu:
builder.virtualGraph(x, 1)
initial_onnx_model = builder.getModelProto()
return initial_onnx_model, input_tensor_name, x, label_tensor_name
def get_complex_model(accum_factor):
"""
out = softmax(reshape(conv(exp(sin(x)), weights))
"""
np.random.seed(1234)
micro_batch_size = get_micro_batch_size(accum_factor)
builder = popart.Builder()
input_shape = [micro_batch_size, 2, 4, 4]
input_tensor_name = builder.addInputTensor(
popart.TensorInfo("FLOAT", input_shape))
w0 = builder.addInitializedInputTensor(
np.ones([2, 2, 3, 3]).astype(np.float32))
x0 = input_tensor_name
s0 = builder.aiOnnx.sin([x0], "s0")
e0 = builder.aiOnnx.exp([s0], "e0")
c0 = builder.aiOnnx.conv([e0, w0],
dilations=[1, 1],
pads=[1, 1, 1, 1],
strides=[1, 1],
debugContext="c0")
r0 = builder.reshape_const(builder.aiOnnx, [c0], [micro_batch_size, 32])
sm = builder.aiOnnx.softmax([r0], axis=1, debugContext="sfm")
output_tensor_name = builder.aiGraphcore.identityloss(
[sm], reduction=popart.ReductionType.Sum)
builder.addOutputTensor(output_tensor_name)
label_shape = [micro_batch_size]
label_tensor_name = builder.addInputTensor(
popart.TensorInfo("INT32", label_shape))
initial_onnx_model = builder.getModelProto()
return initial_onnx_model, input_tensor_name, output_tensor_name, label_tensor_name
def run_graph(optimizer, input_shape, initial_onnx_model, input_tensor_name,
output_tensor_name, label_tensor_name, label_array, accum_factor,
enable_accum, batches_per_step, number_of_steps,
final_proto_filename, enable_multi_ipu, full_anchorage,
inference_mode, explicit_loops):
art = popart.AnchorReturnType("All")
anchorNames = {output_tensor_name: art}
if full_anchorage:
w0 = onnx.load_from_string(
initial_onnx_model).graph.initializer[0].name
anchorNames[popart.reservedGradientPrefix() + w0] = art
if enable_accum:
anchorNames[popart.reservedAccumPrefix() + w0] = art
opts = popart.SessionOptions()
opts.enableGradientAccumulation = enable_accum
opts.accumulationFactor = accum_factor
opts.enableOutlining = False
opts.virtualGraphMode = popart.VirtualGraphMode.Manual if enable_multi_ipu else popart.VirtualGraphMode.Off
if explicit_loops:
opts.enableExplicitMainLoops = True
opts.aliasZeroCopy = True
opts.explicitRecomputation = True
opts.useHostCopyOps = True
if enable_multi_ipu:
device = tu.create_test_device(numIpus=num_ipus,
tilesPerIPU=testTilesPerIPU,
opts={"compileIPUCode": False})
opts.virtualGraphMode = popart.VirtualGraphMode.Manual
else:
device = tu.create_test_device(tilesPerIPU=testTilesPerIPU,
opts={"compileIPUCode": False})
opts.virtualGraphMode = popart.VirtualGraphMode.Off
    # Only for test purposes: inference with gradient accumulation is expected to fail
if inference_mode:
popart.InferenceSession(fnModel=initial_onnx_model,
dataFlow=popart.DataFlow(
batches_per_step, anchorNames),
userOptions=opts,
deviceInfo=device)
session = popart.TrainingSession(fnModel=initial_onnx_model,
dataFlow=popart.DataFlow(
batches_per_step, anchorNames),
deviceInfo=device,
loss=output_tensor_name,
optimizer=optimizer,
userOptions=opts)
session.prepareDevice()
session.weightsFromHost()
anchor_arrays = session.initAnchorArrays()
outer_dim = 1
if batches_per_step > 1:
outer_dim *= batches_per_step
label_array = np.repeat(label_array[np.newaxis], batches_per_step, 0)
if accum_factor > 1:
outer_dim *= accum_factor
label_array = label_array.reshape(
[accum_factor * batches_per_step, -1])
if outer_dim > 1:
input_shape = [outer_dim] + input_shape
stepio = popart.PyStepIO(
{
input_tensor_name:
(1.0 - xi * npr.rand(*input_shape)).astype(np.float32),
label_tensor_name:
label_array.astype(np.int32)
}, anchor_arrays)
for i in range(number_of_steps):
session.run(stepio)
final_proto_file = "{}.onnx".format(final_proto_filename)
session.modelToHost(final_proto_filename)
return final_proto_filename, anchor_arrays
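# Host-side data layout used above (sketch): the input fed to PyStepIO gains a
# leading dimension of batches_per_step * accum_factor. For example, with
# batches_per_step = 5, accum_factor = 4 and a micro batch of 3, each
# session.run() call consumes an input of shape [20, 3, hidden_size].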
def run_complex_graph(optimizer, label_array, accum_factor, enable_accum,
batches_per_step, number_of_steps, final_proto_filename,
enable_multi_ipu, full_anchorage, explicit_loops):
if (enable_multi_ipu):
raise RuntimeError("Cannot enable multi ipu in complex graph")
initial_onnx_model, input_tensor_name, output_tensor_name, label_tensor_name = get_complex_model(
accum_factor)
final_proto_filename, anchor_arrays = run_graph(
optimizer,
input_shape=[get_micro_batch_size(accum_factor), 2, 4, 4],
initial_onnx_model=initial_onnx_model,
input_tensor_name=input_tensor_name,
output_tensor_name=output_tensor_name,
label_tensor_name=label_tensor_name,
label_array=label_array,
accum_factor=accum_factor,
enable_accum=enable_accum,
batches_per_step=batches_per_step,
number_of_steps=number_of_steps,
final_proto_filename=final_proto_filename,
enable_multi_ipu=enable_multi_ipu,
full_anchorage=full_anchorage,
inference_mode=False,
explicit_loops=explicit_loops)
return initial_onnx_model, final_proto_filename, anchor_arrays
def run_mm_graph(optimizer,
label_array,
accum_factor,
enable_accum,
batches_per_step,
number_of_steps,
final_proto_filename,
enable_multi_ipu,
full_anchorage,
inference_mode=False,
explicit_loops=False):
initial_onnx_model, input_tensor_name, output_tensor_name, label_tensor_name = get_mm_model(
accum_factor, enable_multi_ipu)
final_proto_filename, anchor_arrays = run_graph(
optimizer,
input_shape=[get_micro_batch_size(accum_factor), hidden_size],
initial_onnx_model=initial_onnx_model,
input_tensor_name=input_tensor_name,
output_tensor_name=output_tensor_name,
label_tensor_name=label_tensor_name,
label_array=label_array,
accum_factor=accum_factor,
enable_accum=enable_accum,
batches_per_step=batches_per_step,
number_of_steps=number_of_steps,
final_proto_filename=final_proto_filename,
enable_multi_ipu=enable_multi_ipu,
full_anchorage=full_anchorage,
inference_mode=inference_mode,
explicit_loops=explicit_loops)
return initial_onnx_model, final_proto_filename, anchor_arrays
def check_models(model_init, modelA_fn, modelB_fn):
"""
for each weight tensor, check the relative error. That is,
| model_accl - model_no_accl |_1 / | model_accl - model_initial|_1
"""
modelA = onnx.load(modelA_fn)
modelB = onnx.load(modelB_fn)
#the initial model
modelC = onnx.load_from_string(model_init)
for w_i, weightA in enumerate(modelA.graph.initializer):
# We need to avoid the gradient accl initializers as these won't be present
# in the non grad accl models.
if (popart.reservedAcclPrefix() not in weightA.name
and popart.reservedAccl1Prefix() not in weightA.name
and popart.reservedAccl2Prefix() not in weightA.name
and popart.reservedStepPrefix() not in weightA.name
and popart.reservedAccumPrefix() not in weightA.name):
# where A, B, C are weight tensors,
# |A - B|_1
l1AB = 0
# |B - C|_1
l1BC = 0
# |A - C|_1
l1AC = 0
for d_i, dataA in enumerate(weightA.float_data):
dataB = modelB.graph.initializer[w_i].float_data[d_i]
dataC = modelC.graph.initializer[w_i].float_data[d_i]
# abs diff of 2 floats
l1AB += np.abs(dataA - dataB)
l1BC += np.abs(dataB - dataC)
l1AC += np.abs(dataA - dataC)
relative_error = l1AB / (l1AC)
print(
f"{weightA.name}: l1AB = %.2e, l1AC = %.2e, l1BC = %.2e, relative error = %.2e"
% (l1AB, l1AC, l1BC, relative_error))
            # check that the weights have moved enough for this to be a valid test
assert l1AC > 1e-3, "change since start of A = %.5f" % (l1AC, )
assert l1BC > 1e-3, "change since start of B = %.5f" % (l1BC, )
#relative error assertion
assert 1e-5 > relative_error, "Relative error {}".format(
relative_error)
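# Interpretation (illustrative): l1AC measures how far training moved the weights in
# the accumulated run, and l1AB how far the accumulated and non-accumulated results
# ended up from each other; requiring l1AB / l1AC < 1e-5 means the two training
# trajectories agree to within roughly 0.001% of the total weight movement.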
@tu.requires_ipu_model
@pytest.mark.parametrize("explicit_loops", [True, False])
def test_gradient_accumulation_base(tmpdir, explicit_loops):
"""
base test (as simple as possible)
"""
for graph_runner in [run_complex_graph, run_mm_graph]:
np.random.seed(1234)
label_array = np.random.randint(0, hidden_size, batch_size)
accl_initial_proto, accl_proto_filename, accl_anchor_arrays = graph_runner(
sgd_optimizer,
label_array=label_array,
accum_factor=4,
enable_accum=True,
batches_per_step=1,
number_of_steps=1,
final_proto_filename=os.path.join(tmpdir, "accl"),
enable_multi_ipu=False,
full_anchorage=False,
explicit_loops=explicit_loops)
no_accl_initial_proto, no_accl_proto_filename, no_accl_anchor_arrays = graph_runner(
sgd_optimizer,
label_array=label_array,
accum_factor=1,
enable_accum=False,
batches_per_step=1,
number_of_steps=1,
final_proto_filename=os.path.join(tmpdir, "noAcc"),
enable_multi_ipu=False,
full_anchorage=False,
explicit_loops=explicit_loops)
check_models(accl_initial_proto, accl_proto_filename,
no_accl_proto_filename)
@tu.requires_ipu_model
@pytest.mark.parametrize("explicit_loops", [True, False])
def test_gradient_accumulation_multi_batch(tmpdir, explicit_loops):
"""
from _base: increase batches per step and number of steps
"""
for graph_runner in [run_mm_graph, run_complex_graph]:
np.random.seed(1234)
label_array = np.random.randint(0, hidden_size, batch_size)
accl_initial_proto, accl_proto_filename, accl_anchor_arrays = run_mm_graph(
sgd_optimizer,
label_array=label_array,
accum_factor=4,
enable_accum=True,
batches_per_step=5,
number_of_steps=3,
final_proto_filename=os.path.join(tmpdir, "accl5batches3steps"),
enable_multi_ipu=False,
full_anchorage=False,
explicit_loops=explicit_loops)
no_accl_initial_proto, no_accl_proto_filename, no_accl_anchor_arrays = run_mm_graph(
sgd_optimizer,
label_array=label_array,
accum_factor=1,
enable_accum=False,
batches_per_step=5,
number_of_steps=3,
final_proto_filename=os.path.join(tmpdir, "noAccl5batches3steps"),
enable_multi_ipu=False,
full_anchorage=False,
explicit_loops=explicit_loops)
check_models(accl_initial_proto, accl_proto_filename,
no_accl_proto_filename)
@tu.requires_ipu_model
@pytest.mark.parametrize("explicit_loops", [True, False])
def test_gradient_accumulation_multi_ipu(tmpdir, explicit_loops):
"""
from _multi_batch: enable multi ipus
"""
np.random.seed(1234)
label_array = np.random.randint(0, hidden_size, batch_size)
accl_initial_proto, accl_proto_filename, accl_anchor_arrays = run_mm_graph(
sgd_optimizer,
label_array=label_array,
accum_factor=4,
enable_accum=True,
batches_per_step=5,
number_of_steps=3,
final_proto_filename=os.path.join(tmpdir, "accl5batches3steps"),
enable_multi_ipu=True,
full_anchorage=False,
explicit_loops=explicit_loops)
no_accl_initial_proto, no_accl_proto_filename, no_accl_anchor_arrays = run_mm_graph(
sgd_optimizer,
label_array=label_array,
accum_factor=1,
enable_accum=False,
batches_per_step=5,
number_of_steps=3,
final_proto_filename=os.path.join(tmpdir, "noAccl5batches3steps"),
# we do not enable multiple IPUs in the baseline
enable_multi_ipu=False,
full_anchorage=False,
explicit_loops=explicit_loops)
check_models(accl_initial_proto, accl_proto_filename,
no_accl_proto_filename)
@tu.requires_ipu_model
@pytest.mark.parametrize("explicit_loops", [True, False])
def test_gradient_accumulation_error_inference(tmpdir, explicit_loops):
"""
confirm that there is an error if in inference mode
"""
label_array = np.random.randint(0, hidden_size, batch_size)
with pytest.raises(popart.popart_exception) as e_info:
a, b, c = run_mm_graph(sgd_optimizer,
label_array=label_array,
accum_factor=4,
enable_accum=True,
batches_per_step=5,
number_of_steps=3,
final_proto_filename=os.path.join(
tmpdir, "accl5batches3steps"),
enable_multi_ipu=True,
full_anchorage=False,
inference_mode=True,
explicit_loops=explicit_loops)
assert e_info.value.args[0].startswith(
"Gradient Accumulation only available when training")
@tu.requires_ipu_model
@pytest.mark.parametrize("explicit_loops", [True, False])
def test_gradient_accumulation_error_accum_factor_invalid(
tmpdir, explicit_loops):
"""
confirm that enable_accum = False => accum_factor = 1
"""
label_array = np.random.randint(0, hidden_size, batch_size)
with pytest.raises(popart.popart_exception) as e_info:
a, b, c = run_mm_graph(sgd_optimizer,
label_array=label_array,
accum_factor=4,
enable_accum=False,
batches_per_step=5,
number_of_steps=3,
final_proto_filename=os.path.join(
tmpdir, "accl5batches3steps"),
enable_multi_ipu=True,
full_anchorage=False,
inference_mode=False,
explicit_loops=explicit_loops)
assert e_info.value.args[0].startswith(
"enableGradientAccumulation is false, but accumulationFactor > 1.")
@tu.requires_ipu_model
@pytest.mark.parametrize("explicit_loops", [True, False])
def test_gradient_accumulation_model_proto(tmpdir, explicit_loops):
np.random.seed(1234)
label_array = np.random.randint(0, hidden_size, batch_size)
accl_initial_proto, accl_proto_filename, accl_anchor_arrays = run_mm_graph(
# Using Momentum to create accl tensors.
popart.SGD({
"defaultLearningRate": (0.1, False),
"defaultMomentum": (0.9, True)
}),
label_array=label_array,
accum_factor=4,
enable_accum=True,
batches_per_step=5,
number_of_steps=3,
final_proto_filename=os.path.join(tmpdir, "accl5batches3steps"),
enable_multi_ipu=False,
full_anchorage=False,
explicit_loops=explicit_loops)
model = onnx.load(accl_proto_filename)
names = [t.name for t in model.graph.initializer]
grad_accl_names = []
weight_names = []
for name in names:
if grad_accl_prefix in name:
grad_accl_names.append(name)
elif "weight" in name:
weight_names.append(name)
# Model should have 6 weight tensors
assert len(weight_names) == 6
assert len(grad_accl_names) == len(weight_names)
tensor_mapping = {}
for tensor in model.graph.initializer:
tensor_mapping[tensor.name] = tensor
rev_map = {}
for w_name in weight_names:
assert grad_accl_prefix + w_name in grad_accl_names
rev_map[grad_accl_prefix + w_name] = w_name
for g_a_name in grad_accl_names:
weight_tensor = tensor_mapping[rev_map[g_a_name]]
g_a_tensor = tensor_mapping[g_a_name]
for d_i, v in enumerate(weight_tensor.float_data):
# initialisation as per equations. When velocity scaling != 1 this
# will need changing : T12001
assert g_a_tensor.float_data[d_i] - v * wd < 1e-8
@pytest.mark.parametrize("explicit_loops", [True, False])
def test_loading_saved_gradient_accumulation_tensors(tmpdir, explicit_loops):
"""
1. Build a model with matmuls, no grad accumulation
2. Write out onnx model, verify initializers contain no accl tensors
3. Create session with model, verify accl tensors initialised correctly
4. Do session.run(), write out model, verify accl tensors have been updated
5. Create new session with same model. This time before run, write out model
and check tensors are still there, with the same value
"""
# 1.
accum_factor = 4
[onnx_model, input_name, output_name,
lb_name] = get_mm_model(accum_factor=accum_factor, enable_multi_ipu=False)
# 2.
model = onnx.load_from_string(onnx_model)
names = [t.name for t in model.graph.initializer]
for name in names:
assert grad_accl_prefix not in name
def getTrainingSession(fn):
opts = popart.SessionOptions()
opts.enableGradientAccumulation = True
opts.accumulationFactor = accum_factor
opts.disableGradAccumulationTensorStreams = False
sess = popart.TrainingSession(
fnModel=fn,
dataFlow=popart.DataFlow(1, {}),
deviceInfo=tu.create_test_device(tilesPerIPU=testTilesPerIPU),
loss=output_name,
# Using momentum to create accl tensors
optimizer=popart.SGD({
"defaultLearningRate": (0.1, False),
"defaultMomentum": (0.9, True)
}),
userOptions=opts)
sess.prepareDevice()
sess.weightsFromHost()
return sess
# 3.
sess = getTrainingSession(onnx_model)
fn = os.path.join(tmpdir, "withInitZeroAcclTensors.onnx")
sess.modelToHost(fn)
model = onnx.load(fn)
weights = {}
accls = {}
for t in model.graph.initializer:
if grad_accl_prefix in t.name:
accls[t.name] = t.float_data
else:
weights[t.name] = t.float_data
for name in weights:
t_weight = np.asarray(weights[name])
t_accl = np.asarray(accls[grad_accl_prefix + name])
# 4.
input_shape = [accum_factor] + sess.getInfo(input_name).shape()
stepio = popart.PyStepIO(
{
input_name: npr.rand(*input_shape).astype(np.float32),
lb_name: np.ones(batch_size).astype(np.int32),
}, sess.initAnchorArrays())
sess.run(stepio)
fn = os.path.join(tmpdir, "withUpdatedAcclTensors.onnx")
sess.modelToHost(fn)
model = onnx.load(fn)
up_accls = {}
for t in model.graph.initializer:
if grad_accl_prefix in t.name:
up_accls[t.name] = np.asarray(t.float_data)
            assert not np.allclose(np.asarray(t.float_data),
                                   np.asarray(accls[t.name]))
# 5.
sess = getTrainingSession(fn)
fn = os.path.join(tmpdir, "withUpdatedAcclTensors_check.onnx")
sess.modelToHost(fn)
model = onnx.load(fn)
for t in model.graph.initializer:
if grad_accl_prefix in t.name:
assert np.array_equal(up_accls[t.name], np.asarray(t.float_data))
@tu.requires_ipu_model
@pytest.mark.parametrize("explicit_loops", [True, False])
def test_adam_gradient_accumulation_base(tmpdir, explicit_loops):
"""
base test (as simple as possible)
"""
for graph_runner in [run_complex_graph, run_mm_graph]:
np.random.seed(1234)
label_array = np.random.randint(0, hidden_size, batch_size)
accum_initial_proto, accum_proto_filename, accum_anchor_arrays = graph_runner(
adam_optimizer,
label_array=label_array,
accum_factor=4,
enable_accum=True,
batches_per_step=1,
number_of_steps=1,
final_proto_filename=os.path.join(tmpdir, "adamAccum"),
enable_multi_ipu=False,
full_anchorage=False,
explicit_loops=explicit_loops)
no_accum_initial_proto, no_accum_proto_filename, no_accum_anchor_arrays = graph_runner(
adam_optimizer,
label_array=label_array,
accum_factor=1,
enable_accum=False,
batches_per_step=1,
number_of_steps=1,
final_proto_filename=os.path.join(tmpdir, "adamNoAccum"),
enable_multi_ipu=False,
full_anchorage=False,
explicit_loops=explicit_loops)
check_models(accum_initial_proto, accum_proto_filename,
no_accum_proto_filename)
@tu.requires_ipu_model
@pytest.mark.parametrize("explicit_loops", [True, False])
def test_adam_gradient_accumulation_multi_batch(tmpdir, explicit_loops):
"""
from _base: increase batches per step and number of steps
"""
for graph_runner in [run_mm_graph, run_complex_graph]:
np.random.seed(1234)
label_array = np.random.randint(0, hidden_size, batch_size)
accum_initial_proto, accum_proto_filename, accum_anchor_arrays = run_mm_graph(
adam_optimizer,
label_array=label_array,
accum_factor=4,
enable_accum=True,
batches_per_step=5,
number_of_steps=3,
final_proto_filename=os.path.join(tmpdir,
"adamAccum5batches3steps"),
enable_multi_ipu=False,
full_anchorage=False,
explicit_loops=explicit_loops)
no_accum_initial_proto, no_accum_proto_filename, no_accum_anchor_arrays = run_mm_graph(
adam_optimizer,
label_array=label_array,
accum_factor=1,
enable_accum=False,
batches_per_step=5,
number_of_steps=3,
final_proto_filename=os.path.join(tmpdir,
"adamNoAccum5batches3steps"),
enable_multi_ipu=False,
full_anchorage=False,
explicit_loops=explicit_loops)
check_models(accum_initial_proto, accum_proto_filename,
no_accum_proto_filename)
@tu.requires_ipu_model
@pytest.mark.parametrize("explicit_loops", [True, False])
def test_adam_gradient_accumulation_multi_ipu(tmpdir, explicit_loops):
"""
from _multi_batch: enable multi ipus
"""
np.random.seed(1234)
label_array = np.random.randint(0, hidden_size, batch_size)
accum_initial_proto, accum_proto_filename, accum_anchor_arrays = run_mm_graph(
adam_optimizer,
label_array=label_array,
accum_factor=4,
enable_accum=True,
batches_per_step=5,
number_of_steps=3,
final_proto_filename=os.path.join(tmpdir, "adamAccum5batches3steps"),
enable_multi_ipu=True,
full_anchorage=False,
explicit_loops=explicit_loops)
no_accum_initial_proto, no_accum_proto_filename, no_accum_anchor_arrays = run_mm_graph(
adam_optimizer,
label_array=label_array,
accum_factor=1,
enable_accum=False,
batches_per_step=5,
number_of_steps=3,
final_proto_filename=os.path.join(tmpdir, "adamNoAccum5batches3steps"),
# we do not enable multiple IPUs in the baseline
enable_multi_ipu=False,
full_anchorage=False,
explicit_loops=explicit_loops)
check_models(accum_initial_proto, accum_proto_filename,
no_accum_proto_filename)
@tu.requires_ipu_model
@pytest.mark.parametrize("explicit_loops", [True, False])
def test_adam_gradient_accumulation_model_proto(tmpdir, explicit_loops):
batches_per_step = 5
for steps in [0, 3]:
np.random.seed(1234)
label_array = np.random.randint(0, hidden_size, batch_size)
accl_initial_proto, accl_proto_filename, accl_anchor_arrays = run_mm_graph(
adam_optimizer,
label_array=label_array,
accum_factor=4,
enable_accum=True,
batches_per_step=batches_per_step,
number_of_steps=steps,
final_proto_filename=os.path.join(tmpdir, "accl5batches3steps"),
enable_multi_ipu=False,
full_anchorage=False,
explicit_loops=explicit_loops)
model = onnx.load(accl_proto_filename)
names = [t.name for t in model.graph.initializer]
weight_names = []
accum_names = []
accl1_names = []
accl2_names = []
step_names = []
for name in names:
if popart.reservedAccumPrefix() in name:
accum_names.append(name)
elif popart.reservedAccl1Prefix() in name:
accl1_names.append(name)
elif popart.reservedAccl2Prefix() in name:
accl2_names.append(name)
elif popart.reservedStepPrefix() in name:
step_names.append(name)
elif "weight" in name:
weight_names.append(name)
# Model should have 6 weight tensors
assert len(weight_names) == 6
assert len(accl1_names) == len(weight_names)
assert len(accl2_names) == len(weight_names)
assert len(step_names) == len(weight_names)
tensor_mapping = {}
for tensor in model.graph.initializer:
tensor_mapping[tensor.name] = tensor
for w_name in weight_names:
assert popart.reservedAccl1Prefix() + w_name in accl1_names
assert popart.reservedAccl2Prefix() + w_name in accl2_names
assert popart.reservedStepPrefix() + w_name in step_names
if steps == 0:
for name in accum_names + accl1_names + accl2_names + step_names:
tensor = tensor_mapping[name]
# All Adam states are initialized to zero
assert np.allclose(tensor.float_data, 0.0)
else:
for name in step_names:
tensor = tensor_mapping[name]
# Steps counted correctly
assert tensor.float_data[0] == steps * batches_per_step
@pytest.mark.parametrize("explicit_loops", [True, False])
def test_adam_loading_saved_gradient_accumulation_tensors(
        tmpdir, explicit_loops):
"""
1. Build a model with matmuls, no grad accumulation
2. Write out onnx model, verify initializers contain no accum tensors
3. Create session with model, verify accl tensors initialised correctly
4. Do session.run(), write out model, verify accl tensors have been updated
5. Create new session with same model. This time before run, write out model
and check tensors are still there, with the same value
"""
# 1.
accum_factor = 4
[onnx_model, input_name, output_name,
lb_name] = get_mm_model(accum_factor=accum_factor, enable_multi_ipu=False)
# 2.
model = onnx.load_from_string(onnx_model)
names = [t.name for t in model.graph.initializer]
for name in names:
assert popart.reservedAccumPrefix() not in name
def getTrainingSession(fn):
opts = popart.SessionOptions()
opts.enableGradientAccumulation = True
opts.accumulationFactor = accum_factor
opts.disableGradAccumulationTensorStreams = False
if explicit_loops:
opts.enableExplicitMainLoops = True
opts.aliasZeroCopy = True
opts.explicitRecomputation = True
opts.useHostCopyOps = True
sess = popart.TrainingSession(
fnModel=fn,
dataFlow=popart.DataFlow(1, {}),
deviceInfo=tu.create_test_device(tilesPerIPU=testTilesPerIPU),
loss=output_name,
optimizer=adam_optimizer,
userOptions=opts)
sess.prepareDevice()
sess.weightsFromHost()
return sess
# 3.
sess = getTrainingSession(onnx_model)
fn = os.path.join(tmpdir, "withInitZeroAccumTensors.onnx")
sess.modelToHost(fn)
model = onnx.load(fn)
weights = {}
optstates = {}
for t in model.graph.initializer:
if (popart.reservedAccumPrefix() in t.name
or popart.reservedAccl1Prefix() in t.name
or popart.reservedAccl2Prefix() in t.name
or popart.reservedStepPrefix() in t.name):
optstates[t.name] = t.float_data
assert np.allclose(np.asarray(t.float_data), 0.0)
else:
weights[t.name] = t.float_data
# 4.
input_shape = [accum_factor] + sess.getInfo(input_name).shape()
stepio = popart.PyStepIO(
{
input_name: npr.rand(*input_shape).astype(np.float32),
lb_name: np.ones(batch_size).astype(np.int32),
}, sess.initAnchorArrays())
sess.run(stepio)
fn = os.path.join(tmpdir, "withUpdatedAcclTensors.onnx")
sess.modelToHost(fn)
model = onnx.load(fn)
for t in model.graph.initializer:
if (popart.reservedAccl1Prefix() in t.name
or popart.reservedAccl2Prefix() in t.name
or popart.reservedStepPrefix() in t.name):
            # accl1, accl2 and step tensors must be nonzero after the update
            assert not np.allclose(np.asarray(t.float_data),
                                   optstates[t.name])
optstates[t.name] = np.asarray(t.float_data)
elif popart.reservedAccumPrefix() in t.name:
# Because the accumulator is always set to zero after being applied
# to accl1 and accl2
assert np.allclose(np.asarray(t.float_data), 0.0)
optstates[t.name] = np.asarray(t.float_data)
# 5.
sess = getTrainingSession(fn)
fn = os.path.join(tmpdir, "withUpdatedAcclTensors_check.onnx")
sess.modelToHost(fn)
model = onnx.load(fn)
for t in model.graph.initializer:
if (popart.reservedAccumPrefix() in t.name
or popart.reservedAccl1Prefix() in t.name
or popart.reservedAccl2Prefix() in t.name
or popart.reservedStepPrefix() in t.name):
assert np.array_equal(optstates[t.name], np.asarray(t.float_data))
|
import sys
import time
import warnings
import cupy
from cupy import cuda
from cupy.cuda import nccl
from cupy import testing
from cupyx.distributed import init_process_group
from cupyx.distributed._nccl_comm import NCCLBackend
from cupyx.distributed._store import ExceptionAwareProcess
from cupyx.scipy import sparse
nccl_available = nccl.available
N_WORKERS = 2
def _launch_workers(func, args=(), n_workers=N_WORKERS):
processes = []
# TODO catch exceptions
for rank in range(n_workers):
p = ExceptionAwareProcess(
target=func,
args=(rank,) + args)
p.start()
processes.append(p)
for p in processes:
p.join()
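# Each collective test below follows the same pattern: skip int16 (not supported by
# NCCL), define a per-rank runner that selects a device, builds an NCCLBackend and
# checks the result, then either run the runner in-process under mpiexec
# (use_mpi=True) or spawn N_WORKERS local processes via _launch_workers.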
def broadcast(dtype, use_mpi=False):
if dtype in 'hH':
return # nccl does not support int16
def run_broadcast(rank, root, dtype, use_mpi=False):
dev = cuda.Device(rank)
dev.use()
comm = NCCLBackend(N_WORKERS, rank, use_mpi=use_mpi)
expected = cupy.arange(2 * 3 * 4, dtype=dtype).reshape((2, 3, 4))
if rank == root:
in_array = expected
else:
in_array = cupy.zeros((2, 3, 4), dtype=dtype)
comm.broadcast(in_array, root)
testing.assert_allclose(in_array, expected)
if use_mpi:
from mpi4py import MPI
# This process was run with mpiexec
run_broadcast(MPI.COMM_WORLD.Get_rank(), 0, dtype, False)
run_broadcast(MPI.COMM_WORLD.Get_rank(), 1, dtype, False)
else:
_launch_workers(run_broadcast, (0, dtype))
_launch_workers(run_broadcast, (1, dtype))
def reduce(dtype, use_mpi=False):
if dtype in 'hH':
return # nccl does not support int16
def run_reduce(rank, root, dtype, use_mpi=False):
dev = cuda.Device(rank)
dev.use()
comm = NCCLBackend(N_WORKERS, rank, use_mpi=use_mpi)
in_array = cupy.arange(2 * 3 * 4, dtype='f').reshape(2, 3, 4)
out_array = cupy.zeros((2, 3, 4), dtype='f')
comm.reduce(in_array, out_array, root)
if rank == root:
testing.assert_allclose(out_array, 2 * in_array)
if use_mpi:
from mpi4py import MPI
# This process was run with mpiexec
run_reduce(MPI.COMM_WORLD.Get_rank(), 0, dtype, False)
run_reduce(MPI.COMM_WORLD.Get_rank(), 1, dtype, False)
else:
_launch_workers(run_reduce, (0, dtype))
_launch_workers(run_reduce, (1, dtype))
def all_reduce(dtype, use_mpi=False):
if dtype in 'hH':
return # nccl does not support int16
def run_all_reduce(rank, dtype, use_mpi=False):
dev = cuda.Device(rank)
dev.use()
comm = NCCLBackend(N_WORKERS, rank, use_mpi=use_mpi)
in_array = cupy.arange(2 * 3 * 4, dtype='f').reshape(2, 3, 4)
out_array = cupy.zeros((2, 3, 4), dtype='f')
comm.all_reduce(in_array, out_array)
testing.assert_allclose(out_array, 2 * in_array)
if use_mpi:
from mpi4py import MPI
# This process was run with mpiexec
run_all_reduce(MPI.COMM_WORLD.Get_rank(), dtype, False)
else:
_launch_workers(run_all_reduce, (dtype,))
def reduce_scatter(dtype, use_mpi=False):
if dtype in 'hH':
return # nccl does not support int16
def run_reduce_scatter(rank, dtype, use_mpi=False):
dev = cuda.Device(rank)
dev.use()
comm = NCCLBackend(N_WORKERS, rank, use_mpi=use_mpi)
in_array = 1 + cupy.arange(
N_WORKERS * 10, dtype='f').reshape(N_WORKERS, 10)
out_array = cupy.zeros((10,), dtype='f')
comm.reduce_scatter(in_array, out_array, 10)
testing.assert_allclose(out_array, 2 * in_array[rank])
if use_mpi:
from mpi4py import MPI
# This process was run with mpiexec
run_reduce_scatter(MPI.COMM_WORLD.Get_rank(), dtype, False)
else:
_launch_workers(run_reduce_scatter, (dtype,))
def all_gather(dtype, use_mpi=False):
if dtype in 'hH':
return # nccl does not support int16
def run_all_gather(rank, dtype, use_mpi=False):
dev = cuda.Device(rank)
dev.use()
comm = NCCLBackend(N_WORKERS, rank, use_mpi=use_mpi)
in_array = (rank + 1) * cupy.arange(
N_WORKERS * 10, dtype='f').reshape(N_WORKERS, 10)
out_array = cupy.zeros((N_WORKERS, 10), dtype='f')
comm.all_gather(in_array, out_array, 10)
expected = 1 + cupy.arange(N_WORKERS).reshape(N_WORKERS, 1)
expected = expected * cupy.broadcast_to(
cupy.arange(10, dtype='f'), (N_WORKERS, 10))
testing.assert_allclose(out_array, expected)
if use_mpi:
from mpi4py import MPI
# This process was run with mpiexec
run_all_gather(MPI.COMM_WORLD.Get_rank(), dtype, False)
else:
_launch_workers(run_all_gather, (dtype,))
def send_and_recv(dtype, use_mpi=False):
if dtype in 'hH':
return # nccl does not support int16
def run_send_and_recv(rank, dtype, use_mpi=False):
dev = cuda.Device(rank)
dev.use()
comm = NCCLBackend(N_WORKERS, rank, use_mpi=use_mpi)
in_array = cupy.arange(10, dtype='f')
out_array = cupy.zeros((10,), dtype='f')
if rank == 0:
comm.send(in_array, 1)
else:
comm.recv(out_array, 0)
testing.assert_allclose(out_array, in_array)
if use_mpi:
from mpi4py import MPI
# This process was run with mpiexec
run_send_and_recv(MPI.COMM_WORLD.Get_rank(), dtype, False)
else:
_launch_workers(run_send_and_recv, (dtype,))
def send_recv(dtype, use_mpi=False):
if dtype in 'hH':
return # nccl does not support int16
def run_send_recv(rank, dtype, use_mpi=False):
dev = cuda.Device(rank)
dev.use()
comm = NCCLBackend(N_WORKERS, rank, use_mpi=use_mpi)
in_array = cupy.arange(10, dtype='f')
for i in range(N_WORKERS):
out_array = cupy.zeros((10,), dtype='f')
comm.send_recv(in_array, out_array, i)
testing.assert_allclose(out_array, in_array)
if use_mpi:
from mpi4py import MPI
# This process was run with mpiexec
run_send_recv(MPI.COMM_WORLD.Get_rank(), dtype, False)
else:
_launch_workers(run_send_recv, (dtype,))
def scatter(dtype, use_mpi=False):
if dtype in 'hH':
return # nccl does not support int16
def run_scatter(rank, root, dtype, use_mpi=False):
dev = cuda.Device(rank)
dev.use()
comm = NCCLBackend(N_WORKERS, rank, use_mpi=use_mpi)
in_array = 1 + cupy.arange(
N_WORKERS * 10, dtype='f').reshape(N_WORKERS, 10)
out_array = cupy.zeros((10,), dtype='f')
comm.scatter(in_array, out_array, root)
if rank > 0:
testing.assert_allclose(out_array, in_array[rank])
if use_mpi:
from mpi4py import MPI
# This process was run with mpiexec
run_scatter(MPI.COMM_WORLD.Get_rank(), 0, dtype, False)
run_scatter(MPI.COMM_WORLD.Get_rank(), 1, dtype, False)
else:
_launch_workers(run_scatter, (0, dtype))
_launch_workers(run_scatter, (1, dtype))
def gather(dtype, use_mpi=False):
if dtype in 'hH':
return # nccl does not support int16
def run_gather(rank, root, dtype, use_mpi=False):
dev = cuda.Device(rank)
dev.use()
comm = NCCLBackend(N_WORKERS, rank, use_mpi=use_mpi)
in_array = (rank + 1) * cupy.arange(10, dtype='f')
out_array = cupy.zeros((N_WORKERS, 10), dtype='f')
comm.gather(in_array, out_array, root)
if rank == root:
expected = 1 + cupy.arange(N_WORKERS).reshape(N_WORKERS, 1)
expected = expected * cupy.broadcast_to(
cupy.arange(10, dtype='f'), (N_WORKERS, 10))
testing.assert_allclose(out_array, expected)
if use_mpi:
from mpi4py import MPI
# This process was run with mpiexec
run_gather(MPI.COMM_WORLD.Get_rank(), 0, dtype, False)
run_gather(MPI.COMM_WORLD.Get_rank(), 1, dtype, False)
else:
_launch_workers(run_gather, (0, dtype))
_launch_workers(run_gather, (1, dtype))
def all_to_all(dtype, use_mpi=False):
if dtype in 'hH':
return # nccl does not support int16
def run_all_to_all(rank, dtype, use_mpi=False):
dev = cuda.Device(rank)
dev.use()
comm = NCCLBackend(N_WORKERS, rank, use_mpi=use_mpi)
in_array = cupy.arange(
N_WORKERS * 10, dtype='f').reshape(N_WORKERS, 10)
out_array = cupy.zeros((N_WORKERS, 10), dtype='f')
comm.all_to_all(in_array, out_array)
expected = (10 * rank) + cupy.broadcast_to(
cupy.arange(10, dtype='f'), (N_WORKERS, 10))
testing.assert_allclose(out_array, expected)
if use_mpi:
from mpi4py import MPI
# This process was run with mpiexec
run_all_to_all(MPI.COMM_WORLD.Get_rank(), dtype, False)
else:
_launch_workers(run_all_to_all, (dtype,))
def barrier(use_mpi=False):
def run_barrier(rank, use_mpi=False):
dev = cuda.Device(rank)
dev.use()
comm = NCCLBackend(N_WORKERS, rank, use_mpi=use_mpi)
comm.barrier()
before = time.time()
if rank == 0:
time.sleep(2)
comm.barrier()
after = time.time()
assert int(after - before) == 2
if use_mpi:
from mpi4py import MPI
# This process was run with mpiexec
run_barrier(MPI.COMM_WORLD.Get_rank(), False)
else:
_launch_workers(run_barrier)
def init(use_mpi=False):
def run_init(rank, use_mpi=False):
dev = cuda.Device(rank)
dev.use()
comm = init_process_group(N_WORKERS, rank, use_mpi=use_mpi)
# Do a simple call to verify we got a valid comm
in_array = cupy.zeros(1)
if rank == 0:
in_array = in_array + 1
comm.broadcast(in_array, 0)
testing.assert_allclose(in_array, cupy.ones(1))
if use_mpi:
from mpi4py import MPI
# This process was run with mpiexec
        run_init(MPI.COMM_WORLD.Get_rank(), False)
else:
_launch_workers(run_init)
def _make_sparse(dtype):
data = cupy.array([0, 1, 2, 3], dtype)
indices = cupy.array([0, 1, 3, 2], 'i')
indptr = cupy.array([0, 2, 3, 4], 'i')
return sparse.csr_matrix((data, indices, indptr), shape=(3, 4))
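# For reference, the CSR matrix built above corresponds to this dense array
# (rows from indptr, columns from indices, values from data):
#   [[0, 1, 0, 0],
#    [0, 0, 0, 2],
#    [0, 0, 3, 0]]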
def _make_sparse_empty(dtype):
data = cupy.array([0], dtype)
indices = cupy.array([0], 'i')
indptr = cupy.array([0], 'i')
return sparse.csr_matrix((data, indices, indptr), shape=(0, 0))
def sparse_send_and_recv(dtype, use_mpi=False):
def run_send_and_recv(rank, dtype, use_mpi=False):
dev = cuda.Device(rank)
dev.use()
comm = NCCLBackend(N_WORKERS, rank, use_mpi=use_mpi)
in_array = _make_sparse(dtype)
out_array = _make_sparse_empty(dtype)
warnings.filterwarnings(
'ignore', '.*transferring sparse.*', UserWarning)
if rank == 0:
comm.send(in_array, 1)
else:
comm.recv(out_array, 0)
testing.assert_allclose(out_array.todense(), in_array.todense())
if use_mpi:
from mpi4py import MPI
# This process was run with mpiexec
run_send_and_recv(MPI.COMM_WORLD.Get_rank(), dtype, False)
else:
_launch_workers(run_send_and_recv, (dtype,))
if __name__ == '__main__':
# Run the templatized test
func = globals()[sys.argv[1]]
    use_mpi = sys.argv[2] == "mpi"
    # dtype is the char representation of the numpy type (e.g. 'f'); optional
    dtype = sys.argv[3] if len(sys.argv) == 4 else None
if dtype is not None:
func(dtype, use_mpi)
else:
func(use_mpi)
|
import sys

# Reads triples (l, p, v) until the terminating "0 0 0" line. For each case the
# answer is (v // p) * l plus min(l, v % p), i.e. l for every full period of p
# days plus at most l from the leftover days.
out = ""
case = 1
while True:
    l, p, v = map(int, sys.stdin.readline().split())
    if l == 0 == p == v:
        break
    t = (v // p) * l
    if v % p > l:
        t += l
    else:
        t += v % p
    out += "Case " + str(case) + ": " + str(t) + "\n"
    case += 1
print(out, end="")
|
# Automatically generated by pb2py
# fmt: off
import protobuf as p
class StellarAccountMergeOp(p.MessageType):
MESSAGE_WIRE_TYPE = 218
def __init__(
self,
source_account: str = None,
destination_account: str = None,
) -> None:
self.source_account = source_account
self.destination_account = destination_account
@classmethod
def get_fields(cls):
return {
1: ('source_account', p.UnicodeType, 0),
2: ('destination_account', p.UnicodeType, 0),
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import talib
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 30)
pd.set_option('precision', 7)
pd.options.display.float_format = '{:,.3f}'.format
import warnings
warnings.simplefilter(action = "ignore", category = FutureWarning)
from sklearn import preprocessing, svm, cross_validation, metrics, pipeline, grid_search
from scipy.stats import sem
from sklearn.decomposition import PCA, KernelPCA
'''
Read OHLCV data for one stock over the given years.
Inputs: baseDir and stockCode are strings; startYear and yearNum are integers.
Output: a pandas DataFrame.
'''
def readWSDFile(baseDir, stockCode, startYear, yearNum=1):
# 解析日期
dateparse = lambda x: pd.datetime.strptime(x, '%Y-%m-%d').date()
df = 0
for i in range(yearNum):
tempDF = pd.read_csv(baseDir+stockCode+'/wsd_'+stockCode+'_'+str(startYear+i)+'.csv',
index_col=0, sep='\t', usecols=[0,2,3,4,5,6,7,9,10,12,15], header=None,
skiprows=1, names=['Date','Open','High','Low','Close','Volume','Amount',
'Chg','Chg Pct','Avg','Turn'],
parse_dates=True, date_parser=dateparse)
if i==0: df = tempDF
else: df = df.append(tempDF)
return df
usecols = [0, 2, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 33, 34, 36, 37]
usecols = [0, 6, 16, 17, 24, 31]
def readWSDIndexFile(baseDir, stockCode, startYear, yearNum=1):
    # date parser
dateparse = lambda x: pd.datetime.strptime(x, '%Y-%m-%d').date()
df = 0
for i in range(yearNum):
tempDF = pd.read_csv(baseDir+'I'+stockCode+'/wsd_'+stockCode+'_'+str(startYear+i)+'.csv',
index_col=0, sep=',', parse_dates=True, date_parser=dateparse, usecols=usecols)
if i==0: df = tempDF
else: df = df.append(tempDF)
return df
def prepareData(df, dfi, win=5):
    # Rolling means of: open, high, low, volume, amount,
    # change, change pct, average price, turnover,
    # r (daily return),
    # plus the 38 technical indicators from dfi
open = pd.rolling_mean(df['Open'], window=win)
high = pd.rolling_mean(df['High'], window=win)
low = pd.rolling_mean(df['Low'], window=win)
volume = pd.rolling_mean(df['Volume'], window=win)
amount = pd.rolling_mean(df['Amount'], window=win)
change = pd.rolling_mean(df['Chg'], window=win)
changePct = pd.rolling_mean(df['Chg Pct'], window=win)
average = pd.rolling_mean(df['Avg'], window=win)
turn = pd.rolling_mean(df['Turn'], window=win)
dailyreturn = df['Close'].pct_change()
dailyreturn[0] = dailyreturn[1]
r = pd.rolling_mean(dailyreturn, window=win)
techDF = pd.rolling_mean(dfi, window=win)
tempX = np.column_stack((open[win-1:], high[win-1:], low[win-1:], volume[win-1:], amount[win-1:],
change[win-1:], changePct[win-1:], average[win-1:], turn[win-1:], r[win-1:]))
X = np.hstack((tempX, techDF.values[win-1:]))
y = []
for i in range(win-1, len(dailyreturn)):
if dailyreturn[i]<0: y.append(-1)
elif dailyreturn[i]>0: y.append(1)
        else: y.append(y[-1]) # repeat the previous label on flat days
return X, y
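# Resulting data (sketch): X stacks the ten rolling-mean price/volume columns with
# the rolling means of the technical-indicator columns from dfi, aligned from row
# win-1 onwards; y holds -1/+1 labels taken from the sign of the daily return.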
def optimizeSVM(X_norm, y, kFolds=10):
clf = pipeline.Pipeline([
('svc', svm.SVC(kernel='rbf')),
])
    # grid search over multiple hyperparameters
parameters = {
# 'svc__gamma': np.logspace(-1, 3, 20),
# 'svc__C': np.logspace(-1, 3, 10),
# 'svc__gamma': np.logspace(-3, 11, 8, base=2),
# 'svc__C': np.logspace(-3, 15, 10, base=2),
'svc__gamma': np.logspace(-3, 11, 8, base=2),
'svc__C': np.logspace(-3, 15, 10, base=2),
}
gs = grid_search.GridSearchCV(clf, parameters, verbose=1, refit=False, cv=kFolds)
gs.fit(X_norm, y)
return gs.best_params_['svc__gamma'], gs.best_params_['svc__C'], gs.best_score_
def plot3D(X_pca, y):
red_x, red_y, red_z = [], [], []
blue_x, blue_y, blue_z = [], [], []
for i in range(len(X_pca)):
if y[i]==-1:
red_x.append(X_pca[i][0])
red_y.append(X_pca[i][1])
red_z.append(X_pca[i][2])
elif y[i]==1:
blue_x.append(X_pca[i][0])
blue_y.append(X_pca[i][1])
blue_z.append(X_pca[i][2])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(red_x, red_y, red_z, c='r', marker='x')
ax.scatter(blue_x, blue_y, blue_z, c='g', marker='.')
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.show()
baseDir = '/Users/eugene/Downloads/data/'
stockCodes = ['000300.SH', '000016.SH', '000905.SH']
i = 0
startYear = 2014
number = 2
df = readWSDFile(baseDir, stockCodes[i], startYear, number)
print 'Day count:', len(df)
# print df.head(5)
dfi = readWSDIndexFile(baseDir, stockCodes[i], startYear, number)
X, y = prepareData(df, dfi, win=12)
print np.shape(X), np.shape(y)
# print np.shape(X)
normalizer = preprocessing.Normalizer().fit(X) # fit does nothing
X_norm = normalizer.transform(X)
# estimator = PCA(n_components=10)
# estimator_kernel = KernelPCA(n_components=12, kernel='rbf')
# # X_pca = estimator.fit_transform(X_norm)
# X_pca = estimator_kernel.fit_transform(X_norm)
# plot3D(X_pca, y)
# grid search over multiple hyperparameters
gamma, C, score = optimizeSVM(X_norm, y, kFolds=10)
print 'gamma=',gamma, 'C=',C, 'score=',score |
from fantasydjhousekeeper import leagues, util
from fantasydjhousekeeper.entities import SongStat
import pytest
TMPL_KEY = '-K{}'
TMPL_DT = '2017-01-0{}'
TMPL_SONG_ID = '-Ksong{}'
def __create_stats(*popularities, **kwargs):
day = 1
sg_idx = kwargs['sg_idx'] if 'sg_idx' in kwargs else 0
koffset = kwargs['koffset'] if 'koffset' in kwargs else 0
stats = []
for popularity in popularities:
stat = SongStat(
TMPL_KEY.format(day + koffset),
TMPL_SONG_ID.format(sg_idx),
util.date_from_str(TMPL_DT.format(day)),
popularity
)
print(
'stat: id={}, songId={}, date={}, popularity={}'.format(
stat.id, stat.songId, stat.date, stat.popularity
)
)
stats.append(stat)
day += 1
return stats
def __assert_day_points(points, day, expected):
assert points[TMPL_DT.format(day)] == expected
def test_calc_points():
points = leagues.calc_points(None)
assert points is True
points = leagues.calc_points(__create_stats())
assert points is True
points = leagues.calc_points(__create_stats(90))
assert points is True
points = leagues.calc_points(__create_stats(90, 90))
__assert_day_points(points, 2, 0)
points = leagues.calc_points(__create_stats(90, 91))
__assert_day_points(points, 2, 1)
points = leagues.calc_points(__create_stats(90, 89))
__assert_day_points(points, 2, -1)
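# From the assertions above, calc_points appears to return, per day, the change in
# popularity relative to the previous day (and True when fewer than two stats are
# available); this is inferred from the tests rather than from the implementation.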
def __create_user_points(*poplists, **kwargs):
sg_idx = kwargs['sg_idx'] if 'sg_idx' in kwargs else 0
koffset = kwargs['koffset'] if 'koffset' in kwargs else 0
songs = {}
for poplist in poplists:
stats = __create_stats(*poplist, sg_idx=sg_idx, koffset=koffset)
songId = stats[0].songId
points = leagues.calc_points(stats)
songs[songId] = points
sg_idx += 1
koffset += len(poplist)
return songs
TMPL_USERNAME = 'u{}'
def __create_points(*poplistslists):
usr_idx = 0
sg_idx = 0
sg_idx_dx = len(poplistslists[0])
koffset = 0
koffset_dx = len(poplistslists[0][0]) * sg_idx_dx
points = {}
for poplistslist in poplistslists:
points[TMPL_USERNAME.format(usr_idx)] = \
__create_user_points(
*poplistslist,
sg_idx=sg_idx,
koffset=koffset
)
sg_idx += sg_idx_dx
koffset += koffset_dx
usr_idx += 1
return points
def test_calc_winner():
with pytest.raises(ValueError):
leagues.calc_winner(None)
points = __create_points([
[0], [0], [0]
], [
[0], [0], [0]
])
assert leagues.calc_winner(points) is None
points = __create_points([
[0, 0], [0, 0], [0, 0]
], [
[0, 0], [0, 0], [0, 0]
])
assert leagues.calc_winner(points) is False
points = __create_points([
[0, 0], [0, 0], [0, 1]
], [
[0, 0], [0, 0], [0, 0]
])
assert leagues.calc_winner(points) == 'u0'
points = __create_points([
[0, 0], [0, 0], [0, -1]
], [
[0, 0], [0, 0], [0, 0]
])
assert leagues.calc_winner(points) == 'u1'
|
import casbin
from casbin import persist
from mongoengine import Document
from mongoengine import connect
from mongoengine.fields import IntField, StringField
class CasbinRule(Document):
'''
CasbinRule model
'''
__tablename__ = "casbin_rule"
ptype = StringField(required=True, max_length=255)
v0 = StringField(max_length=255)
v1 = StringField(max_length=255)
v2 = StringField(max_length=255)
v3 = StringField(max_length=255)
v4 = StringField(max_length=255)
v5 = StringField(max_length=255)
v6 = StringField(max_length=255)
def __str__(self):
text = self.ptype
if self.v0:
text = text+', '+self.v0
if self.v1:
text = text+', '+self.v1
if self.v2:
text = text+', '+self.v2
if self.v3:
text = text+', '+self.v3
if self.v4:
text = text+', '+self.v4
if self.v5:
text = text+', '+self.v5
if self.v6:
text = text+', '+self.v6
return text
def __repr__(self):
return '<CasbinRule :"{}">'.format(str(self))
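# str(rule) therefore yields a CSV-style policy line such as "p, alice, data1, read",
# which is the format persist.load_policy_line() consumes in load_policy() below.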
class Adapter(persist.Adapter):
"""the interface for Casbin adapters."""
    def __init__(self, dbname, host):
        connect(db=dbname, host=host)
def load_policy(self, model):
        '''
        Implement the load interface for casbin:
        load all policy rules from MongoDB.
        '''
lines = CasbinRule.objects()
for line in lines:
persist.load_policy_line(str(line),model)
def _save_policy_line(self, ptype, rule):
line = CasbinRule(ptype=ptype)
if len(rule) > 0:
line.v0 = rule[0]
if len(rule) > 1:
line.v1 = rule[1]
if len(rule) > 2:
line.v2 = rule[2]
if len(rule) > 3:
line.v3 = rule[3]
if len(rule) > 4:
line.v4 = rule[4]
        if len(rule) > 5:
            line.v5 = rule[5]
        if len(rule) > 6:
            line.v6 = rule[6]
        line.save()
def save_policy(self, model):
        '''
        Implement the save interface for casbin:
        save all policy rules to MongoDB.
        '''
for sec in ["p", "g"]:
if sec not in model.model.keys():
continue
for ptype, ast in model.model[sec].items():
for rule in ast.policy:
self._save_policy_line(ptype, rule)
return True
def add_policy(self, sec, ptype, rule):
"""add policy rules to mongodb"""
self._save_policy_line(ptype, rule)
def remove_policy(self, sec, ptype, rule):
"""delete policy rules from mongodb"""
pass
def remove_filtered_policy(self, sec, ptype, field_index, *field_values):
"""
delete policy rules for matching filters from mongodb
"""
pass
|
import six
from sevenbridges.meta.fields import IntegerField, DateTimeField
from sevenbridges.meta.resource import Resource
class Rate(Resource):
"""
Rate resource.
"""
limit = IntegerField(read_only=True)
remaining = IntegerField(read_only=True)
reset = DateTimeField()
def __str__(self):
return six.text_type(
'<Rate: limit={limit}, remaining={rem}>'
.format(limit=self.limit, rem=self.remaining)
)
|
# Copyright (C) 2009 Valmantas Paliksa <walmis at balticum-tv dot lt>
#
# Licensed under the GNU General Public License Version 3
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from blueman.plugins.AppletPlugin import AppletPlugin
import dbus
from gi.repository import GObject
from blueman.main.SignalTracker import SignalTracker
from blueman.gui.Notification import Notification
from blueman.Sdp import *
from blueman.Functions import get_icon, composite_icon, dprint
import weakref
class ConnectionHandler:
def __init__(self, parent, device, uuid, reply, err):
self.parent = parent
self.device = device
self.uuid = uuid
self.reply = reply
self.err = err
self.rfcomm_dev = None
self.timeout = None
self.signals = SignalTracker()
self.signals.Handle("dbus", self.parent.bus,
self.on_mm_device_added,
"DeviceAdded",
"org.freedesktop.ModemManager")
        # For some reason these handlers take a reference and don't give it back,
        # so we work around it with a weak reference :(
w = weakref.ref(self)
device.Services["serial"].Connect(uuid,
reply_handler=lambda *args: w() and w().on_connect_reply(*args),
error_handler=lambda *args: w() and w().on_connect_error(*args))
def __del__(self):
dprint("deleting")
def on_connect_reply(self, rfcomm):
self.rfcomm_dev = rfcomm
self.timeout = GObject.timeout_add(10000, self.on_timeout)
def on_connect_error(self, *args):
self.err(*args)
self.cleanup()
def cleanup(self):
if self.timeout:
GObject.source_remove(self.timeout)
self.signals.DisconnectAll()
del self.device
def on_mm_device_added(self, path):
dprint(path)
props = self.parent.bus.call_blocking("org.freedesktop.ModemManager",
path,
"org.freedesktop.DBus.Properties",
"GetAll",
"s",
["org.freedesktop.ModemManager.Modem"])
if self.rfcomm_dev and props["Driver"] == "bluetooth" and props["Device"] in self.rfcomm_dev:
dprint("It's our bluetooth modem!")
modem = get_icon("modem", 24)
blueman = get_icon("blueman", 48)
icon = composite_icon(blueman, [(modem, 24, 24, 255)])
Notification(_("Bluetooth Dialup"),
_("DUN connection on %s will now be available in Network Manager") % self.device.Alias,
pixbuf=icon,
status_icon=self.parent.Applet.Plugins.StatusIcon)
self.reply(self.rfcomm_dev)
self.cleanup()
def on_timeout(self):
self.timeout = None
self.err(dbus.DBusException(_("Modem Manager did not support the connection")))
self.cleanup()
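# ConnectionHandler flow (as read from the code above): connect to the device's
# serial service for the given UUID; once the rfcomm device is reported, wait up to
# 10 s for ModemManager to announce a modem backed by that device, then call
# reply() with the rfcomm path; on error or timeout call err() and clean up.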
class NMDUNSupport(AppletPlugin):
__depends__ = ["StatusIcon", "DBusService"]
__conflicts__ = ["PPPSupport", "NMIntegration"]
__icon__ = "modem"
__author__ = "Walmis"
__description__ = _("Provides support for Dial Up Networking (DUN) with ModemManager and NetworkManager 0.8")
__priority__ = 1
def on_load(self, applet):
self.bus = dbus.SystemBus()
def on_unload(self):
pass
def rfcomm_connect_handler(self, device, uuid, reply, err):
uuid16 = sdp_get_serial_type(device.Address, uuid)
if DIALUP_NET_SVCLASS_ID in uuid16:
ConnectionHandler(self, device, uuid, reply, err)
return True
else:
return False
|
import logging
import urlparse
from bitfield.models import BitField
from django.contrib.auth.models import User
from django.core.exceptions import SuspiciousOperation
from django.db import models
from django.db.models import Count
from django_gravatar.helpers import has_gravatar, get_gravatar_url
from sorl.thumbnail import get_thumbnail
from allauth.socialaccount.models import SocialAccount
from sorl import thumbnail
from sorl.thumbnail.helpers import ThumbnailError
from core.utils.file import generate_save_file_name
from core.utils.url import unique_slugify
from dss import settings
from spa.models.basemodel import BaseModel
from spa.models.fields import MultiSelectField
logger = logging.getLogger(__name__)
def avatar_name(instance, filename):
return generate_save_file_name(str(instance.id), 'avatars', filename)
class UserProfileManager(models.Manager):
def get_query_set(self):
return super(UserProfileManager, self).get_query_set().annotate(mix_count=Count('mixes'))
class UserProfile(BaseModel):
class Meta:
app_label = 'spa'
objects = UserProfileManager()
ACTIVITY_SHARE_NETWORK_FACEBOOK = 1
ACTIVITY_SHARE_NETWORK_TWITTER = 2
user = models.OneToOneField(User, unique=True, related_name='userprofile')
avatar_type = models.CharField(max_length=15, default='social')
avatar_image = models.ImageField(max_length=1024, blank=True, upload_to=avatar_name)
display_name = models.CharField(blank=True, max_length=35)
description = models.CharField(blank=True, max_length=2048)
slug = models.SlugField(max_length=50, blank=True, null=True, default=None)
activity_sharing_networks = models.IntegerField(default=0)
NOTIFICATION_CHOICES = (
('plays', 'Plays'),
('likes', 'Likes'),
('favourites', 'Favourites'),
('follows', 'Follows'),
('comments', 'Comments'),
)
activity_sharing_facebook = BitField(flags=NOTIFICATION_CHOICES, default=0)
activity_sharing_twitter = BitField(flags=NOTIFICATION_CHOICES, default=0)
email_notifications = BitField(flags=NOTIFICATION_CHOICES, default=0)
following = models.ManyToManyField('self', null=True, blank=True, symmetrical=False, related_name='followers')
#location properties
city = models.CharField(max_length=100, blank=True, null=True)
country = models.CharField(max_length=100, blank=True, null=True)
last_known_session = models.CharField(max_length=250, blank=True, null=True)
def __unicode__(self):
return "%s - %s" % (self.user.get_full_name(), self.slug)
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
"""
Save Photo after ensuring it is not blank. Resize as needed.
"""
if self.slug is None or self.slug == '':
self.slug = unique_slugify(self, self.user.get_username())
print "Slugified: %s" % self.slug
return super(UserProfile, self).save(force_insert, force_update, using, update_fields)
def get_username(self):
return self.user.username
def get_email(self):
return self.user.email
email = property(get_email)
def get_first_name(self):
return self.user.first_name
first_name = property(get_first_name)
def get_last_name(self):
return self.user.last_name
last_name = property(get_last_name)
def __create_slug(self):
try:
unique_slugify(self, self.get_username() or self.user.get_full_name(), slug_separator='_')
self.save()
except Exception, e:
self.logger.error("Unable to create profile slug: %s", e.message)
def toggle_favourite(self, mix, value):
try:
if value:
if self.activity.filter(mix=mix).count() == 0:
self.activity.model.add(mix=mix, user=self)
self.favourites.model.save()
else:
self.favourites.model.delete(mix=mix)
except Exception, ex:
self.logger.error("Exception updating favourite: %s" % ex.message)
def is_follower(self, user):
try:
return user.get_profile() in self.followers.all()
except Exception, ex:
logger.error(ex.message)
return False
def get_absolute_url(self):
if self.slug is None or len(self.slug) == 0:
self.__create_slug()
return "user/%s" % self.slug
def get_nice_name(self):
return self.display_name or self.first_name + ' ' + self.last_name
#TODO Refactor the below into something sane
def get_medium_profile_image(self):
try:
image = self.get_avatar_image()
if self.avatar_type == 'custom':
image = "%s%s" % (settings.MEDIA_URL, get_thumbnail(image, "170x170", crop='center').name)
return image
except SuspiciousOperation, ex:
self.logger.warn("Error getting medium profile image: %s", ex.message)
except IOError, ex:
self.logger.warn("Error getting medium profile image: %s", ex.message)
def get_small_profile_image(self):
try:
if self.avatar_type == 'custom':
image = self.avatar_image
if image.name.startswith('http'):
return image.name
image = "%s%s" % (settings.MEDIA_URL, get_thumbnail(image, "32x32", crop='center').name)
return image
except SuspiciousOperation, ex:
self.logger.exception("Error getting small profile image")
except IOError, ex:
self.logger.exception("Error getting small profile image")
except ThumbnailError:
pass
return self.get_avatar_image()
def get_sized_avatar_image(self, width, height):
try:
image = self.get_avatar_image()
sized = thumbnail.get_thumbnail(image, "%sx%s" % (width, height), crop="center")
return urlparse.urljoin(settings.MEDIA_URL, sized.name)
except SuspiciousOperation:
return UserProfile.get_default_avatar_image()
except Exception, ex:
return UserProfile.get_default_avatar_image()
def get_avatar_image(self):
avatar_type = self.avatar_type
if avatar_type == 'gravatar':
gravatar_exists = has_gravatar(self.email)
if gravatar_exists:
return get_gravatar_url(self.email)
elif avatar_type == 'social' or avatar_type == '':
try:
social_account = SocialAccount.objects.filter(user=self.user)[0]
if social_account:
provider = social_account.get_provider_account()
return provider.get_avatar_url()
except Exception, ex:
pass
elif avatar_type == 'custom' or avatar_type:
return urlparse.urljoin(settings.MEDIA_URL, self.avatar_image.name)
return UserProfile.get_default_avatar_image()
def get_profile_url(self):
return '/user/%s' % (self.slug)
def get_profile_description(self):
try:
if not self.description:
social_account = SocialAccount.objects.filter(user=self.user)[0]
if social_account is not None:
provider = social_account.get_provider_account()
if 'bio' in provider.account.extra_data:
return provider.account.extra_data['bio']
else:
return self.description
except Exception, ex:
pass
return "Just another<br>DSS lover"
@classmethod
def get_default_avatar_image(cls):
return urlparse.urljoin(settings.STATIC_URL, "img/default-avatar-32.png")
@classmethod
def get_default_moniker(cls):
return "Anonymouse"
|
import os
from conans import ConanFile, tools
from conans.errors import ConanInvalidConfiguration
required_conan_version = ">=1.33.0"
class VequeConan(ConanFile):
name = "veque"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/Shmoopty/veque"
description = "Fast C++ container combining the best features of std::vector and std::deque"
topics = ("cpp17", "vector", "deque")
license = "BSL-1.0"
settings = "os", "arch", "compiler", "build_type"
no_copy_source = True
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _compilers_minimum_version(self):
return {
"gcc": "7",
"clang": "5",
"apple-clang": "10",
"Visual Studio": "15.7",
}
def validate(self):
if self.settings.compiler.get_safe("cppstd"):
tools.check_min_cppstd(self, "17")
def lazy_lt_semver(v1, v2):
lv1 = [int(v) for v in v1.split(".")]
lv2 = [int(v) for v in v2.split(".")]
min_length = min(len(lv1), len(lv2))
return lv1[:min_length] < lv2[:min_length]
minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)
if minimum_version and lazy_lt_semver(str(self.settings.compiler.version), minimum_version):
raise ConanInvalidConfiguration("{} requires C++17, which your compiler does not support.".format(self.name))
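    # Note: lazy_lt_semver above compares only the shared prefix of the two version
    # strings, so a compiler version reported as just "7" is not treated as older
    # than a required "7.0.x"; this follows the usual conan-center recipe pattern.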
def package_id(self):
self.info.header_only()
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def package(self):
self.copy("*.hpp", dst="include", src=os.path.join(self._source_subfolder, "include"))
self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
|
import logging
from threading import Event, Lock, RLock, Thread
logger = logging.getLogger(__name__)
class CriticalTask(object):
"""Represents a critical task in a background process that we either need to cancel or get the result of.
Fields of this object may be accessed only when holding a lock on it. To perform the critical task the background
thread must, while holding lock on this object, check `is_cancelled` flag, run the task and mark the task as
complete using `complete()`.
The main thread must hold async lock to prevent the task from completing, hold lock on critical task object,
call cancel. If the task has completed `cancel()` will return False and `result` field will contain the result of
the task. When cancel returns True it is guaranteed that the background task will notice the `is_cancelled` flag.
"""
def __init__(self):
self._lock = Lock()
self.is_cancelled = False
self.result = None
def reset(self):
"""Must be called every time the background task is finished.
Must be called from async thread. Caller must hold lock on async executor when calling."""
self.is_cancelled = False
self.result = None
def cancel(self):
"""Tries to cancel the task, returns True if the task has already run.
Caller must hold lock on async executor and the task when calling."""
if self.result is not None:
return False
self.is_cancelled = True
return True
def complete(self, result):
"""Mark task as completed along with a result.
Must be called from async thread. Caller must hold lock on task when calling."""
self.result = result
def __enter__(self):
self._lock.acquire()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._lock.release()
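# Illustrative usage sketch (not part of the original module) of the locking
# protocol described in the CriticalTask docstring; `do_work` is a hypothetical
# callable.
#
#   task = CriticalTask()
#
#   # background thread, holding the task lock while running the task:
#   with task:
#       if not task.is_cancelled:
#           task.complete(do_work())
#
#   # main thread, holding the async executor lock:
#   with task:
#       if not task.cancel():
#           result = task.result  # the task already finished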
class AsyncExecutor(object):
def __init__(self, cancellable, ha_wakeup):
self._cancellable = cancellable
self._ha_wakeup = ha_wakeup
self._thread_lock = RLock()
self._scheduled_action = None
self._scheduled_action_lock = RLock()
self._is_cancelled = False
self._finish_event = Event()
self.critical_task = CriticalTask()
@property
def busy(self):
return self.scheduled_action is not None
def schedule(self, action):
with self._scheduled_action_lock:
if self._scheduled_action is not None:
return self._scheduled_action
self._scheduled_action = action
self._is_cancelled = False
self._finish_event.set()
return None
@property
def scheduled_action(self):
with self._scheduled_action_lock:
return self._scheduled_action
def reset_scheduled_action(self):
with self._scheduled_action_lock:
self._scheduled_action = None
def run(self, func, args=()):
wakeup = False
try:
with self:
if self._is_cancelled:
return
self._finish_event.clear()
self._cancellable.reset_is_cancelled()
# if the func returned something (not None) - wake up main HA loop
wakeup = func(*args) if args else func()
return wakeup
except Exception:
logger.exception('Exception during execution of long running task %s', self.scheduled_action)
finally:
with self:
self.reset_scheduled_action()
self._finish_event.set()
with self.critical_task:
self.critical_task.reset()
if wakeup is not None:
self._ha_wakeup()
def run_async(self, func, args=()):
Thread(target=self.run, args=(func, args)).start()
def try_run_async(self, action, func, args=()):
prev = self.schedule(action)
if prev is None:
return self.run_async(func, args)
return 'Failed to run {0}, {1} is already in progress'.format(action, prev)
def cancel(self):
with self:
with self._scheduled_action_lock:
if self._scheduled_action is None:
return
logger.warning('Cancelling long running task %s', self._scheduled_action)
self._is_cancelled = True
self._cancellable.cancel()
self._finish_event.wait()
with self:
self.reset_scheduled_action()
def __enter__(self):
self._thread_lock.acquire()
def __exit__(self, *args):
self._thread_lock.release()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Milan Ondrasovic <[email protected]>
from typing import Iterable, Optional, Union, Callable, cast
import cv2 as cv
import numpy as np
import torch
from got10k.trackers import Tracker
from sot.bbox import BBox
from sot.cfg import TrackerConfig
from sot.model import SiamFCModel
from sot.utils import (
assure_int_bbox, calc_bbox_side_size_with_context, center_crop_and_resize,
ImageT, pil_to_tensor,
)
TrackImgCb = Optional[Callable[[Union[np.ndarray, ImageT]], None]]
def _create_square_cosine_window(size: int) -> np.ndarray:
# Create a normalized cosine (Hanning) window.
hanning_1d = np.hanning(size)
hanning_2d = np.outer(hanning_1d, hanning_1d)
hanning_2d /= np.sum(hanning_2d)
return hanning_2d
def _create_search_scales(scale_step: float, count: int) -> np.ndarray:
n_half_search_scales = count // 2
search_scales = scale_step ** np.linspace(
-n_half_search_scales, n_half_search_scales, count)
return search_scales
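# For example (illustrative): _create_search_scales(1.0375, 3) yields roughly
# [0.964, 1.0, 1.0375], i.e. one smaller, one unchanged and one larger scale
# over which the target is searched.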
class TrackerSiamFC(Tracker):
def __init__(
self, cfg: TrackerConfig, device: Union[torch.device, str],
model_path: Optional[str] = None,
name: str = 'SiamFC') -> None:
super().__init__(name=name, is_deterministic=True)
self.cfg: TrackerConfig = cfg
if isinstance(device, torch.device):
self.device: torch.device = device
else:
self.device: torch.device = torch.device(device)
self.model: SiamFCModel = SiamFCModel(self.cfg.response_map_scale)
if model_path is not None:
self.model.load_state_dict(
torch.load(
model_path, map_location=lambda storage, location: storage))
self.model = self.model.to(self.device)
self.response_size_upscaled: int = \
self.cfg.response_size * self.cfg.response_upscale
self.cosine_win: np.ndarray = _create_square_cosine_window(
self.response_size_upscaled)
self.search_scales: np.ndarray = _create_search_scales(
self.cfg.scale_step, self.cfg.n_scales)
self.curr_instance_side_size: int = self.cfg.instance_size
self.target_bbox = None
self.exemplar_emb = None
self.on_exemplar_img_extract: TrackImgCb = None
self.on_instance_img_extract: TrackImgCb = None
self.on_response_map_calc: TrackImgCb = None
@torch.no_grad()
def init(self, img: ImageT, bbox: np.ndarray) -> None:
self.model.eval()
bbox = assure_int_bbox(bbox)
self.target_bbox = BBox(*bbox)
self.curr_instance_side_size = calc_bbox_side_size_with_context(
self.target_bbox)
size_ratio = self.cfg.exemplar_size / self.cfg.instance_size
exemplar_side_size = int(round(
self.curr_instance_side_size * size_ratio))
exemplar_bbox = BBox.build_from_center_and_size(
self.target_bbox.center,
np.asarray((exemplar_side_size, exemplar_side_size)))
exemplar_img = center_crop_and_resize(
img, exemplar_bbox,
(self.cfg.exemplar_size, self.cfg.exemplar_size))
if self.on_exemplar_img_extract:
self.on_exemplar_img_extract(exemplar_img)
exemplar_img_tensor = torch.unsqueeze(pil_to_tensor(exemplar_img), 0)
exemplar_img_tensor = exemplar_img_tensor.to(self.device)
self.exemplar_emb = self.model.extract_visual_features(
exemplar_img_tensor)
@torch.no_grad()
def update(self, img: ImageT) -> np.ndarray:
self.model.eval()
# Search for the object over multiple different scales
# (smaller and bigger).
instance_size = (self.cfg.instance_size, self.cfg.instance_size)
instances_imgs = [
center_crop_and_resize(img, bbox, instance_size)
for bbox in self.iter_target_centered_scaled_instance_bboxes()]
instances_imgs_tensor = torch.stack(
[pil_to_tensor(img) for img in instances_imgs])
instances_imgs_tensor = instances_imgs_tensor.to(self.device)
instances_features = self.model.extract_visual_features(
instances_imgs_tensor)
responses = self.model.calc_response_map(
self.exemplar_emb, instances_features)
# Remove the channel dimension, as it is just 1.
responses = responses.squeeze(1).cpu().numpy()
# Increase response maps size.
response_size_upscaled = (
self.response_size_upscaled, self.response_size_upscaled)
responses = np.stack(
[cv.resize(r, response_size_upscaled, interpolation=cv.INTER_CUBIC)
for r in responses])
# Penalize scales.
responses[:self.cfg.n_scales // 2] *= self.cfg.scale_penalty
responses[self.cfg.n_scales // 2 + 1:] *= self.cfg.scale_penalty
peak_scale_pos = np.argmax(np.amax(responses, axis=(1, 2)))
peak_scale = self.search_scales[peak_scale_pos]
# Normalize response map so that it sums to one.
response = responses[peak_scale_pos]
response -= response.min()
response /= response.sum() + 1.e-16
response = (1 - self.cfg.cosine_win_influence) * response + \
self.cfg.cosine_win_influence * self.cosine_win
if self.on_instance_img_extract:
self.on_instance_img_extract(
instances_imgs[cast(int, peak_scale_pos)])
if self.on_response_map_calc:
self.on_response_map_calc(response)
# The assumption is that the peak response value is in the center of the
# response map. Thus, we compute the change with respect to the center
# and convert it back to the pixel coordinates in the image.
peak_response_pos = np.asarray(
np.unravel_index(response.argmax(), response.shape))
disp_in_response = peak_response_pos - self.response_size_upscaled // 2
disp_in_instance = disp_in_response * \
(self.cfg.total_stride / self.cfg.response_upscale)
disp_in_image = disp_in_instance * self.curr_instance_side_size * \
(peak_scale / self.cfg.instance_size)
        disp_in_image = disp_in_image.round().astype(int)
# Update target scale.
new_scale = (1 - self.cfg.scale_damping) * 1.0 + \
(self.cfg.scale_damping * peak_scale)
self.curr_instance_side_size *= new_scale
# Change from [row, col] to [x, y] coordinates.
self.target_bbox.shift(disp_in_image[::-1])
self.target_bbox.rescale(new_scale, new_scale)
return self.target_bbox.as_xywh()
def iter_target_centered_scaled_instance_bboxes(self) -> Iterable[BBox]:
side_size = int(round(self.curr_instance_side_size))
size = np.asarray((side_size, side_size))
bbox = BBox.build_from_center_and_size(self.target_bbox.center, size)
for scale in self.search_scales:
yield bbox.rescale(scale, scale, in_place=False)
|
from __future__ import annotations
from typing import Literal
from prettyqt import core
from prettyqt.qt import QtNetwork
from prettyqt.utils import InvalidParamError, bidict
mod = QtNetwork.QLocalSocket
LOCAL_SOCKET_ERROR = bidict(
connection_refused=mod.LocalSocketError.ConnectionRefusedError,
peer_closed=mod.LocalSocketError.PeerClosedError,
server_not_found=mod.LocalSocketError.ServerNotFoundError,
socket_access=mod.LocalSocketError.SocketAccessError,
socket_resource=mod.LocalSocketError.SocketResourceError,
socket_timeout=mod.LocalSocketError.SocketTimeoutError,
datagram_too_large=mod.LocalSocketError.DatagramTooLargeError,
connection=mod.LocalSocketError.ConnectionError,
unsupported_socket_operation=mod.LocalSocketError.UnsupportedSocketOperationError,
operation=mod.LocalSocketError.OperationError,
unknown_socket=mod.LocalSocketError.UnknownSocketError,
)
LocalSocketErrorStr = Literal[
"connection_refused",
"peer_closed",
"server_not_found",
"socket_access",
"socket_resource",
"socket_timeout",
"datagram_too_large",
"connection",
"unsupported_socket_operation",
"operation",
"unknown_socket",
]
LOCAL_SOCKET_STATE = bidict(
unconnected=mod.LocalSocketState.UnconnectedState,
connecting=mod.LocalSocketState.ConnectingState,
connected=mod.LocalSocketState.ConnectedState,
closing=mod.LocalSocketState.ClosingState,
)
LocalSocketStateStr = Literal[
"unconnected",
"connecting",
"connected",
"closing",
]
QtNetwork.QLocalSocket.__bases__ = (core.IODevice,)
class LocalSocket(QtNetwork.QLocalSocket):
def __bool__(self):
return self.isValid()
def get_error(self) -> LocalSocketErrorStr:
return LOCAL_SOCKET_ERROR.inverse[self.error()]
def get_state(self) -> LocalSocketStateStr:
return LOCAL_SOCKET_STATE.inverse[self.state()]
def connect_to_server(
self,
server_name: str | None = None,
mode: core.iodevice.OpenModeStr = "read_write",
):
if mode not in core.iodevice.OPEN_MODES:
raise InvalidParamError(mode, core.iodevice.OPEN_MODES)
if server_name is not None:
self.connectToServer(server_name, core.iodevice.OPEN_MODES[mode])
else:
self.connectToServer(core.iodevice.OPEN_MODES[mode])
if __name__ == "__main__":
server = LocalSocket()
|
import decimal
import uuid
from datetime import datetime
from flask import request
from flask_restplus import Resource, reqparse
from ..models.mine_expected_document import MineExpectedDocument
from app.extensions import api
from ....utils.access_decorators import requires_role_mine_view, requires_role_mine_create, requires_any_of, MINE_VIEW, MINE_CREATE, MINESPACE_PROPONENT
from ....utils.resources_mixins import UserMixin, ErrorMixin
class ExpectedDocumentResource(Resource, UserMixin, ErrorMixin):
parser = reqparse.RequestParser()
parser.add_argument(
'document', type=dict, required=True, help='document to change', location="json")
@api.doc(
params={
'exp_doc_guid':
'Required: Mine number or guid. returns list of expected documents for the mine'
})
@requires_role_mine_view
def get(self, exp_doc_guid=None):
if exp_doc_guid is None:
return self.create_error_payload(404, 'Must provide a expected document guid.'), 404
mine_exp_doc = MineExpectedDocument.find_by_exp_document_guid(exp_doc_guid)
if mine_exp_doc is None:
return self.create_error_payload(404, 'Expected document not found'), 404
return {'expected_document': mine_exp_doc.json()}
@api.doc(params={'exp_doc_guid': 'Required: Mine number or guid. Updates expected document'})
@requires_any_of([MINE_CREATE, MINESPACE_PROPONENT])
def put(self, exp_doc_guid=None):
if exp_doc_guid is None:
return self.create_error_payload(404, 'Must provide a expected document guid.'), 404
exp_doc = MineExpectedDocument.find_by_exp_document_guid(exp_doc_guid)
if exp_doc is None:
return self.create_error_payload(
404, f'expected_document with guid "{exp_doc_guid}" not found'), 404
data = self.parser.parse_args()
updated_doc = data['document']
if str(exp_doc.exp_document_guid) != updated_doc['exp_document_guid']:
return self.create_error_payload(500, 'exp_document does not match guid provided'), 500
exp_doc.exp_document_name = updated_doc.get('exp_document_name')
exp_doc.exp_document_description = updated_doc.get('exp_document_description')
if updated_doc.get('due_date') is not None:
exp_doc.due_date = updated_doc.get('due_date')
exp_doc.received_date = updated_doc.get('received_date')
exp_doc.exp_document_description = updated_doc.get('exp_document_description')
updated_doc_status = updated_doc.get('exp_document_status')
if updated_doc_status is not None:
updated_doc_status_code = updated_doc_status.get('exp_document_status_code')
if updated_doc_status_code is not None:
exp_doc.exp_document_status_code = updated_doc_status_code
exp_doc.save()
return {'expected_document': exp_doc.json()}
@api.doc(params={'exp_doc_guid': 'Required: Mine number or guid. Deletes expected document.'})
@requires_role_mine_create
def delete(self, exp_doc_guid=None):
if exp_doc_guid is None:
return self.create_error_payload(404, 'Must provide a expected document guid.'), 404
exp_doc = MineExpectedDocument.find_by_exp_document_guid(exp_doc_guid)
if exp_doc is not None:
exp_doc.active_ind = False
exp_doc.save()
return {'status': 200, 'message': 'expected_document deleted successfully.'}
return self.create_error_payload(
404, f'expected_document with guid "{exp_doc_guid}" not found'), 404
|
# -*- coding:utf-8 -*-
# Copyright 2019 Huawei Technologies Co.,Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
from openstack import connection
# create connection
username = "xxxxxx"
password = "xxxxxx"
userDomainId = "xxxxxxxxxxxxxxxxxxxxxxxxxxxx" # user account ID
auth_url = "xxxxxxxxxxxxxxxxxxxxxxxxxxxx" # endpoint url
if __name__ == '__main__':
conn = connection.Connection(auth_url=auth_url,
user_domain_id=userDomainId,
domain_id=userDomainId,
username=username,
password=password)
data = {
"customerId": "3d2c6b3ab1fd4e26846b0f2c46e67bda",
"regionCode": "cn-north-1",
"cloudServiceTypeCode": "hws.service.type.ebs",
"resourceTypeCode": "hws.resource.type.volume",
"resourceIds": ["71e3eeb5-4b77-44ae-9c42-119ee7976cf7", "39d90d01-4774-4af6-8136-83ba5732bccf"],
"startTimeBegin": "2019-06-01T11:00:00Z",
"startTimeEnd": "2019-06-30T11:00:00Z"
}
'''
A customer can query its pay-per-use resources on the partner sales platform.
The on-demand resource data has a latency, and the latency for each cloud service data varies.
The data obtained using this API is for reference only.
This API can be invoked using the partner AK/SK or token only.
'''
ff = conn.bss.query_customer_resource(userDomainId, **data)
print(ff)
|
# -*- coding: utf-8 -*-
import factory
from django.contrib.gis.geos import Point
from wallet.factory import WalletFactory
from .models import User
class UserFactory(factory.django.DjangoModelFactory):
class Meta:
model = User # Equivalent to ``model = myapp.models.User``
#django_get_or_create = ('username',)
name = factory.Faker('name')
email = factory.Faker('email')
username = factory.Faker('name')
location = Point(-3.7035285, 40.4169473) # Puerta del Sol, Madrid
picture = factory.django.ImageField()
wallet = factory.RelatedFactory(WalletFactory, 'owner')
|
#!/usr/bin/env python
"""
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import boto3
# this is a tool to generate all the s3 sync commands for mediapackage regions
bucket_base = "rodeolabz"
mediapackage_regions = boto3.session.Session().get_available_regions(service_name='mediapackage')
for region in mediapackage_regions:
print("aws s3 sync . s3://rodeolabz-{region}/speke/ --acl public-read --delete".format(region=region))
|
import inspect
import logging
import numbers
import re
from datetime import date, datetime, timedelta
from functools import partial, wraps
from typing import (
Any,
Callable,
List,
NamedTuple,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
cast,
)
import sentry_sdk
from dateutil.parser import parse as dateutil_parse
import _strptime # NOQA fixes _strptime deferred import issue
from snuba import settings
from snuba.clickhouse.escaping import escape_string
from snuba.query.parsing import ParsingContext
from snuba.query.schema import CONDITION_OPERATORS
from snuba.utils.metrics import MetricsBackend
from snuba.utils.metrics.timer import Timer
from snuba.utils.metrics.types import Tags
logger = logging.getLogger("snuba.util")
T = TypeVar("T")
# example partition name: "('2018-03-13 00:00:00', 90)"
PART_RE = re.compile(r"\('(\d{4}-\d{2}-\d{2})',\s*(\d+)\)")
QUOTED_LITERAL_RE = re.compile(r"^'[\s\S]*'$")
SAFE_FUNCTION_RE = re.compile(r"-?[a-zA-Z_][a-zA-Z0-9_]*$")
def to_list(value: Union[T, List[T]]) -> List[T]:
return value if isinstance(value, list) else [value]
def qualified_column(column_name: str, alias: str = "") -> str:
"""
Returns a column in the form "table.column" if the table is not
empty. If the table is empty it returns the column itself.
"""
return column_name if not alias else f"{alias}.{column_name}"
def parse_datetime(value: str, alignment: int = 1) -> datetime:
dt = dateutil_parse(value, ignoretz=True).replace(microsecond=0)
return dt - timedelta(seconds=(dt - dt.min).seconds % alignment)
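# For example (illustrative, not part of the original module):
#   parse_datetime("2019-06-01T11:22:33+02:00", alignment=3600)
# drops the timezone and microseconds and rounds down to the hour,
# giving datetime(2019, 6, 1, 11, 0, 0).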
# TODO: Fix the type of Tuple concatenation when mypy supports it.
def is_function(column_expr: Any, depth: int = 0) -> Optional[Tuple[Any, ...]]:
"""
Returns a 3-tuple of (name, args, alias) if column_expr is a function,
otherwise None.
A function expression is of the form:
[func, [arg1, arg2]] => func(arg1, arg2)
If a string argument is followed by list arg, the pair of them is assumed
to be a nested function call, with extra args to the outer function afterward.
[func1, [func2, [arg1, arg2], arg3]] => func1(func2(arg1, arg2), arg3)
Although at the top level, there is no outer function call, and the optional
3rd argument is interpreted as an alias for the entire expression.
[func, [arg1], alias] => function(arg1) AS alias
"""
if (
isinstance(column_expr, (tuple, list))
and len(column_expr) >= 2
and isinstance(column_expr[0], str)
and isinstance(column_expr[1], (tuple, list))
and (depth > 0 or len(column_expr) <= 3)
):
assert SAFE_FUNCTION_RE.match(column_expr[0])
if len(column_expr) == 2:
return tuple(column_expr) + (None,)
else:
return tuple(column_expr)
else:
return None
def alias_expr(expr: str, alias: str, parsing_context: ParsingContext) -> str:
"""
Return the correct expression to use in the final SQL. Keeps a cache of
the previously created expressions and aliases, so it knows when it can
subsequently replace a redundant expression with an alias.
1. If the expression and alias are equal, just return that.
2. Otherwise, if the expression is new, add it to the cache and its alias so
it can be reused later and return `expr AS alias`
3. If the expression has been aliased before, return the alias
"""
if expr == alias:
return expr
elif parsing_context.is_alias_present(alias):
return alias
else:
parsing_context.add_alias(alias)
return "({} AS {})".format(expr, alias)
def is_condition(cond_or_list: Sequence[Any]) -> bool:
return (
# A condition is:
# a 3-tuple
len(cond_or_list) == 3
and
# where the middle element is an operator
cond_or_list[1] in CONDITION_OPERATORS
and
# and the first element looks like a column name or expression
isinstance(cond_or_list[0], (str, tuple, list))
)
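# For example (illustrative, assuming "=" is one of CONDITION_OPERATORS):
#   is_condition(["event_id", "=", "abc123"])     # -> True
#   is_condition([["event_id", "=", "abc123"]])   # -> False: a list of conditions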
def columns_in_expr(expr: Any) -> Sequence[str]:
"""
Get the set of columns that are referenced by a single column expression.
Either it is a simple string with the column name, or a nested function
that could reference multiple columns
"""
cols = []
# TODO possibly exclude quoted args to functions as those are
# string literals, not column names.
if isinstance(expr, str):
cols.append(expr.lstrip("-"))
elif (
isinstance(expr, (list, tuple))
and len(expr) >= 2
and isinstance(expr[1], (list, tuple))
):
for func_arg in expr[1]:
cols.extend(columns_in_expr(func_arg))
return cols
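# For example (illustrative, not part of the original module):
#   columns_in_expr("tags[level]")  # -> ["tags[level]"]
#   columns_in_expr(["uniq", ["user_id", ["ifNull", ["email"]]]])  # -> ["user_id", "email"]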
def tuplify(nested: Any) -> Any:
if isinstance(nested, (list, tuple)):
return tuple(tuplify(child) for child in nested)
return nested
def escape_literal(
value: Optional[Union[str, datetime, date, List[Any], Tuple[Any], numbers.Number]]
) -> str:
"""
Escape a literal value for use in a SQL clause.
"""
if isinstance(value, str):
return escape_string(value)
elif isinstance(value, datetime):
value = value.replace(tzinfo=None, microsecond=0)
return "toDateTime('{}', 'Universal')".format(value.isoformat())
elif isinstance(value, date):
return "toDate('{}', 'Universal')".format(value.isoformat())
elif isinstance(value, (list, tuple)):
return "({})".format(", ".join(escape_literal(v) for v in value))
elif isinstance(value, numbers.Number):
return str(value)
elif value is None:
return ""
else:
raise ValueError("Do not know how to escape {} for SQL".format(type(value)))
def time_request(name):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
kwargs["timer"] = Timer(name)
return func(*args, **kwargs)
return wrapper
return decorator
class Part(NamedTuple):
date: datetime
retention_days: int
def decode_part_str(part_str: str) -> Part:
match = PART_RE.match(part_str)
if not match:
raise ValueError("Unknown part name/format: " + str(part_str))
date_str, retention_days = match.groups()
date = datetime.strptime(date_str, "%Y-%m-%d")
return Part(date, int(retention_days))
def force_bytes(s: Union[bytes, str]) -> bytes:
if isinstance(s, bytes):
return s
elif isinstance(s, str):
return s.encode("utf-8", "replace")
else:
raise TypeError(f"cannot convert {type(s).__name__} to bytes")
def create_metrics(prefix: str, tags: Optional[Tags] = None) -> MetricsBackend:
"""Create a DogStatsd object if DOGSTATSD_HOST and DOGSTATSD_PORT are defined,
with the specified prefix and tags. Return a DummyMetricsBackend otherwise.
Prefixes must start with `snuba.<category>`, for example: `snuba.processor`.
"""
host = settings.DOGSTATSD_HOST
port = settings.DOGSTATSD_PORT
if host is None and port is None:
from snuba.utils.metrics.backends.dummy import DummyMetricsBackend
return DummyMetricsBackend()
elif host is None or port is None:
raise ValueError(
f"DOGSTATSD_HOST and DOGSTATSD_PORT should both be None or not None. Found DOGSTATSD_HOST: {host}, DOGSTATSD_PORT: {port} instead."
)
from datadog import DogStatsd
from snuba.utils.metrics.backends.datadog import DatadogMetricsBackend
return DatadogMetricsBackend(
partial(
DogStatsd,
host=host,
port=port,
namespace=prefix,
constant_tags=[f"{key}:{value}" for key, value in tags.items()]
if tags is not None
else None,
),
)
F = TypeVar("F", bound=Callable[..., Any])
def with_span(op: str = "function") -> Callable[[F], F]:
""" Wraps a function call in a Sentry AM span
"""
def decorator(func: F) -> F:
frame_info = inspect.stack()[1]
filename = frame_info.filename
@wraps(func)
def wrapper(*args, **kwargs) -> Any:
with sentry_sdk.start_span(description=func.__name__, op=op) as span:
span.set_data("filename", filename)
return func(*args, **kwargs)
return cast(F, wrapper)
return decorator
|
from flask import Flask, render_template, request, flash, redirect, url_for,\
jsonify, send_from_directory, Blueprint, abort, current_app as app
from flask_mail import Mail, Message
from flask_login import logout_user, login_user, current_user, login_required
from app.models.user import User, VerifyEmailRequest, RemoteSourceUser
from database.db_adapter import db
from app.http.middleware.decorators import validate_request
from werkzeug.security import generate_password_hash
from wtforms import Form, BooleanField, StringField, PasswordField, validators
import hashlib
from app.http.middleware.generators import generate_hash
blueprint = Blueprint('register', __name__)
CLIENT_ID = "352888396080-mcvh515uocehs0l517bh2qfrrutq04ul.apps.googleusercontent.com"
@blueprint.route('/', methods=["GET", "POST"])
def register():
def post():
try:
password = generate_password_hash(request.form.get('password'))
email = request.form.get("email")
name = request.form.get("name")
# Check if an account with the given credentials already exists
if (User.query.filter(User.email == email).first()):
flash('Sorry, that email is already in use', "danger")
else:
next = create_user(name, email,
profile_image_url=None, password=password)
return redirect(next)
except Exception as e:
print(e)
db.rollback()
flash("There was an error processing your request", "danger")
return render_template("register/register.html")
if request.method == 'POST':
# redirect to the url post returns
return post()
# else the request is GET
else:
return render_template("register/register.html")
@blueprint.route('/verify', methods=["GET", "POST"])
@login_required
def verify_user():
def post():
from app.http.controllers.mail_senders import send_verify_email
send_verify_email()
return render_template("register/email_resent.html")
if request.method == 'POST':
return post()
else:
return render_template("register/verify.html")
@blueprint.route('/activate', methods=["GET"])
def activate_user():
try:
token = request.args.get("token")
# check if the token matches the database
        verify_obj = VerifyEmailRequest.query.filter(VerifyEmailRequest.token == hashlib.sha256(token.encode()).hexdigest()).first()
if (verify_obj is not None):
# get the related user
user = User.query.filter(User.id == verify_obj.user_id).first()
if (user.verified):
abort(404)
user.verified = True
verify_obj.completed = True
db.commit()
return render_template("register/successfully_verified.html")
# else the user is not authorized
else:
db.rollback()
abort(404)
except Exception:
abort(404)
@blueprint.route("/terms", methods=["GET"])
def show_terms():
return render_template("policies/terms.html")
@blueprint.route("/policy", methods=["GET"])
def show_policy():
return render_template("policies/policy.html")
@blueprint.route('/social', methods=["GET", "POST"])
def register_social_account():
def post():
google_id = request.form.get("google-id")
if google_id:
google_id = check_id_integrity(google_id)
if not google_id:
flash("Invalid token, has someone been tampering?", "danger")
return redirect(url_for("login.login"))
facebook_id = request.form.get("fb-id")
email = request.form.get("social-email")
name = request.form.get("social-name")
image_url = request.form.get("social-image")
if email is None:
return render_template("register/register_email_only.html",
google_id=google_id,
facebook_id=facebook_id,
name=name,
image_url=image_url)
else:
# check if the email is already taken
user = User.query.filter(User.email == email).first()
# if it does log them in
if user:
login_user(user)
print("User exists {}".format(user.remote_user))
# check if this user has used a social login before,
# try to merge accounts
# check which id was supplied
if user.remote_user.google_id == google_id:
return redirect(check_current_user_level())
elif user.remote_user.fb_id == facebook_id:
return redirect(check_current_user_level())
elif google_id:
user.remote_user.google_id = google_id
db.commit()
else:
user.remote_user.fb_id = facebook_id
db.commit()
# if the user has not verified their account redirect
# them to the verification portal
return redirect(url_for("register.verify_user"))
# user is logged in with this function
next = create_user(name, email, image_url)
print(next)
print("Next finished")
create_remote_user(google_id, facebook_id)
return redirect(next)
if request.method == 'POST':
return post()
# Check google id integrity with google api
def check_id_integrity(token):
from oauth2client import client, crypt
try:
idinfo = client.verify_id_token(token, CLIENT_ID)
if idinfo['iss'] not in ['accounts.google.com', 'https://accounts.google.com']:
raise crypt.AppIdentityError("Wrong issuer.")
    except crypt.AppIdentityError:
        # Invalid token
        return False
    userid = idinfo['sub']
return userid
def check_current_user_level():
if (current_user.is_active):
if current_user.access_level >= 2:
return url_for("dashboard.dashboard")
else:
return url_for("home.account")
else:
print("Attempting to send email")
from app.http.controllers.mail_senders import send_verify_email
send_verify_email(current_user)
return url_for("register.verify_user")
def create_remote_user(google_id, facebook_id):
# hence current_user can be accessed afterwards
if google_id:
remote_user = RemoteSourceUser(user_id=current_user.id,
google_id=google_id)
else:
remote_user = RemoteSourceUser(user_id=current_user.id,
fb_id=facebook_id)
try:
db.add(remote_user)
db.commit()
except Exception:
db.rollback()
@blueprint.route('/social/email', methods=["GET", "POST"])
def missing_email():
def post():
name = request.form.get("name")
email = request.form.get("email")
image_url = request.form.get("social-image")
google_id = request.form.get("google-id")
facebook_id = request.form.get("fb-id")
# user is logged in with this function
next = create_user(name, email, image_url)
# hence current_user can be accessed afterwards
if google_id:
remote_user = RemoteSourceUser(user_id=current_user.id,
google_id=google_id)
else:
remote_user = RemoteSourceUser(user_id=current_user.id,
fb_id=facebook_id)
try:
db.add(remote_user)
db.commit()
except Exception:
db.rollback()
return redirect(next)
if request.method == 'POST':
return post()
def create_user(name, email, profile_image_url=None, password=""):
if profile_image_url is None:
profile_image_url = url_for('static', filename='images/default_logo.png')
user = User(name=name,
email=email,
password=password,
profile_image_url=profile_image_url)
# save the new user
db.add(user)
db.commit()
# check if there is a user logged in, if so log them out
if (current_user):
logout_user()
# login the current user so that we have a handle on the object
login_user(user)
print("Attempting to send email")
from app.http.controllers.mail_senders import send_verify_email
send_verify_email(user)
return url_for("register.verify_user")
|
import json
import random
import logging
from django.views import View
from django.http import HttpResponse, JsonResponse
from users import models
from verifications import contains
from utils.res_code.res_code import Code
from utils.captcha.captcha import captcha
from verifications.forms import SmsCodeForm
from celery_tasks.sms.tasks import send_sms_code
from utils.plugins.error_str import error_message
from utils.res_code.json_function import to_json_data
from utils.plugins.captcha import ImageCaptcha, MobileCaptcha
logger = logging.getLogger("django")
class ImageCodesView(View):
"""
image_code view
"""
    def get(self, request, image_code_id):  # image_code_id is the uuid passed from the front end
        verify_text, verify_image = captcha.generate_captcha()  # generate the captcha text and image
        print("image_code_id sent by the front end: {}".format(image_code_id))
        # save the generated captcha text to redis
        image_code = ImageCaptcha(image_code_id, verify_text, alias="verify_code",
                                  expire=contains.IMAGE_CODE_EXPIRE_TIME)
        image_code.captcha_cache()
        logger.info('verify_text:{}'.format(verify_text))  # log the captcha text on the server side
        return HttpResponse(content=verify_image, content_type='image/jpg')  # return the captcha image to the front end
class CheckUsernameView(View):
"""
create username verify view
# 1. 创建一个类
request: GET
params: username
"""
# 2. 创建个get方法来处理逻辑
def get(self, request, username):
# 3. 从数据库中查看是否存在该用户
data = {
"username": username,
"count": models.Users.objects.filter(username=username).count() # 获取数据库中有几条这个信息:无则0
}
# 4. 返回到前端
return JsonResponse({"data": data})
class CheckMobileView(View):
"""
create mobile verify view
# 1.创建一个类
request: GET
params: mobile
"""
# 2. 创建个get方法来处理逻辑
def get(self, request, mobile):
# 3. 从数据库中查看是否存在该用户
data = {
"mobile": mobile,
"count": models.Users.objects.filter(mobile=mobile).count()
}
# 5. 返回到前端
return JsonResponse({"data": data})
class SmsCodeView(View):
"""
# 1. 创建一个SmsCodeView类
param: mobile、image_text、image_code_id
"""
# 2. 创建一个post方法用来处理逻辑
def post(self, request):
# 3. 获取前端传来的数据
json_data = request.body
# 4. 将数据转化为字典
dict_data = json.loads(json_data)
# 5. 将数据传递给SmsCodeForm表单进行校验
form = SmsCodeForm(data=dict_data)
# 6. 校验成功处理方式
if form.is_valid():
# 7. 获取校验后的数据
mobile = form.cleaned_data.get("mobile")
# 8. 生成短信验证码
sms_text = "%06d" % random.randint(0, 999999)
# 9. 将短信验证码和和过期时间保存到redis中
mobile_captcha = MobileCaptcha(mobile, sms_text, alias="verify_code", expire=contains.SMS_CODE_EXPIRE_TIME,
re_expire=contains.SMS_REPEAT_EXPIRE_TIME)
mobile_captcha.captcha_cache()
# 使用celery异步处理短信发动任务
print(sms_text)
send_sms_code.delay(mobile, sms_text, contains.SMS_CODE_EXPIRE_TIME, contains.SMS_TEMPLATE)
return to_json_data(errno=Code.OK, errmsg="短信验证码发送成功")
# 校验未通过
else:
err_info = error_message(form)
return to_json_data(errno=Code.PARAMERR, errmsg=err_info)
|
class Solution:
# @param A : integer
# @return a list of list of integers
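    # The method builds concentric square "rings" of numbers that decrease
    # toward the centre; e.g. (illustrative) prettyPrint(2) returns
    # [[2, 2, 2], [2, 1, 2], [2, 2, 2]].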
def prettyPrint(self, A):
arr = []
for i in range(0, A):
new_arr = []
c = A
flag = 0
for j in range(0, 2*A - 1):
new_arr.append(c)
if i == 2*A-j-2:
flag = 1
if flag == 0 and c > A-i:
c -= 1
if flag == 1:
c+= 1
arr.append(new_arr)
for i in range(len(arr)-2, -1, -1):
arr.append(arr[i])
        return arr
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['TopicPermissionsArgs', 'TopicPermissions']
@pulumi.input_type
class TopicPermissionsArgs:
def __init__(__self__, *,
permissions: pulumi.Input[Sequence[pulumi.Input['TopicPermissionsPermissionArgs']]],
user: pulumi.Input[str],
vhost: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a TopicPermissions resource.
:param pulumi.Input[Sequence[pulumi.Input['TopicPermissionsPermissionArgs']]] permissions: The settings of the permissions. The structure is
described below.
:param pulumi.Input[str] user: The user to apply the permissions to.
:param pulumi.Input[str] vhost: The vhost to create the resource in.
"""
pulumi.set(__self__, "permissions", permissions)
pulumi.set(__self__, "user", user)
if vhost is not None:
pulumi.set(__self__, "vhost", vhost)
@property
@pulumi.getter
def permissions(self) -> pulumi.Input[Sequence[pulumi.Input['TopicPermissionsPermissionArgs']]]:
"""
The settings of the permissions. The structure is
described below.
"""
return pulumi.get(self, "permissions")
@permissions.setter
def permissions(self, value: pulumi.Input[Sequence[pulumi.Input['TopicPermissionsPermissionArgs']]]):
pulumi.set(self, "permissions", value)
@property
@pulumi.getter
def user(self) -> pulumi.Input[str]:
"""
The user to apply the permissions to.
"""
return pulumi.get(self, "user")
@user.setter
def user(self, value: pulumi.Input[str]):
pulumi.set(self, "user", value)
@property
@pulumi.getter
def vhost(self) -> Optional[pulumi.Input[str]]:
"""
The vhost to create the resource in.
"""
return pulumi.get(self, "vhost")
@vhost.setter
def vhost(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vhost", value)
@pulumi.input_type
class _TopicPermissionsState:
def __init__(__self__, *,
permissions: Optional[pulumi.Input[Sequence[pulumi.Input['TopicPermissionsPermissionArgs']]]] = None,
user: Optional[pulumi.Input[str]] = None,
vhost: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering TopicPermissions resources.
:param pulumi.Input[Sequence[pulumi.Input['TopicPermissionsPermissionArgs']]] permissions: The settings of the permissions. The structure is
described below.
:param pulumi.Input[str] user: The user to apply the permissions to.
:param pulumi.Input[str] vhost: The vhost to create the resource in.
"""
if permissions is not None:
pulumi.set(__self__, "permissions", permissions)
if user is not None:
pulumi.set(__self__, "user", user)
if vhost is not None:
pulumi.set(__self__, "vhost", vhost)
@property
@pulumi.getter
def permissions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TopicPermissionsPermissionArgs']]]]:
"""
The settings of the permissions. The structure is
described below.
"""
return pulumi.get(self, "permissions")
@permissions.setter
def permissions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TopicPermissionsPermissionArgs']]]]):
pulumi.set(self, "permissions", value)
@property
@pulumi.getter
def user(self) -> Optional[pulumi.Input[str]]:
"""
The user to apply the permissions to.
"""
return pulumi.get(self, "user")
@user.setter
def user(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user", value)
@property
@pulumi.getter
def vhost(self) -> Optional[pulumi.Input[str]]:
"""
The vhost to create the resource in.
"""
return pulumi.get(self, "vhost")
@vhost.setter
def vhost(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vhost", value)
class TopicPermissions(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
permissions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TopicPermissionsPermissionArgs']]]]] = None,
user: Optional[pulumi.Input[str]] = None,
vhost: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
The ``TopicPermissions`` resource creates and manages a user's set of
topic permissions.
## Example Usage
```python
import pulumi
import pulumi_rabbitmq as rabbitmq
test_v_host = rabbitmq.VHost("testVHost")
test_user = rabbitmq.User("testUser",
password="foobar",
tags=["administrator"])
test_topic_permissions = rabbitmq.TopicPermissions("testTopicPermissions",
permissions=[rabbitmq.TopicPermissionsPermissionArgs(
exchange="amq.topic",
read=".*",
write=".*",
)],
user=test_user.name,
vhost=test_v_host.name)
```
## Import
Permissions can be imported using the `id` which is composed of
`user@vhost`. E.g.
```sh
$ pulumi import rabbitmq:index/topicPermissions:TopicPermissions test user@vhost
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TopicPermissionsPermissionArgs']]]] permissions: The settings of the permissions. The structure is
described below.
:param pulumi.Input[str] user: The user to apply the permissions to.
:param pulumi.Input[str] vhost: The vhost to create the resource in.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: TopicPermissionsArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The ``TopicPermissions`` resource creates and manages a user's set of
topic permissions.
## Example Usage
```python
import pulumi
import pulumi_rabbitmq as rabbitmq
test_v_host = rabbitmq.VHost("testVHost")
test_user = rabbitmq.User("testUser",
password="foobar",
tags=["administrator"])
test_topic_permissions = rabbitmq.TopicPermissions("testTopicPermissions",
permissions=[rabbitmq.TopicPermissionsPermissionArgs(
exchange="amq.topic",
read=".*",
write=".*",
)],
user=test_user.name,
vhost=test_v_host.name)
```
## Import
Permissions can be imported using the `id` which is composed of
`user@vhost`. E.g.
```sh
$ pulumi import rabbitmq:index/topicPermissions:TopicPermissions test user@vhost
```
:param str resource_name: The name of the resource.
:param TopicPermissionsArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TopicPermissionsArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
permissions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TopicPermissionsPermissionArgs']]]]] = None,
user: Optional[pulumi.Input[str]] = None,
vhost: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = TopicPermissionsArgs.__new__(TopicPermissionsArgs)
if permissions is None and not opts.urn:
raise TypeError("Missing required property 'permissions'")
__props__.__dict__["permissions"] = permissions
if user is None and not opts.urn:
raise TypeError("Missing required property 'user'")
__props__.__dict__["user"] = user
__props__.__dict__["vhost"] = vhost
super(TopicPermissions, __self__).__init__(
'rabbitmq:index/topicPermissions:TopicPermissions',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
permissions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TopicPermissionsPermissionArgs']]]]] = None,
user: Optional[pulumi.Input[str]] = None,
vhost: Optional[pulumi.Input[str]] = None) -> 'TopicPermissions':
"""
Get an existing TopicPermissions resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TopicPermissionsPermissionArgs']]]] permissions: The settings of the permissions. The structure is
described below.
:param pulumi.Input[str] user: The user to apply the permissions to.
:param pulumi.Input[str] vhost: The vhost to create the resource in.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _TopicPermissionsState.__new__(_TopicPermissionsState)
__props__.__dict__["permissions"] = permissions
__props__.__dict__["user"] = user
__props__.__dict__["vhost"] = vhost
return TopicPermissions(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def permissions(self) -> pulumi.Output[Sequence['outputs.TopicPermissionsPermission']]:
"""
The settings of the permissions. The structure is
described below.
"""
return pulumi.get(self, "permissions")
@property
@pulumi.getter
def user(self) -> pulumi.Output[str]:
"""
The user to apply the permissions to.
"""
return pulumi.get(self, "user")
@property
@pulumi.getter
def vhost(self) -> pulumi.Output[Optional[str]]:
"""
The vhost to create the resource in.
"""
return pulumi.get(self, "vhost")
|
# -*- coding: utf-8 -*-
# Settings for running the test application testcases
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import *
import os
import sys
DEBUG = True
TEMPLATE_DEBUG = DEBUG
PROJECT_ROOT = os.path.normpath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(PROJECT_ROOT, '..'))
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'personnel', # Or path to database file if using sqlite3.
'USER': 'personnel', # Not used with sqlite3.
'PASSWORD': 'personnel', # Not used with sqlite3.
'PORT': '5432',
'HOST': 'localhost',
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Helsinki'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = PROJECT_ROOT + '/media/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/admin/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'MNKopiqeu0r9hIACS()TQ#öphwssadkjbfuasdjco+e33q2ja7(YHOLA'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'personnel.urls'
'''
TEMPLATE_DIRS = (
PROJECT_ROOT + "/personnel/templates/"
)
'''
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [
            os.path.join(PROJECT_ROOT, 'personnel/templates/'),
],
'OPTIONS': {
# 'debug': DEBUG,
'context_processors':
(
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
# not needed? 'django.template.context_processors.static',
# not needed? 'django.template.context_processors.tz',
# not needed? 'django.template.context_processors.csrf',
'django.contrib.messages.context_processors.messages',
'ws.dws.context_processors.django_conf_settings',
)
}
},
]
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'personnel',
'transhistory',
'cli_query', # for querying objects from command line
'django_nose', # testing nose test runner
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.auth',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.request',
)
LOGIN_URL = "/login"
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'postgres':{
'level':'DEBUG',
'class':'transhistory.db_log_util.PostgresLogHandler',
'formatter': 'simple'
},
'console':{
'level':'DEBUG',
'class':'logging.StreamHandler',
'formatter': 'simple'
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'personnel': {
'handlers': ['console', 'postgres'], # change in real use
'level': 'DEBUG',
'propagate': True,
},
'transhistory': {
'handlers': ['console', 'postgres'], # change in real use
'level': 'DEBUG',
'propagate': True,
},
}
}
|
from typing import Any
import tornado.websocket
import tornado.web
import tornado.ioloop
import time
import random
import threading
import asyncio
from tornado import httputil
class MyWebSocketHandler(tornado.websocket.WebSocketHandler):
connect_users = set()
def __init__(self, application: tornado.web.Application, request: httputil.HTTPServerRequest, **kwargs: Any):
super().__init__(application, request, **kwargs)
self.byteType = type(b"")
def check_origin(self, origin: str):
        '''Override the same-origin check to allow cross-origin requests'''
return True
def open(self):
print("WebSocket opened")
        # add this user to connect_users when the connection is opened
self.connect_users.add(self)
def solveMessage(self, message):
print(message)
def on_message(self, message):
messageType = type(message)
if messageType != self.byteType:
self.solveMessage(message)
return
lenmessage = len(message)
global stream
stream.write(message)
def on_close(self):
print("WebSocket closed")
        # remove this user from connect_users when the connection is closed
self.connect_users.remove(self)
@classmethod
def send_demand_updates(cls, message):
        # @classmethod lets this method be called without instantiating the class
        # push the message to every connected user (adjust here to push to specific users if needed)
for user in cls.connect_users:
user.write_message(message)
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r'/ws', MyWebSocketHandler)
]
tornado.web.Application.__init__(self, handlers)
print("websocket listening")
import tornado.platform.asyncio
import pyaudio
p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(2), channels=1,
rate=44100, output=True)
# asyncio.set_event_loop_policy(tornado.platform.asyncio.AnyThreadEventLoopPolicy())
app = Application()
app.listen(20482)
tornado.ioloop.IOLoop.current().start()
|
# terrascript/chef/r.py
import terrascript
class chef_acl(terrascript.Resource):
pass
class chef_client(terrascript.Resource):
pass
class chef_cookbook(terrascript.Resource):
pass
class chef_data_bag(terrascript.Resource):
pass
class chef_data_bag_item(terrascript.Resource):
pass
class chef_environment(terrascript.Resource):
pass
class chef_node(terrascript.Resource):
pass
class chef_role(terrascript.Resource):
pass
|
testes = int(input())
for i in range(0, testes):
num = input()
E = int(num.split()[0])
D = int(num.split()[1])
if E == D and E == 1:
print('coma')
else:
print('pense')
|
import Agent
import Location
import random
import re
import time
import streamlit as st
class ReadConfiguration():
def __init__(self):
self.worlds=None
self.time_steps=None
self.starting_exposed_percentage=None
self.agent_info_keys=None
self.interaction_info_keys=None
self.f = open('config.txt','r')
        self.worlds=int(self.get_value())
        self.time_steps=int(self.get_value())
self.agent_info_keys=self.get_value()
self.agents_filename=self.get_value()
self.interaction_info_keys=self.get_value()
self.interactions_files_list=self.get_value()
self.location_info_keys=self.get_value()
self.locations_filename=self.get_value()
self.event_info_keys=self.get_value()
self.events_files_list=self.get_value()
if 'Agent Index' not in self.agent_info_keys.split(':'):
print("Error! Agent file does not contain parameter \'Agent Index\'")
return None
if 'Agent Index' not in self.interaction_info_keys.split(':'):
print("Interaction definition does not contain parameter \'Agent Index\'")
if 'Interacting Agent Index' not in self.interaction_info_keys.split(':'):
print("Interaction definition does not contain parameter \'Interacting Agent Index\'")
if 'Location Index' not in self.location_info_keys.split(':'):
print('Location file does not contain parameter \'Location Index\'')
if 'Location Index' not in self.event_info_keys.split(':'):
print('Event definition does not contain parameter \'Location Index\'')
if 'Agents' not in self.event_info_keys.split(':'):
print('Event definition does not contain parameter \'Agents\'')
def get_value(self):
line=self.f.readline()
l = re.findall("\<.*?\>", line)
if len(l)!=1:
print("Error! Invalid entry in config.txt")
return None
value=(((l[0])[1:])[:-1])
return value
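# Illustrative (assumed) layout of config.txt as consumed by ReadConfiguration
# above: one value per line, wrapped in angle brackets, in the same order as
# the get_value() calls, e.g.
#   Number of worlds <1>
#   Number of time steps <10>
#   Agent info keys <Agent Index:Age:State>
#   Agents filename <agents.txt>
#   ...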
class ReadAgents():
def __init__(self,filename,config_obj):
f=open(filename,'r')
self.n=int(self.get_value(f.readline()))
agent_info_keys=self.get_value(f.readline())
if agent_info_keys != config_obj.agent_info_keys:
print("Error! Agent Information parameters donot match the config.txt file")
return None
self.parameter_keys=agent_info_keys.split(':')
self.agents={}
for i in range(self.n):
info_dict=self.create_info_dict(self.get_value(f.readline()).split(':'))
state=None#config_obj.default_state
agent=Agent.Agent(state,info_dict)
self.agents[agent.index]=agent
def create_info_dict(self,info_list):
info_dict={}
for i,key in enumerate(self.parameter_keys):
info_dict[key]=info_list[i]
return info_dict
def get_value(self,line):
if line.endswith('\n'):
line=line[:-1]
return line
class ReadFilesList():
def __init__(self,filename):
self.file_list=[]
f=open(filename,'r')
lines=f.readlines()
separator=' '
text=separator.join(lines)
l = re.findall("\<.*?\>", text)
for filename in l:
self.file_list.append(((filename)[1:])[:-1])
class ReadInteractions():
def __init__(self,filename,config_obj,agents_obj):
self.config_obj=config_obj
self.agents_obj=agents_obj
if filename=="" or filename==None:
return
f=open(filename,'r')
self.no_interactions=int(self.get_value(f.readline()))
interaction_info_keys=self.get_value(f.readline())
if interaction_info_keys != config_obj.interaction_info_keys:
print("Error! Interaction parameters donot match the config.txt file")
return None
self.parameter_keys=interaction_info_keys.split(':')
for i in range(self.no_interactions):
parameter_list=(self.get_value(f.readline())).split(':')
agent_index,info_dict=self.get_interaction(parameter_list)
agents_obj.agents[agent_index].add_contact(info_dict)
def get_interaction(self,parameter_list):
info_dict={}
agent_index=None
contact_agent_index=None
for i,key in enumerate(self.parameter_keys):
if key=='Agent Index':
agent_index=parameter_list[i]
info_dict[key]=parameter_list[i]
return agent_index,info_dict
def get_value(self,line):
if line.endswith('\n'):
line=line[:-1]
return line
class ReadLocations():
def __init__(self,filename,config_obj):
self.config_obj=config_obj
self.locations={}
if filename=="" or filename==None:
return
f=open(filename,'r')
self.no_locations=int(self.get_value(f.readline()))
location_info_keys=self.get_value(f.readline())
if location_info_keys != config_obj.location_info_keys:
print("Error! Location parameters donot match the config.txt file")
return None
self.parameter_keys=location_info_keys.split(':')
for i in range(self.no_locations):
info_dict=self.create_info_dict(self.get_value(f.readline()).split(':'))
location=Location.Location(info_dict)
self.locations[location.index]=location
def create_info_dict(self,info_list):
info_dict={}
for i,key in enumerate(self.parameter_keys):
info_dict[key]=info_list[i]
return info_dict
def get_value(self,line):
if line.endswith('\n'):
line=line[:-1]
return line
class ReadEvents():
def __init__(self,filename,config_obj,locations_obj):
self.config_obj=config_obj
self.locations_obj=locations_obj
if filename=="" or filename==None:
return
f=open(filename,'r')
self.no_events=int(self.get_value(f.readline()))
event_info_keys=self.get_value(f.readline())
if event_info_keys != config_obj.event_info_keys:
print("Error! Event parameters donot match the config.txt file")
return None
self.parameter_keys=event_info_keys.split(':')
for i in range(self.no_events):
parameter_list=(self.get_value(f.readline())).split(':')
location_index,info_dict=self.get_event(parameter_list)
self.locations_obj.locations[location_index].add_event(info_dict)
def get_event(self,parameter_list):
info_dict={}
location_index=None
for i,key in enumerate(self.parameter_keys):
if key=='Location Index':
location_index=parameter_list[i]
if key=='Agents':
info_dict[key]=list(set(parameter_list[i].split(',')))
if info_dict[key][-1]=='':
                    info_dict[key]=info_dict[key][:-1]
else:
info_dict[key]=parameter_list[i]
if location_index==None:
print("Error! No event to read")
return location_index,info_dict
def get_value(self,line):
if line.endswith('\n'):
line=line[:-1]
return line
|
from __future__ import annotations
from typing import List, Union, TextIO, Any
from io import StringIO
from ml.utils.ansi import ANSI
from ml.utils.printer import Printer
from .ast import (
Pattern,
BaseAST,
Axiom,
Axiom,
Sort,
SortInstance,
SortDefinition,
SymbolDefinition,
Variable,
Module,
Definition,
AliasDefinition,
Application,
MLPattern,
SortVariable,
ImportStatement,
SymbolInstance,
StringLiteral,
KoreVisitor,
)
class PrettyPrinter(Printer, KoreVisitor[BaseAST[Any], None]):
COLOR_KEYWORD = ANSI.in_blue
COLOR_SYMBOL_INSTANCE = ANSI.in_green
COLOR_STRING_LITERAL = ANSI.in_magenta
COLOR_ML_CONSTRUCT = ANSI.in_bright_cyan
COLOR_SORT_VARIABLE = lambda x: x
COLOR_VARIABLE = lambda x: x
def __init__(
self,
output: TextIO,
tab: str = ANSI.in_gray("|") + " " if ANSI.supports_color() else " ",
limit: int = 80, # if the encoded version exceeds this limit length, try to print arguments on a new line
demangle: bool = True, # demangle kore labels
compact: bool = False, # force compact format
skip_empty_sorts: bool = True, # skip empty sort arguments
):
super().__init__(output, tab)
self.limit = limit
self.demangle = demangle
self.compact = compact
self.skip_empty_sorts = skip_empty_sorts
@staticmethod
def encode(output: TextIO, ast: BaseAST[Any], *args: Any, **kwargs: Any) -> None:
printer = PrettyPrinter(output, *args, **kwargs)
printer.visit(ast)
printer.flush()
@staticmethod
def encode_string(ast: BaseAST[Any], *args: Any, **kwargs: Any) -> str:
stream = StringIO()
PrettyPrinter.encode(stream, ast, *args, **kwargs)
return stream.getvalue()
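    # Typical usage (a sketch; `parse_definition` and `source` are hypothetical names,
    # not part of this module):
    #   text = PrettyPrinter.encode_string(parse_definition(source), limit=100, demangle=True)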
"""
Keys are all four bytes
https://github.com/kframework/k/blob/66162c38b4eded58eac3cbfbc58d0ac08a96fc26/kore/src/main/scala/org/kframework/parser/kore/parser/KoreToK.scala
"""
DEMANGLE_LABEL_MAP = {
r"Spce": " ",
r"Bang": "!",
r"Quot": "\"",
r"Hash": "#",
r"Dolr": "$",
r"Perc": "%",
r"And-": "&",
r"Apos": "'",
r"LPar": "(",
r"RPar": ")",
r"Star": "*",
r"Plus": "+",
r"Comm": ",",
r"Hyph": "-",
r"Stop": ".",
r"Slsh": "/",
r"Coln": ":",
r"SCln": ";",
r"-LT-": "<",
r"Eqls": "=",
r"-GT-": ">",
r"Ques": "?",
r"-AT-": "@",
r"LSqB": "[",
r"RSqB": "]",
r"Bash": "\\",
r"Xor-": "^",
r"Unds": "_",
r"BQuo": "`",
r"LBra": "{",
r"Pipe": "|",
r"RBra": "}",
r"Tild": "~",
}
@staticmethod
def demangle_label(label: str) -> str:
"""
Demangle the label format generated by K
https://github.com/kframework/k/blob/66162c38b4eded58eac3cbfbc58d0ac08a96fc26/kore/src/main/scala/org/kframework/parser/kore/parser/KoreToK.scala#L81
"""
result = ""
is_literal = False
i = 0
while i < len(label):
if label[i] == "'":
is_literal = not is_literal
i += 1
elif is_literal and i + 4 <= len(label):
code = label[i:i + 4]
symbol = PrettyPrinter.DEMANGLE_LABEL_MAP.get(code, code)
result += symbol
i += 4
else:
result += label[i]
i += 1
return result[3:] if result.startswith("Lbl") else result
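    # A small illustration (hypothetical label, not taken from a real definition):
    #   demangle_label("Lbl'UndsPlusUnds'") -> "_+_"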
def write_sort_arguments(self, sorts: Union[List[Sort], List[SortVariable]]) -> None:
if self.skip_empty_sorts and len(sorts) == 0:
return
self.write("{")
for i, sort in enumerate(sorts):
if i != 0:
self.write(", ")
self.visit(sort)
self.write("}")
def postvisit_default(self, x: Any, *args: Any) -> None:
raise NotImplementedError()
def postvisit_definition(self, definition: Definition, *args: Any) -> None:
for i, module in enumerate(definition.module_map.values()):
if i != 0:
self.write("\n\n")
self.visit(module)
def postvisit_module(self, module: Module, *args: Any) -> None:
self.write(PrettyPrinter.COLOR_KEYWORD("module") + f" {module.name}\n")
with self.indentation():
for axiom in module.all_sentences:
self.visit(axiom)
self.write("\n")
self.write(PrettyPrinter.COLOR_KEYWORD("endmodule"))
def postvisit_import_statement(self, import_stmt: ImportStatement, *args: Any) -> None:
self.write(PrettyPrinter.COLOR_KEYWORD("import") + f" {import_stmt.get_module_name()}")
def postvisit_sort_definition(self, sort_definition: SortDefinition, *args: Any) -> None:
self.write(PrettyPrinter.COLOR_KEYWORD("sort") + " ")
self.write(sort_definition.sort_id)
self.write_sort_arguments(sort_definition.sort_variables)
def postvisit_sort_instance(self, sort_instance: SortInstance, *args: Any) -> None:
self.write(sort_instance.get_sort_id())
self.write_sort_arguments(sort_instance.arguments)
def postvisit_sort_variable(self, sort_variable: SortVariable) -> None:
self.write(PrettyPrinter.COLOR_SORT_VARIABLE(sort_variable.name)) # type: ignore
def postvisit_symbol_definition(self, definition: SymbolDefinition, *args: Any) -> None:
self.write(PrettyPrinter.COLOR_KEYWORD("symbol") + " ")
symbol_name = definition.symbol
if self.demangle:
symbol_name = PrettyPrinter.demangle_label(symbol_name)
self.write(symbol_name)
self.write_sort_arguments(definition.sort_variables)
self.write("(")
for i, sort in enumerate(definition.input_sorts):
if i != 0:
self.write(", ")
self.visit(sort)
self.write("): ")
self.visit(definition.output_sort)
def postvisit_symbol_instance(self, instance: SymbolInstance, *args: Any) -> None:
symbol_name = instance.get_symbol_name()
if self.demangle:
symbol_name = PrettyPrinter.demangle_label(symbol_name)
symbol_name = PrettyPrinter.COLOR_SYMBOL_INSTANCE(symbol_name)
self.write(symbol_name)
self.write_sort_arguments(instance.sort_arguments)
def postvisit_alias_definition(self, alias: AliasDefinition, *args: Any) -> None:
self.write(PrettyPrinter.COLOR_KEYWORD("alias") + " ")
symbol_name = alias.definition.symbol
if self.demangle:
symbol_name = PrettyPrinter.demangle_label(symbol_name)
self.write(symbol_name)
self.write_sort_arguments(alias.definition.sort_variables)
self.write("(")
for i, sort in enumerate(alias.definition.input_sorts):
if i != 0:
self.write(", ")
self.visit(sort)
self.write("): ")
self.visit(alias.definition.output_sort)
self.write(" " + PrettyPrinter.COLOR_KEYWORD("where") + "\n")
with self.indentation():
old_compact = self.compact
self.compact = True
self.visit(alias.lhs)
self.compact = old_compact
self.write(" := ")
self.visit(alias.rhs)
def postvisit_axiom(self, axiom: Axiom, *args: Any) -> None:
self.write(PrettyPrinter.COLOR_KEYWORD("axiom"))
self.write_sort_arguments(axiom.sort_variables)
self.write(" ")
self.visit(axiom.pattern)
def postvisit_variable(self, var: Variable, *args: Any) -> None:
self.write(PrettyPrinter.COLOR_VARIABLE(var.name)) # type: ignore
self.write(":")
self.visit(var.sort)
def postvisit_string_literal(self, literal: StringLiteral) -> None:
self.write(PrettyPrinter.COLOR_STRING_LITERAL(f"\"{literal.content}\""))
def decide_if_compact(self, ast: BaseAST[Any]) -> bool:
"""
Decide if the given ast should be printed using the compact format
"""
        if self.compact:
            return True
for line in PrettyPrinter.encode_string(ast, compact=True).split("\n"):
if len(line) > self.limit:
return False
return True
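    # In effect: with the default limit of 80, a node whose compact encoding keeps every
    # line within the limit is printed with its arguments inline; otherwise each argument
    # goes on its own indented line (see postvisit_application below).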
def postvisit_application(self, application: Application, *args: Any) -> None:
use_compact = self.decide_if_compact(application)
self.visit(application.symbol)
self.write("(")
if not use_compact:
self.write("\n")
with self.indentation():
for i, argument in enumerate(application.arguments):
if i != 0:
self.write(", ")
if not use_compact:
self.write("\n")
self.visit(argument)
self.write(")")
def postvisit_ml_pattern(self, ml_pattern: MLPattern, *args: Any) -> None:
use_compact = self.compact or len(str(ml_pattern)) <= self.limit
self.write(PrettyPrinter.COLOR_ML_CONSTRUCT(ml_pattern.construct))
self.write_sort_arguments(ml_pattern.sorts)
self.write("(")
if not use_compact:
self.write("\n")
with self.indentation():
for i, argument in enumerate(ml_pattern.arguments):
if i != 0:
self.write(", ")
if not use_compact:
self.write("\n")
self.visit(argument)
self.write(")")
|
#!/usr/bin/python3
"""rabinkarp.py: A simple implementation of the Rabin-Karp string searching algorithm."""
__author__ = '[email protected] (Andrei Muntean)'
from sys import maxsize
# Raises x to the specified power. Must specify a mod as well.
def pow(x, exponent, mod):
if exponent == 0:
return 1
elif exponent % 2 == 0:
        return pow(x * x % mod, exponent // 2, mod)
else:
return x * pow(x, exponent - 1, mod) % mod
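# For example, pow(2, 10, 1000) == 24, since 2**10 == 1024 and 1024 % 1000 == 24.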
# Maps the specified lowercase character to a value between 0 and 25.
def to_num(character):
return ord(character) - 97
# Computes a hash value (a positive integer) for the specified string.
def get_hash(string, mod=None):
    if mod is None:
        mod = maxsize
hash = 0
for index in range(0, len(string)):
value = to_num(string[index])
hash += value * pow(26, len(string) - index - 1, mod)
return hash
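# For example, get_hash("abc") treats the string as a base-26 number:
# to_num gives a, b, c -> 0, 1, 2, so the hash is 0 * 26**2 + 1 * 26 + 2 == 28.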
# Determines whether a string contains the specified substring.
def rabin_karp(string, substring):
if len(string) < len(substring):
return False
target_hash = get_hash(substring)
for index in range(0, len(string) - len(substring) + 1):
hash = get_hash(string[index : index + len(substring)])
if hash == target_hash:
return True
return False
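# Note: rabin_karp above recomputes get_hash for every window (O(n * m)) and returns True
# on a bare hash match, so collisions could in principle give false positives. A minimal
# rolling-hash sketch follows; the function name and the default mod are illustrative
# choices (not part of the original), and it reuses pow, to_num and get_hash from above.
def rabin_karp_rolling(string, substring, mod=10**9 + 7):
    m = len(substring)
    if m == 0:
        return True
    if len(string) < m:
        return False
    high = pow(26, m - 1, mod)  # weight of the window's leading character
    target_hash = get_hash(substring, mod) % mod
    window_hash = get_hash(string[:m], mod) % mod
    for index in range(len(string) - m + 1):
        # A hash hit is only a candidate; compare the window to rule out collisions.
        if window_hash == target_hash and string[index:index + m] == substring:
            return True
        if index + m < len(string):
            # Slide the window: drop the leading character, append the next one.
            window_hash = (window_hash - to_num(string[index]) * high) % mod
            window_hash = (window_hash * 26 + to_num(string[index + m])) % mod
    return False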
# Tests the algorithm. This implementation only works for lowercase characters.
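# Expected output: True for the first six checks, False for the last four.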
print(rabin_karp('abcd', 'abcd'))
print(rabin_karp('abcdefg', 'a'))
print(rabin_karp('abcdefg', 'abc'))
print(rabin_karp('abcdefg', 'cde'))
print(rabin_karp('abcdefg', 'efg'))
print(rabin_karp('abcdefg', 'abcdefg'))
print(rabin_karp('x', 'abc'))
print(rabin_karp('abcdefg', 'acd'))
print(rabin_karp('abcdefg', 'efgg'))
print(rabin_karp('abcdefg', 'abcx'))
|