Dataset schema (one row per file; the rows below follow this column order):

| column | dtype | range / distinct values |
|---|---|---|
| blob_id | string | length 40..40 |
| directory_id | string | length 40..40 |
| path | string | length 3..616 |
| content_id | string | length 40..40 |
| detected_licenses | sequence | length 0..112 |
| license_type | string | 2 classes |
| repo_name | string | length 5..115 |
| snapshot_id | string | length 40..40 |
| revision_id | string | length 40..40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 .. 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 .. 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 .. 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k .. 681M |
| star_events_count | int64 | 0 .. 209k |
| fork_events_count | int64 | 0 .. 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 .. 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 .. 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 .. 10.2M |
| extension | string | 188 classes |
| content | string | length 3 .. 10.2M |
| authors | sequence | length 1..1 |
| author_id | string | length 1..132 |
2f6b30e1b2d944c0354a536d1c08f9eff2fa2e31 | 867846ed1df7f560ccc473413a70020155f66ad4 | /fixMarkdownHeadings.py | 1645916fdfb4c594d76c9f5feaf75e762005a85a | [] | no_license | abhineet123/PTF | 84297bf5aa95320dbc2d34f422f2dd563ff65a58 | 0c63f7f8251af0d70c329b2cef53694db76c1656 | refs/heads/master | 2023-08-18T18:34:40.513936 | 2023-08-09T17:28:51 | 2023-08-09T17:28:51 | 157,794,848 | 5 | 1 | null | 2021-05-16T18:48:32 | 2018-11-16T01:24:05 | MATLAB | UTF-8 | Python | false | false | 2,487 | py | import pyperclip
from Tkinter import Tk
from anytree import Node, RenderTree


def findChildren(_headings, root_level, _start_id, _root_node, n_headings):
    nodes = []
    _id = _start_id
    while _id < n_headings:
        _heading, line_id = _headings[_id]
        words = _heading.split(' ')
        curr_level = words[0].count('#')
        if curr_level <= root_level:
            break
        heading_words = []
        for word in words[1:]:
            if word.startswith('@'):
                break
            if word and not word.isspace():
                heading_words.append(word)
        parent_text = ''
        if _root_node is not None and _root_node.parent is not None:
            parent_text = _root_node.name
            if curr_level > 2:
                # parent_text = str(_root_node)
                parent_text = '{}/{}'.format(parent_text, _root_node.parent_text)
        heading_text = '_'.join(heading_words)
        new_node = Node(heading_text, parent=_root_node, orig_text=_heading, parent_text=parent_text,
                        marker=words[0], line_id=line_id)
        nodes.append(new_node)
        child_nodes, ___id = findChildren(_headings, curr_level, _id + 1, new_node, n_headings)
        nodes += child_nodes
        _id = ___id
    return nodes, _id


def main():
    in_txt = Tk().clipboard_get()
    lines = in_txt.split('\n')
    lines = [line for line in lines]
    start_t = None
    curr_t = None
    curr_root = Node("root_node")
    headings = [(k, i) for i, k in enumerate(lines) if k.startswith('#')]
    n_headings = len(headings)
    heading_id = 0
    level = 0
    nodes, _ = findChildren(headings, 0, 0, curr_root, n_headings)
    print(RenderTree(curr_root))
    # out_txt = in_txt
    for node in nodes:
        if node.is_root or node.parent.is_root:
            continue
        orig_text = node.orig_text
        new_text = '{} {} @ {}'.format(node.marker, node.name, node.parent_text)
        lines[node.line_id] = new_text
        print('{}: new_text: {}'.format(node, new_text))
        # out_txt = out_txt.replace(orig_text + '\n', new_text)
    out_txt = '\n'.join(lines)
    # print(out_txt)
    # with open(out_fname, 'w') as out_fid:
    #     out_fid.write(out_txt)
    try:
        pyperclip.copy(out_txt)
        spam = pyperclip.paste()
    except pyperclip.PyperclipException as e:
        print('Copying to clipboard failed: {}'.format(e))


if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
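A sketch of the transformation the script above performs, traced by hand from its '{} {} @ {}' format string; the sample headings are made up. Given clipboard text:

# Intro
## Setup
### Install

top-level headings are left alone, and every deeper heading is rewritten as "marker name @ parent-path":

# Intro
## Setup @ Intro
### Install @ Setup/Intro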
271a78833a1218dfa0f8b72a67a4f57a00c22f77 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /pa65DgwG5HMbtf6iY_17.py | 75b738a5317af85ec0083358898136fca0256512 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py |
class player():
    def __init__(self, name, age, height, weight):
        self.name = name
        self.age = age
        self.height = height
        self.weight = weight

    def get_age(self):
        return self.name + " is age " + str(self.age)

    def get_height(self):
        return self.name + " is " + str(self.height) + "cm"

    def get_weight(self):
        return self.name + " weighs " + str(self.weight) + "kg"
| [
"[email protected]"
] | |
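A minimal usage sketch for the player class above; the sample values are made up:

p = player("Alice", 25, 163, 51)
print(p.get_age())     # Alice is age 25
print(p.get_height())  # Alice is 163cm
print(p.get_weight())  # Alice weighs 51kg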
f92344a7b55176c215b569cd01d99ec9c4fd8ee9 | 4c7baee40b96e6499f96d6fe81935437264c9c88 | /stock_scraper/Indicators/RSI.py | d153d32174cf5e1a805325812081c1df70ba9210 | [
"MIT"
] | permissive | webclinic017/Stock-Analysis | 083d376484adebcad2d52113749a513aa48b09a8 | eea8cb5bcb635f12eb15ac13306ef16e2892cd92 | refs/heads/master | 2022-04-13T00:20:54.287730 | 2020-03-29T21:05:22 | 2020-03-29T21:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,277 | py | import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )

import pandas as pd


class kRSI():
    def __init__(self):
        # do nothing
        print "In kRSI class"

    def CalculateRSI(self, avgGain, avgLoss):
        if avgLoss == 0:
            return 100
        rs = avgGain / abs(avgLoss)
        rsi = 100 - ( 100 / ( 1 + rs))
        return rsi

    def Calculate(self, dataFrame):
        ##### ALGORITHM #####
        #                100
        # RSI = 100 - --------
        #              1 + RS
        # RS = Average Gain / Average Loss
        # The very first calculations for average gain and average loss are simple 14-period averages.
        # First Average Gain = Sum of Gains over the past 14 periods / 14.
        # First Average Loss = Sum of Losses over the past 14 periods / 14
        # The second, and subsequent, calculations are based on the prior averages and the current gain/loss:
        # Average Gain = [(previous Average Gain) x 13 + current Gain] / 14.
        # Average Loss = [(previous Average Loss) x 13 + current Loss] / 14.
        close = dataFrame['Close']
        change = close.diff()
        change = change.fillna(0)

        firstAvgGain = 0
        firstAvgLoss = 0
        rsiSeries = pd.Series()
        for i in range(14):
            # Appending the first 14 dummy values to the RSI series
            rsiSeries = rsiSeries.append(pd.Series({dataFrame.index[i]: 0}))
            if change[i] > 0:
                firstAvgGain = firstAvgGain + change[i]
            else:
                firstAvgLoss = firstAvgLoss + change[i]

        firstAvgGain = firstAvgGain / 14
        firstAvgLoss = firstAvgLoss / 14
        rsiValue = self.CalculateRSI(firstAvgGain, firstAvgLoss)
        rsiSeries[13] = rsiValue

        avgGain = firstAvgGain
        avgLoss = firstAvgLoss
        for i in range(14, close.count()):
            if change[i] > 0:
                avgGain = ((avgGain * 13) + change[i]) / 14
            else:
                avgLoss = ((avgLoss * 13) + change[i]) / 14
            rsiValue = self.CalculateRSI(avgGain, avgLoss)
            rsiSeries = rsiSeries.append(pd.Series({dataFrame.index[i]: rsiValue}))

        # print rsiSeries
        return rsiSeries
| [
"[email protected]"
] | |
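To make the Wilder smoothing above concrete, here is one update step with made-up numbers rather than real market data. With a previous average gain of 1.0, a previous average loss of -0.5, and a current price change of +0.28:

avgGain = ((1.0 * 13) + 0.28) / 14     # = 0.9486; the gain branch is taken because change > 0
avgLoss = -0.5                         # the loss average is left unchanged on a gain day in this implementation
rs = avgGain / abs(avgLoss)            # = 1.8971
rsi = 100 - (100 / (1 + rs))           # = 65.48
print(rsi)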
2d46b4f1438042afecc40c4acf20344806223487 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p3BR/R1/benchmark/startQiskit_Class245.py | b531b8a8a74a4a467b8ee86bca4983619acd57d1 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,321 | py | # qubit number=3
# total number=46
import numpy as np

from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown

kernel = 'circuit/bernstein'


def bitwise_xor(s: str, t: str) -> str:
    length = len(s)
    res = []
    for i in range(length):
        res.append(str(int(s[i]) ^ int(t[i])))
    return ''.join(res[::-1])


def bitwise_dot(s: str, t: str) -> str:
    length = len(s)
    res = 0
    for i in range(length):
        res += int(s[i]) * int(t[i])
    return str(res % 2)


def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    # oracle.draw('mpl', filename=(kernel + '-oracle.png'))
    return oracle


def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    # implement the Bernstein-Vazirani circuit
    zero = np.binary_repr(0, n)
    b = f(zero)

    # initial n + 1 bits
    input_qubit = QuantumRegister(n+1, "qc")
    classicals = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classicals)

    # inverse last one (can be omitted if using O_f^\pm)
    prog.x(input_qubit[n])
    # circuit begin
    prog.h(input_qubit[1])  # number=1
    prog.rx(-0.09738937226128368,input_qubit[2])  # number=2
    prog.h(input_qubit[1])  # number=33
    prog.cz(input_qubit[2],input_qubit[1])  # number=34
    prog.h(input_qubit[1])  # number=35
    prog.h(input_qubit[1])  # number=3

    # apply H to get superposition
    for i in range(n):
        prog.h(input_qubit[i])
    prog.h(input_qubit[n])
    prog.barrier()

    # apply oracle O_f
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [input_qubit[n]])

    # apply H back (QFT on Z_2^n)
    for i in range(n):
        prog.h(input_qubit[i])

    prog.barrier()

    # measure
    return prog


def get_statevector(prog: QuantumCircuit) -> Any:
    state_backend = Aer.get_backend('statevector_simulator')
    statevec = execute(prog, state_backend).result()
    quantum_state = statevec.get_statevector()
    qubits = round(log2(len(quantum_state)))
    quantum_state = {
        "|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
        for i in range(2 ** qubits)
    }
    return quantum_state


def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
    # Q: which backend should we use?

    # get state vector
    quantum_state = get_statevector(prog)

    # get simulate results

    # provider = IBMQ.load_account()
    # backend = provider.get_backend(backend_str)
    # qobj = compile(prog, backend, shots)
    # job = backend.run(qobj)
    # job.result()

    backend = Aer.get_backend(backend_str)
    # transpile/schedule -> assemble -> backend.run
    results = execute(prog, backend, shots=shots).result()
    counts = results.get_counts()
    a = Counter(counts).most_common(1)[0][0][::-1]

    return {
        "measurements": counts,
        # "state": statevec,
        "quantum_state": quantum_state,
        "a": a,
        "b": b
    }


def bernstein_test_1(rep: str):
    """011 . x + 1"""
    a = "011"
    b = "1"
    return bitwise_xor(bitwise_dot(a, rep), b)


def bernstein_test_2(rep: str):
    """000 . x + 0"""
    a = "000"
    b = "0"
    return bitwise_xor(bitwise_dot(a, rep), b)


def bernstein_test_3(rep: str):
    """111 . x + 1"""
    a = "111"
    b = "1"
    return bitwise_xor(bitwise_dot(a, rep), b)


if __name__ == "__main__":
    n = 2
    a = "11"
    b = "1"
    f = lambda rep: \
        bitwise_xor(bitwise_dot(a, rep), b)
    prog = build_circuit(n, f)
    sample_shot = 4000
    writefile = open("../data/startQiskit_Class245.csv", "w")
    # prog.draw('mpl', filename=(kernel + '.png'))
    backend = BasicAer.get_backend('statevector_simulator')

    circuit1 = transpile(prog, FakeYorktown())
    circuit1.h(qubit=2)
    circuit1.x(qubit=3)

    info = execute(circuit1, backend=backend, shots=sample_shot).result().get_counts()

    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
| [
"[email protected]"
] | |
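As a small classical sanity check of the hidden-string setup above: probing f with one-hot inputs recovers each bit of a (up to the constant b), which is what a single Bernstein-Vazirani measurement does in superposition. A sketch reusing the helpers defined in this file:

a, b = "11", "1"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
base = f("00")                      # equals b
for probe in ["10", "01"]:
    print(probe, f(probe) != base)  # True exactly where a has a 1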
0c5ab6e315afa21be876ef1a62aeaa6b3aaaff97 | 379a473d6f572b7fb0c00ffa3387931a6bebb082 | /Chapter9/plot_confusion_matrix.py | 061034801b4a3a58edac77979b4adc9847936748 | [] | no_license | Willianan/Data_Analysis_and_Mining | 6746f75dcade79f9134574d5962ec5bc19da51de | 8c526e5d12a535fde1b8b5c84b21007289b8eb20 | refs/heads/master | 2020-04-28T15:26:58.227850 | 2019-04-07T13:51:41 | 2019-04-07T13:51:41 | 175,367,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,053 | py | import matplotlib.pyplot as plt
import numpy as np
import itertools


def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
| [
"[email protected]"
] | |
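A minimal usage sketch for plot_confusion_matrix above; the 2x2 matrix is made up (in practice it would come from sklearn.metrics.confusion_matrix):

import numpy as np
import matplotlib.pyplot as plt
cm = np.array([[5, 2], [1, 7]])
plot_confusion_matrix(cm, classes=['neg', 'pos'], title='Confusion matrix')
plt.show()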
4decd0022d8b9f5903a2348dd6c8ca81a7787360 | d25c89a54ad980c68bc8d247eb43f88499617dda | /src/ocr_line_curation_chars.py | 1dda0fa5a2a17ad15a2000d2ca7bd195293a99de | [] | no_license | nakamura196/amami | 5922e396b89850d18660e465d8e6af498b28b967 | 19bdc712bb24ab325806c3cb2bb4666d16182768 | refs/heads/master | 2023-01-07T05:29:19.145424 | 2020-11-06T04:31:16 | 2020-11-06T04:31:16 | 286,902,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,639 | py | from PIL import Image
import sys
sys.path.append('/path/to/dir')
import json
import pyocr
import pyocr.builders

tools = pyocr.get_available_tools()
if len(tools) == 0:
    print("No OCR tool found")
    sys.exit(1)

tool = tools[0]
print("Will use tool '%s'" % (tool.get_name()))

langs = tool.get_available_languages()
print("Available languages: %s" % ", ".join(langs))

builder = pyocr.builders.TextBuilder()
builder = pyocr.tesseract.CharBoxBuilder()
builder = pyocr.builders.LineBoxBuilder()  # tesseract_layout=6

with open('../docs/iiif/amami/manifest.json') as f:
    df = json.load(f)

canvases = df["sequences"][0]["canvases"]

members = []

for i in range(len(canvases)):
    page = str(i+1).zfill(4)
    canvas = canvases[i]
    canvas_id = canvas["@id"]
    image_url = canvas["images"][0]["resource"]["@id"]
    filename = image_url.split("/")[-1]
    im_path = "../docs/files/large/"+filename

    char_boxes = tool.image_to_string(
        Image.open(im_path),
        lang='jpn_vert',
        builder=builder
    )

    # print(char_boxes)

    im = Image.open(im_path)

    for j in range(len(char_boxes)):
        box = char_boxes[j]

        # box.position is ((min-x, min-y), (max-x, max-y)) with the origin at the bottom left.
        # Convert it here to x, y, width, height with the origin at the top left.
        x = box.position[0][0]
        y = im.height - box.position[1][1]
        width = box.position[1][0] - x
        height = im.height - box.position[0][1] - y

        text = box.content.replace(" ", "")

        print("\t".join([
            text,  # the character(s)
            str(x), str(y), str(width), str(height),
            # confidence: str(box.confidence),
        ]))

        if text == "":
            continue

        member_id = canvas_id+"#xywh="+str(x)+","+str(y)+","+str(width)+","+str(height)

        member = {
            "label": "Page "+page+"_"+str(j+1),
            "metadata": [
                {
                    "label": "Annotation",
                    "value": [
                        {
                            "on": member_id,
                            "resource": {
                                "@type": "cnt:ContentAsText",
                                "chars": text,
                                "marker": {
                                    "text": text,
                                },
                                "format": "text/html"
                            },
                            "motivation": "sc:painting",
                            "@id": "http://codh.rois.ac.jp/char-shape/book/200003803/annotation/"+"Page "+page+"_"+str(j+1),
                            "@type": "oa:Annotation"
                        }
                    ]
                }
            ],
            "@id": member_id,
            "@type": "sc:Canvas"
        }

        members.append(member)

    break  # stop after the first canvas

curation = {
    "@context": [
        "http://iiif.io/api/presentation/2/context.json",
        "http://codh.rois.ac.jp/iiif/curation/1/context.json"
    ],
    "@type": "cr:Curation",
    "@id": "https://mp.ex.nii.ac.jp/api/curation/json/aaa5d585-3cd2-4651-ba98-71769b028e19",
    "label": "Curating list",
    "selections": [
        {
            "@id": "https://mp.ex.nii.ac.jp/api/curation/json/aaa5d585-3cd2-4651-ba98-71769b028e19/range1",
            "@type": "sc:Range",
            "label": "Manual curation by IIIF Curation Viewer",
            "members": members,
            "within": {
                "@id": "https://raw.githubusercontent.com/nakamura196/amami/master/docs/iiif/amami/manifest.json",
                "@type": "sc:Manifest",
                "label": "奄美大島"
            }
        }
    ]
}

fw = open("../docs/curation/test_line.json", 'w')
json.dump(curation, fw, ensure_ascii=False, indent=4, sort_keys=True, separators=(',', ': '))
| [
"[email protected]"
] | |
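The coordinate flip above can be checked by hand. Assuming a hypothetical page 1000 px tall and a box whose bottom-left-origin position is ((100, 880), (160, 950)):

im_height = 1000
position = ((100, 880), (160, 950))
x = position[0][0]                       # 100
y = im_height - position[1][1]           # 1000 - 950 = 50, the top edge in top-left coordinates
width = position[1][0] - x               # 160 - 100 = 60
height = im_height - position[0][1] - y  # 1000 - 880 - 50 = 70
# consistent with the box itself: 950 - 880 = 70 high, 160 - 100 = 60 wide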
9013611ed7ed83fb4065f9d6e3bb601c6efacc71 | 9f84d91a8ae3df53b07fe3267992fba00a99ac9e | /torch_geometric/graphgym/contrib/__init__.py | 47365d98aadd0b304b67af8266d8e4228eb52f85 | [
"MIT"
] | permissive | pyg-team/pytorch_geometric | ebea601eae228f3905465b5c2349d3fb3bb5cb26 | a52af694b8ce6a80811e20966fe6d08a3e7511fe | refs/heads/master | 2023-08-31T04:13:40.943308 | 2023-08-30T12:48:42 | 2023-08-30T12:48:42 | 106,024,057 | 6,775 | 1,563 | MIT | 2023-09-14T17:10:18 | 2017-10-06T16:03:03 | Python | UTF-8 | Python | false | false | 389 | py | from .act import * # noqa
from .config import * # noqa
from .encoder import * # noqa
from .head import * # noqa
from .layer import * # noqa
from .loader import * # noqa
from .loss import * # noqa
from .network import * # noqa
from .optimizer import * # noqa
from .pooling import * # noqa
from .stage import * # noqa
from .train import * # noqa
from .transform import * # noqa
| [
"[email protected]"
] | |
0ce6670c9e67f8a7231b9c6d03cec2f066c58ab0 | 376c8f2c9051b8dffe851fab7c831f96dcf06ddb | /dp/1965_상자넣기.py | e430c04d04043fdf653ac4dee4d15404c6d63625 | [] | no_license | getChan/algorithm | cad3ac74ac686ec4306ad8db551700d35e27a782 | 6a82c04cdbf670e3140b1a8685480a3f37c82c62 | refs/heads/master | 2021-06-30T01:08:42.270514 | 2020-09-19T07:55:45 | 2020-09-19T07:55:45 | 140,247,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | n = int(input())
boxes = [int(_) for _ in input().split()]
dp = [1 for _ in range(n)]
# Longest increasing subsequence (LIS) problem
# dp[i]: length of the longest increasing subsequence ending at index i
answer = 1
for i in range(0, n):
    for j in range(0, i):
        if boxes[i] > boxes[j] and dp[i] < dp[j]+1:
            dp[i] = dp[j] + 1
    if dp[i] > answer:
        answer = dp[i]
print(answer) | [
"[email protected]"
] | |
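A short trace of the recurrence above on a made-up input (n = 6, boxes 15 11 4 8 5 12): dp ends as [1, 1, 1, 2, 2, 3], so the answer is 3 (for example the chain 4 -> 8 -> 12). The same check, written as a standalone sketch:

boxes = [15, 11, 4, 8, 5, 12]
dp = [1] * len(boxes)
for i in range(len(boxes)):
    for j in range(i):
        if boxes[i] > boxes[j]:
            dp[i] = max(dp[i], dp[j] + 1)
print(max(dp))  # 3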
1a0fbf08f1e836f3287ff05cb95026d3db0e9c4d | 071ca9494ce811cdf52dc585ec863dc621a7865b | /test_coroutines.py | c82bc510a64e9c940b63bebf76764d13498b1922 | [] | no_license | phaustin/parallel_project | 37d7ea7dbc6de8406d50e47142b271e05c1552eb | 821cca5ad7dcf5c3b6caa2ca3f20358b86120d06 | refs/heads/master | 2021-08-22T08:18:01.675473 | 2017-11-29T17:56:57 | 2017-11-29T17:56:57 | 112,510,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,407 | py | #http://blog.thumbtack.net/python-coroutines/
def coroutine(f):
    def wrapper(*arg, **kw):
        c = f(*arg, **kw)
        c.send(None)
        return c
    return wrapper


@coroutine
def logger(prefix="", next=None):
    while True:
        message = yield
        print("{0}: {1}".format(prefix, message))
        if next:
            next.send(message)


@coroutine
def cache_checker(cache, onsuccess=None, onfail=None):
    while True:
        request = yield
        if request in cache and onsuccess:
            onsuccess.send(cache[request])
        elif onfail:
            onfail.send(request)


@coroutine
def load_balancer(*workers):
    while True:
        for worker in workers:
            request = yield
            worker.send(request)


@coroutine
def worker(cache, response, next=None):
    while True:
        request = yield
        cache[request] = response
        if next:
            next.send(response)


cache = {}
response_logger = logger("Response")
cluster = load_balancer(
    logger("Worker 1", worker(cache, 1, response_logger)),
    logger("Worker 2", worker(cache, 2, response_logger)),
    logger("Worker 3", worker(cache, 3, response_logger)),
)
cluster = cache_checker(cache, response_logger, cluster)
cluster = logger("Request", cluster)

if __name__ == "__main__":
    from random import randint
    for i in range(20):
        cluster.send(randint(1, 5))
| [
"[email protected]"
] | |
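The decorator-plus-send pattern above generalizes to any push-style stage. A minimal extra stage written against the same coroutine decorator (the stage name is made up):

@coroutine
def uppercase(next=None):
    while True:
        text = yield
        if next:
            next.send(text.upper())

pipeline = uppercase(logger("Shout"))
pipeline.send("hello")  # prints: Shout: HELLO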
957b52ebdc5a273f9ccdd5b16a60f4b0053ff1bd | 2a4be1e256ed19c8dd5d37cb3cfbe7f50bb4f8f6 | /Landing/wsgi.py | 7a21379d42a306591ffa506ec52344f303075fa1 | [] | no_license | Miker69/Landing_telebot | 55c3b34aac3db5753203260421ae7f2584160122 | f32538d563c74108053418b177340b634f26c5f3 | refs/heads/master | 2023-04-04T03:48:58.941446 | 2021-04-13T15:37:36 | 2021-04-13T15:37:36 | 357,578,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
WSGI config for Landing project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Landing.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
1a7fafcd552464eccc415a7cd7f6c13f568bd217 | c2c03e034513a766c7de8298be428fb3eab3ab7b | /chainerrl/NeverSay20/env/bin/wheel | 6aec0ddb9ff1fc7d022b5e8ab8645344758aa131 | [] | no_license | hamko/sample | 434adeca12e11587edce8cad799162b84c7f5071 | 9b0624b99e3e551d6b72b632d3a7d1a38aac7a9f | refs/heads/master | 2021-01-17T02:51:25.174354 | 2018-10-23T02:40:04 | 2018-10-23T02:40:04 | 9,640,383 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 255 | #!/home/hamko/git/sample/chainerrl/TicTacToe/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"[email protected]"
] | ||
f8022236e53f1c2f42d0dfc05b69d592acb001a4 | 0d0c13d80924b6e5cfc74a623eb250a5fd2e2cca | /Stacks/sliding window maximum.py | b848b90e9f5e374590087d07587cd492462bd090 | [
"Apache-2.0"
] | permissive | Akashdeep-Patra/problemSolving | 54e2fc3c3a9587b8c976921f6fc45364af1dfcac | c278e5d090af7370e56789e68b7bb73dc37165f8 | refs/heads/master | 2022-11-15T19:20:54.585886 | 2020-06-29T10:47:39 | 2020-06-29T10:47:39 | 258,956,787 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,145 | py | from collections import deque
class Solution:
    # @param A : tuple of integers
    # @param B : integer
    # @return a list of integers
    def slidingMaximum(self, a, k):
        q = deque()
        for i in range(k):
            if(len(q)==0 or a[q[-1]]>=a[i]):
                q.append(i)
            else:
                while(len(q)!=0 and a[q[-1]]<a[i]):
                    q.pop()
                q.append(i)
        ans = []
        ans.append(a[q[0]])
        for i in range(k, len(a)):
            if(len(q)==0 or a[q[-1]]>=a[i]):
                q.append(i)
            else:
                while(len(q)!=0 and a[q[-1]]<a[i]):
                    q.pop()
                q.append(i)
            while(len(q)!=0 and q[0]<=i-k):
                q.popleft()
            ans.append(a[q[0]])
        return ans
"""
Sliding Window Maximum
Problem Description
Given an array of integers A. There is a sliding window of size B which is moving from the very left of the array to the very right. You can only see the B numbers in the window. Each time the sliding window moves rightwards by one position. You have to find the maximum for each window.
Return an array C, where C[i] is the maximum value in the array from A[i] to A[i+B-1].
Refer to the given example for clarity.
NOTE: If B > length of the array, return 1 element with the max of the array.
Problem Constraints
1 <= |A|, B <= 106
Input Format
The first argument given is the integer array A.
The second argument given is the integer B.
Output Format
Return an array C, where C[i] is the maximum value of from A[i] to A[i+B-1].
Example Input
Input 1:
A = [1, 3, -1, -3, 5, 3, 6, 7]
B = 3
Input 2:
A = [1, 2, 3, 4, 2, 7, 1, 3, 6]
B = 6
Example Output
Output 1:
[3, 3, 5, 5, 6, 7]
Output 2:
[7, 7, 7, 7]
Example Explanation
Explanation 1:
Window position | Max
--------------------|-------
[1 3 -1] -3 5 3 6 7 | 3
1 [3 -1 -3] 5 3 6 7 | 3
1 3 [-1 -3 5] 3 6 7 | 5
1 3 -1 [-3 5 3] 6 7 | 5
1 3 -1 -3 [5 3 6] 7 | 6
1 3 -1 -3 5 [3 6 7] | 7
Explanation 2:
Window position | Max
--------------------|-------
[1 2 3 4 2 7] 1 3 6 | 7
1 [2 3 4 2 7 1] 3 6 | 7
1 2 [3 4 2 7 1 3] 6 | 7
1 2 3 [4 2 7 1 3 6] | 7
""" | [
"[email protected]"
] | |
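Each index enters and leaves the deque at most once, so slidingMaximum above runs in O(n) time with O(B) extra space despite the nested loops. A quick check against Example Input 1 (a sketch, assuming the class is importable):

s = Solution()
print(s.slidingMaximum((1, 3, -1, -3, 5, 3, 6, 7), 3))  # [3, 3, 5, 5, 6, 7]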
779a04c19db092d2dee3ac7a2cee5ec9378b58bc | da47e42519b6d5eb37bdb634fd618672706e79da | /localizacion_metromed/Txt_file_module/models/txt_activo.py | d825bda0ddc8ed2165a4344dd28d866ff52ec13b | [] | no_license | Tysamncaweb/produccion2 | 02bbbccefc4f4cd0d0948b1b0552d931f804fb9b | b95909d0689fc787185290565f0873040a6027cf | refs/heads/master | 2022-04-26T13:51:22.316294 | 2020-04-29T19:58:35 | 2020-04-29T19:58:35 | 260,013,639 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,580 | py | # -*- coding: utf-8 -*-
# Part of BrowseInfo. See LICENSE file for full copyright and licensing details.

from datetime import datetime, timedelta
from odoo import models, api, fields
from logging import getLogger

_logger = getLogger(__name__)


class bono(models.TransientModel):
    _inherit = "account.wizard.generacion.txtfile"

    @api.multi
    def print_bono2(self):
        VAR = 0
        VAR2 = 0
        concepto2 = self.concepto.upper()
        totalpago = 0
        if self.bancose == 'activo':
            # ///////////////////////////// Create the .txt file in the local Odoo folder ///////////////////////////////////
            file = open("archivo.txt", "w")
            # //////////////// computations and data creation for the .txt /////////////////////////////////////////////////
            self.invoices = self.env['hr.payslip'].search(
                [('date_to', '<=', self.date_to), ('date_from', '>=', self.date_from)])
            _logger.info("\n\n\n {} \n\n\n".format(self.invoices))
            date_f = str(self.date_imp)
            a = date_f[0:4]
            m = date_f[5:7]
            d = date_f[8:]
            # build the header line
            for invoice in self.invoices:
                # fetch the account number
                cuenta = invoice.employee_id.account_number_2
                if cuenta:
                    filtro = cuenta[0:4]
                else:
                    filtro = '1234'
                if filtro == '0171':
                    VAR2 += 1
                    for n in invoice.line_ids:
                        #varsuma = n.total
                        #varsuma = float("{0:.2f}".format(varsuma))..
                        totalpago += n.total
            totalpago = float("{0:.2f}".format(totalpago))
            totalpago = str(totalpago)
            for i in range(0, len(totalpago)):
                if (totalpago[i] == '.'):
                    cds = totalpago[i + 1:]
            if len(cds) == 2:
                ceroextra = '0'
                imprimir0 = ''
            else:
                ceroextra = ''
                imprimir0 = '0'
            # write the header to the txt
            totalpago = totalpago.replace(".", ",")
            lineas = ['H',
                      ';',
                      VAR2,
                      ';',
                      totalpago,
                      imprimir0,
                      ';',
                      concepto2,
                      ';',
                      self.nlote,
                      ';',
                      d, m, a]
            for l in lineas:
                file.write(str(l))
            file.write('\n')
            for invoice in self.invoices:
                # fetch the account number
                cuenta = invoice.employee_id.account_number_2
                if cuenta:
                    filtro = cuenta[0:4]
                else:
                    filtro = '1234'
                if filtro == '0171':
                    letra = invoice.employee_id.nationality
                    ncedu = invoice.employee_id.identification_id_2
                    catcedu = len(ncedu)
                    if catcedu == 7:
                        catce = '00'
                    if catcedu == 8:
                        catce = '0'
                    # catce: the zeros prepended before the ID number
                    # ncedu: the ID (cedula) number
                    # compute the total payroll amount
                    busqueda = self.env['hr.salary.rule.category'].search([('id', '!=', 0)])
                    if busqueda:
                        for a in busqueda:
                            if a.name == 'Net':
                                ttotal = a.id
                    busqueda2 = self.env['hr.payslip.line'].search([('id', '!=', 0)])
                    for vip in invoice.line_ids:
                        for vip2 in busqueda2:
                            if vip == vip2:
                                if vip2.category_id.id == ttotal:
                                    totalpago = vip2.total
                                    totalpago = float("{0:.2f}".format(totalpago))
                                    totalpago = str(totalpago)
                                    for i in range(0, len(totalpago)):
                                        if (totalpago[i] == '.'):
                                            cds = totalpago[i + 1:]
                                    if len(cds) == 2:
                                        ceroextra = '0'
                                        imprimir0 = ''
                                    else:
                                        ceroextra = ''
                                        imprimir0 = '0'
                                    totalpago = totalpago.replace(".", ",")
                                    VAR += 1
                                    # write the detail line to the txt
                                    lineas = ['P',
                                              ';',
                                              letra,
                                              catce,
                                              ncedu,
                                              ';',
                                              totalpago,
                                              imprimir0,
                                              ';',
                                              concepto2,
                                              ';',
                                              VAR,
                                              ';',
                                              '000'
                                              ]
                                    for l in lineas:
                                        file.write(str(l))
                                    file.write('\n')
            file.close()
            nombretxt = 'CargaMasivadepagodeNómina.txt'
            nameclass = 'account.wizard.generacion.txtfile'
            return self.imprimir_txt(nombretxt, nameclass)
| [
"[email protected]"
] | |
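The decimal handling above pads amounts to two decimal places and swaps the point for a comma before writing. A standalone sketch of the same steps on a made-up amount:

totalpago = str(float("{0:.2f}".format(1234.5)))   # '1234.5' (str() drops the trailing zero)
cds = totalpago.split('.')[1]                      # '5', i.e. only one decimal digit survived
imprimir0 = '' if len(cds) == 2 else '0'           # so a '0' must be appended after the amount
print(totalpago.replace('.', ',') + imprimir0)     # 1234,50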
bcfe1e99173899327dcff8051e09ffc4433afce6 | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/django-1.4/tests/regressiontests/views/tests/debug.py | 566b96c0e404e04bcbc2da47bd1409201df46bd8 | [] | no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.4/tests/regressiontests/views/tests/debug.py | [
"[email protected]"
] | |
d4156f6b387f2c2bbfab43d6331fe0e83479c75c | d2332604fc80b6d622a263b2af644425a7e703de | /facebook/trees_and_graphs/12_accounts_merge.py | 9d2fd775ac9e45fdb77ea0793a06731010f2ed00 | [] | no_license | abhijitdey/coding-practice | b3b83a237c1930266768ce38500d6812fc31c529 | 6ae2a565042bf1d6633cd98ed774e4a77f492cc8 | refs/heads/main | 2023-08-14T23:31:06.090613 | 2021-10-18T21:35:56 | 2021-10-18T21:35:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,999 | py | from typing import List
"""
1. First, create a graph (adjacency list) connecting all emails that are related to the same account
2. Second, keep track of the account name for each unique email
3. Find connected components in the graph:
a. Each connected component refers to the same account.
b. So, all unique emails in a connected component belong to the same account. Hence, sorting them gives the answer
- Use DFS to traverse the graph and find the connected components
"""
from collections import defaultdict
class Solution:
def accountsMerge(self, accounts: List[List[str]]) -> List[List[str]]:
if not accounts:
return []
emailToName = dict()
# The adj list will contain all emails connected to each email from all the accounts
adj_list = defaultdict(set)
visited = set()
for account in accounts:
name = account[0]
for email in account[1:]:
first_email = account[1]
adj_list[first_email].add(email)
adj_list[email].add(first_email)
emailToName[email] = name
def dfs(email):
visited.add(email)
stack = [email]
components = [email]
while len(stack) > 0:
email = stack.pop()
for neighbor in adj_list[email]:
if neighbor in visited:
continue
# Add the neighbor to the stack
stack.append(neighbor)
components.append(neighbor)
visited.add(neighbor)
return components
# We need to run DFS from each email and see how many connected components are present
result = []
for email in adj_list:
if email in visited:
continue
components = dfs(email)
result.append([emailToName[email]] + sorted(components))
return result
| [
"[email protected]"
] | |
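A quick run of accountsMerge above on the classic sample input; the email addresses here are made up for illustration:

accounts = [
    ["John", "johnsmith@mail.com", "john_newyork@mail.com"],
    ["John", "johnsmith@mail.com", "john00@mail.com"],
    ["Mary", "mary@mail.com"],
    ["John", "johnnybravo@mail.com"],
]
print(Solution().accountsMerge(accounts))
# the two overlapping John accounts merge into one entry with a sorted email list;
# the other John and Mary remain separate entries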
381eb050befc9726fcc4b57820d8a6e03ecd00a6 | d2abb93fc50ec2ef3513a4a1307c0e6955a27eaf | /generic/threaded.py | 2bb1c2d319ec064bb3ef93f259d786d8568e2606 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | staccDOTsol/DialoGPT | 1cddb45ab500afdb6c89690b9440d880c61f7140 | b026278c2ae831f3bc63759300dba56f14bf3228 | refs/heads/master | 2023-01-27T18:27:14.611490 | 2020-12-04T07:48:25 | 2020-12-04T07:48:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,303 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import sys
import time
import os.path
import math
import re
import argparse
import traceback
import json
import bz2
import gzip
from nltk.tokenize import TweetTokenizer
from flashtext import KeywordProcessor
import hashlib
def makedirs(fld):
    if not os.path.exists(fld):
        os.makedirs(fld)
PICKLE_MAX_LEN = 1e4
TAG_COMMENT = 't1_'
TAG_SUBMISSION = 't3_'
dontuse = '__dontuse__'
url_str = '__url__'
parser = argparse.ArgumentParser()
parser.add_argument("dump_name", help="YYYY-MM, dumped files to be loaded")
parser.add_argument("--bl_words", help="list of offensive words, to avoid in responses")
parser.add_argument("--ignore_keys", default=False, type=bool, help="If true ignore any keys provided as arguments")
parser.add_argument("--keep_keys", help="hashes of instances to keep")
parser.add_argument("--discard_tgt_keys", help="hashes of targets to discard")
parser.add_argument("--freq_words", help="words sorted by their corpus frequencies")
parser.add_argument("--bl_subreddits", help="blocklist of offensive subreddits")
parser.add_argument("--wl_subreddits", help="whitelist of relatively safe subreddits")
parser.add_argument("--reddit_input", default="d:/data/reddit/bz2/", help="Location of the input reddit data (bz2 files)")
parser.add_argument("--reddit_output", default="d:/data/reddit/", help="Location of the output reddit data (conversations)")
parser.add_argument("--max_len", default=30, type=int)
# 30 words means roughly 70 characters on average for Reddit
parser.add_argument("--max_len_type", default='w') # w for words, c for chars
parser.add_argument("--min_depth", default=2, type=int)
parser.add_argument("--max_depth", default=10, type=int)
parser.add_argument("--min_score", default=0, type=int)
parser.add_argument("--use_title", default=1, type=int)
parser.add_argument("--leaves_only", default=0, type=int)
parser.add_argument("--split_size", default=int(5e5), type=int)
parser.add_argument("--task", default='conv')
parser.add_argument("--parallel", default=False, type=bool)
parser.add_argument("--pre_tok", default=False, type=bool, help="whether to tokenize during the extract step")
parser.add_argument("--clean", default=False, type=bool, help="apply some filters to significantly reduce number of instances")
args = parser.parse_args()
print("Args: %s" % args, file=sys.stderr)
fields_subm = [ "id", "score", "num_comments", "domain", "permalink", "title" ]
fields_comm = [ "id", "author", "parent_id", "link_id", "score", "n_char", "body"]
bl_words = KeywordProcessor()
bl_subreddits = {}
wl_subreddits = {}
keys = {}
keys_rm = {}
def get_submission_id(submission):
    return TAG_SUBMISSION + submission["id"]


def get_comment_id(comment):
    return TAG_COMMENT + comment["id"]
def norm_sentence(txt, is_extract):
    if is_extract:
        return minimal_norm_sentence(txt)
    else:
        return gpt_norm_sentence(txt)


def minimal_norm_sentence(txt):
    txt = txt.replace(chr(92),'') # chr(92) = '\'. as twitter has 'b\/c' rather than 'b/c'
    txt = txt.replace('\n', ' ')
    txt = txt.replace('\r', ' ')
    txt = txt.replace('\t', ' ')
    #print ("Tokenized: [%s]" % txt, file=sys.stderr)
    return txt
def gpt_norm_sentence(txt):
    # url and tag
    words = []
    for word in txt.split():
        if word[0] == '#': # don't allow tag
            continue
        i = word.lower().find('http')
        if i >= 0:
            word = word[:i] + ' ' + '__url__'
        words.append(word.strip())
    txt = ' '.join(words)

    # remove illegal char
    txt = txt.replace(chr(92),'') # chr(92) = '\'. as twitter has 'b\/c' rather than 'b/c'
    txt = txt.replace("b/c","because").replace('j/k','just kidding').replace('w/o','without').replace('w/','with')
    txt = re.sub('__mention__','MENTION',txt)
    txt = re.sub('__url__','URL',txt)
    txt = re.sub(r"[^A-Za-z0-9()\[\]:,.!?'“” ]", " ", txt)
    txt = re.sub('MENTION','__mention__',txt)
    txt = re.sub('URL','__url__',txt)

    tokenizer = TweetTokenizer(preserve_case=True)
    txt = ' ' + ' '.join(tokenizer.tokenize(txt)) + ' '

    # remove un-necessary space
    return ' '.join(txt.split())
def extract_submissions(fld_bz2, fld_split, which, size=2e5):
    path_in = fld_bz2 + '/RS_%s.bz2'%args.dump_name

    n = 0
    m = 0
    n2 = 0
    m2 = 0
    sub = 0
    sid2 = []
    sids = []
    lines = []
    try:
        submissions = dict()
        subreddit = reddit.subreddit(which)
        for submission2 in subreddit.top(limit=7500):
            try:
                n += 1
                #if n%1e4 == 0:
                #    print('[%s] selected %.3fM from %.2fM submissions'%(
                #        args.dump_name, m/1e6, n/1e6))
                try:
                    submission = {}
                    submission["id"] = submission2.id
                    submission["score"] = submission2.score
                    submission["domain"] = submission2.domain
                    submission["permalink"] = submission2.permalink
                    submission["title"] = submission2.title
                    submission["num_comments"] = submission2.num_comments
                    if int(submission['num_comments']) >= 2:    # filter 1
                        submission['title'] = norm_sentence(submission['title'], True)
                        submission = submission
                        submissions[get_submission_id(submission)] = submission
                        lines.append('\t'.join([str(submission[k]) for k in fields_subm]))
                        m += 1
                        sid2.append(get_submission_id(submission))
                        if len(sid2) == size:
                            #print('writing submissions_sub%i'%sub)
                            sids.append(set(sid2))
                            with open(fld_split + '/rs_sub%i.tsv'%sub, 'w', encoding='utf-8') as f:
                                f.write('\n'.join(lines))
                            sid2 = []
                            lines = []
                except Exception as e:
                    print(e)
                    traceback.print_exc()
                    continue

                lines2 = []
                #for sub in range(n_sub):
                #    open(fld_split + '/rc_sub%i.tsv'%sub, 'w')
                comments = dict()
                for top_level_comment in submission2.comments:
                    try:
                        n2 += 1
                        comment = {}
                        comment["id"] = top_level_comment.id
                        try:
                            if top_level_comment.author is not None:
                                comment["author"] = top_level_comment.author.name
                            else:
                                comment["author"] = "None"
                        except:
                            comment["author"] = "None"
                        comment["parent_id"] = top_level_comment.parent_id
                        try:
                            comment["link_id"] = top_level_comment.link_id
                            comment["score"] = top_level_comment.score
                            comment["body"] = top_level_comment.body
                        except:
                            comment["link_id"] = comment["parent_id"]
                            comment["score"] = 0
                            comment["body"] = ""
                        #if args.keep_keys:
                        #    k = '\t'.join([comment['link_id'], get_comment_id(comment), 'dep'])
                        #    if k not in keys.keys():
                        #        continue
                        if comment['body'] != '[deleted]':    # filter 1
                            #if '&gt;' in comment['body'] or '&lt;' in comment['body']:    # filter 3: '&gt;' means '>'
                            #    continue
                            #sid = comment['link_id']
                            comment['n_char'] = len(comment['body'])
                            comment['body'] = norm_sentence(comment['body'], True)
                            #print(comment)
                            if len(comment['body'].split()) >= 2:    # filter 2
                                comment = comment
                                comments[get_comment_id(comment)] = comment
                                lines2.append('\t'.join([str(comment[k]) for k in fields_comm]))
                                m2 += 1
                        #break
                    except Exception as e:
                        print(e)
                        traceback.print_exc()

                sorted_id = sorted([(
                    comments[cid]['link_id'],
                    comments[cid]['parent_id'],
                    cid
                ) for cid in comments])

                n = len(comments)
                #print('total comments: %i'%n)
                i = 0
                m = 0
                lines = []
                sum_resp_len = 0
                skip_id = {}
                if args.leaves_only:
                    for _, pid, _ in sorted_id:
                        skip_id[pid] = 1
                    #print("leaves ratio : %f" % (len(skip_id) / len(sorted_id)), file=sys.stderr)
                for sid, pid, cid in sorted_id:
                    i += 1
                    if i%1e5 == 0:
                        #print('selected %.2fM from %.1f/%.1fM comments'%(m/1e6, i/1e6, n/1e6), file=sys.stderr)
                        if len(lines) > 0:
                            # NOTE: path_out is not defined in this function (carried over from save_convo)
                            with open(path_out, 'a', encoding="utf-8") as f:
                                f.write('\n'.join(lines) + '\n')
                            lines = []
                    subreddit = ''
                    domain = ''
                    if sid in submissions.keys():
                        subreddit = submissions[sid]['permalink'].split('/')[2].lower()
                        domain = submissions[sid]['domain'].lower()
                    info = subreddit + '\t' + domain
                    #if args.bl_subreddits:
                    #    if not subreddit:
                    #        print("skip\tmissing\t%s\tN/A\tmissing submission: %s" % (info, sid), file=sys.stderr)
                    #        continue
                    #    if subreddit in bl_subreddits:
                    #        print("skip\tbad_subreddit\t%s\tN/A\toffensive subreddit: %s" % (info, subreddit), file=sys.stderr)
                    #        continue
                    comment = comments[cid]
                    if comment['score'] == 'None':
                        score = 0
                    else:
                        score = int(comment['score'])
                    if score < args.min_score:    # filter 1
                        #print("skip\tlow_score\t%s\t%s\tscore %d < %d" % (info, comment['body'], score, args.min_score), file=sys.stderr)
                        continue
                    txts = []
                    for c in comments:
                        txts.append(comments[c]['body'])
                    #print(txts)
                    #txts = get_convo(sid, cid, cid, submissions, comments)    # filter 2
                    #print(len(txts))
                    if len(txts) < args.min_depth:    # filter 3
                        #print("skip\tmin_depth\t%s\t%s\tdepth %d < %d: %s" % (info, comment['body'], len(txts), args.min_depth, "|".join(txts)), file=sys.stderr)
                        continue
                    for i in range(len(txts)):
                        txts[i] = norm_sentence(txts[i], False)
                        if args.leaves_only and args.clean:
                            sc = '1.0'
                            skip_target = False
                            if args.discard_tgt_keys:
                                tgt_h = hashlib.sha224(txts[i].encode("utf-8")).hexdigest()
                                if tgt_h in keys_rm.keys():
                                    skip_target = True
                            if bl_words.extract_keywords(txts[i]) or skip_target:
                                sc = '0.0'
                            txts[i] = sc + ' ' + txts[i]
                    src = ' EOS '.join(txts[:-1])
                    tgt = txts[-1]
                    header = ','.join([sid, pid, cid])
                    lines.append(header + '\t' + src + '\t' + tgt)
                    sum_resp_len += len(tgt.split())
                    m += 1

                #avg_len = sum_resp_len/m
                with open(fld_split + '/%s.tsv'%args.dump_name, 'a', encoding="utf-8") as f:
                    f.write('\n'.join(lines) + '\n')
                #print('finally selected %i/%i'%(m, n))#, avg_len))
                with open(fld_split + '/rc_sub%i.tsv'%sub, 'a', encoding='utf-8') as f:
                    #print(lines2[sub])
                    f.write('\n'.join(lines2))
            except Exception as e:
                print(e)
                traceback.print_exc()

        #sids, ms, ns, mc, ns = extract_submissions(fld_root_in, fld_split, size=args.split_size)
        #mc, nc = extract_comments(fld_root_in, fld_split, sids)
        #with open(fld_split + '/stat.tsv', 'a') as f:
        #    f.write('\t'.join(map(str, [args.dump_name, m2, n2, m, n])) + '\n')
        #print('extract_comments done.\n')
        #return m, n

        #print('writing submissions_sub%i'%sub)
        sids.append(set(sid))  # NOTE: 'sid' comes from the last inner loop iteration (likely intended to be sid2)
        with open(fld_split + '/rs_sub%i.tsv'%sub, 'a', encoding='utf-8') as f:
            f.write('\n'.join(lines))
        lines = []
        sub += 1
    except Exception as e:
        print(e)

    print('extract_submissions done.\n')
    return sids, m, n, m2, n2
def extract_comments(fld_bz2, fld_split, sids):
    path_in = fld_bz2 + '/RC_%s.bz2'%args.dump_name

    n = 0
    m = 0
    n_sub = len(sids)
    lines = [[] for i in range(n_sub)]
    #for sub in range(n_sub):
    #    open(fld_split + '/rc_sub%i.tsv'%sub, 'w')
    try:
        subreddit = reddit.subreddit(subreddits[0])
        for submission2 in subreddit.top(limit=5000):
            try:
                submission = {}
                submission["id"] = submission2.id
                submission["score"] = submission2.score
                submission["domain"] = submission2.domain
                submission["permalink"] = submission2.permalink
                submission["title"] = submission2.title
                submission["num_comments"] = submission2.num_comments
                n += 1
                if n%1e4 == 0:
                    print('[%s] selected %.3fM from %.2fM comments'%(
                        args.dump_name, m/1e6, n/1e6))
                    for sub in range(n_sub):
                        print(' sub %i: %i'%(sub, len(lines[sub])))
                        if len(lines[sub]) > 0:
                            with open(fld_split + '/rc_sub%i.tsv'%sub, 'a', encoding='utf-8') as f:
                                f.write('\n'.join(lines[sub]) + '\n')
                            lines[sub] = []
                for top_level_comment in submission2.comments:
                    try:
                        comment = {}
                        comment["id"] = top_level_comment.id
                        if top_level_comment.author is not None:
                            comment["author"] = top_level_comment.author.name
                        else:
                            comment["author"] = "None"
                        comment["parent_id"] = top_level_comment.parent_id
                        comment["link_id"] = top_level_comment.link_id
                        comment["score"] = top_level_comment.score
                        comment["body"] = top_level_comment.body
                        if args.keep_keys:
                            k = '\t'.join([comment['link_id'], get_comment_id(comment), 'dep'])
                            if k not in keys.keys():
                                continue
                        if comment['body'] == '[deleted]':    # filter 1
                            continue
                        if '&gt;' in comment['body'] or '&lt;' in comment['body']:    # filter 3: '&gt;' means '>'
                            continue
                        sid = comment['link_id']
                        for sub in range(n_sub):
                            if sid in sids[sub]:
                                comment['n_char'] = len(comment['body'])
                                comment['body'] = norm_sentence(comment['body'], True)
                                if len(comment['body'].split()) < 2:    # filter 2
                                    break
                                lines[sub].append('\t'.join([str(comment[k]) for k in fields_comm]))
                                m += 1
                                break
                    except Exception:
                        traceback.print_exc()
            except Exception as e:
                print(e)
    except Exception as e:
        print(e)

    print('the rest...')
    for sub in range(n_sub):
        print(' sub %i: %i'%(sub, len(lines[sub])))
        with open(fld_split + '/rc_sub%i.tsv'%sub, 'a', encoding='utf-8') as f:
            f.write('\n'.join(lines[sub]))

    print('extract_comments done.\n')
    return m, n
def get_convo(sid, rootid, cid, submissions, comments, depth=10, txts2=[]):
    #print(depth)
    if depth == 0:
        return []
    comment = comments[cid]
    pid = comment['link_id']
    txts2.append(comment['body'])
    #print(txts2)
    for c in comments:
        if pid == comments[c]['link_id']:
            txts2.append(comments[c]['body'])
            print(comments[c]['body'])
    #print(txts2)
    #if args.max_len_type == 'w' and len(c['body'].split()) > args.max_len:    # len filter
    #    return []
    #if args.max_len_type == 'c' and int(c['n_char']) > args.max_len:
    #    return []
    return txts2
def filter_instance(src, tgt, info):
    # Remove offensive words:
    if args.bl_words and not args.leaves_only:
        bad_words = bl_words.extract_keywords(tgt)
        if bad_words:
            print("skip\toffensive\t%s\t%s\tbad word(s): %s" % (info, tgt, bad_words), file=sys.stderr)
            return True
    # Remove empty targets:
    tgttoks = tgt.split()
    if len(tgttoks) <= 1: # 1 means there is only a weight, and 0 means there's a bug..
        print("skip\temptytarget\t%s\t%s" % (info, tgt), file=sys.stderr)
        return True
    # Skip if word too long:
    toolong = False
    for w in tgttoks:
        if len(w) > 30:
            toolong = True
            break
    if toolong:
        print("skip\tlongword\t%s\t%s\tword too long" % (info, tgt), file=sys.stderr)
        return True
    srctoks = src.split()
    # Remove empty sources: (should probably uncomment, but left for reproducibility)
    #if len(srctoks) <= 1: # 1 means there is only a weight, and 0 means there's a bug..
    #    print("skip\temptysource\t%s\t%s" % (info, src), file=sys.stderr)
    #    return True
    # Remove too long turns:
    nsrctgt = len(srctoks) + len(tgttoks)
    if nsrctgt > 200:
        print("skip\ttoolong\t%s\t%s\tsrc+tgt too long, src=[%s]" % (info, tgt, src), file=sys.stderr)
        return True
    # Skip turns with URLs:
    srctgt = src + " " + tgt
    if "__url__" in srctgt:
        print("skip\turl\t%s\t%s\turl in tgt, or src =[%s]" % (info, tgt, src), file=sys.stderr)
        return True
    # Skip responses with meta data:
    if re.search("[\[\]\(\)]", srctgt) != None:
        print("skip\ttags\t%s\t%s\ttag in tgt (or src: [%s])" % (info, tgt, src), file=sys.stderr)
        return True
    # Skip yelling:
    if re.search("[A-Z]{5,}", srctgt) != None:
        print("skip\tallcaps\t%s\t%s\tall caps in tgt (or src: [%s])" % (info, tgt, src), file=sys.stderr)
        return True
    # Skip word repetitions:
    reps = False
    for i in range(2, len(tgttoks)):
        if tgttoks[i-2] == tgttoks[i] and tgttoks[i-1] == tgttoks[i]:
            reps = True
            break
    if reps:
        print("skip\trepetitions\t%s\t%s\ttoo many repetitions" % (info, tgt), file=sys.stderr)
        return True
    return False
import praw
import codecs
import os
subreddits = os.environ['subs'].split('","')
reddit = praw.Reddit(
    client_id="tc1xRzCUpCBQNg",
    client_secret="YSzJ2wK4mFyhnquUEH_ILxtkxSc",
    user_agent="my user agent"
)
f = codecs.open('./redditnew.txt', "a", "utf-8")
import re
def save_convo(path_rs, path_rc, path_out):
    print(path_rc)
    #print('reading submissions...')
    submissions = dict()
    with gzip.open(path_rs, mode='rt', encoding='utf-8') as f:
        for line in f:
            cells = line.strip('\n').strip().split('\t')
            try:
                submission = dict([(fields_subm[i], cells[i]) for i in range(len(fields_subm))])
            except Exception:
                #traceback.print_exc()
                continue
            submissions[get_submission_id(submission)] = submission

    #print('reading comments...')
    comments = dict()
    with gzip.open(path_rc, mode='rt', encoding='utf-8') as f:
        for line in f:
            cells = line.strip('\n').strip().split('\t')
            try:
                comment = dict([(fields_comm[i], cells[i]) for i in range(len(fields_comm))])
            except Exception:
                traceback.print_exc()
                continue
            comments[get_comment_id(comment)] = comment

    sorted_id = sorted([(
        comments[cid]['link_id'],
        comments[cid]['parent_id'],
        cid
    ) for cid in comments])

    n = len(comments)
    print('total comments: %i'%n)
    i = 0
    m = 0
    lines = []
    sum_resp_len = 0
    skip_id = {}
    if args.leaves_only:
        for _, pid, _ in sorted_id:
            skip_id[pid] = 1
        #print("leaves ratio : %f" % (len(skip_id) / len(sorted_id)), file=sys.stderr)
    for sid, pid, cid in sorted_id:
        i += 1
        if i%1e5 == 0:
            #print('selected %.2fM from %.1f/%.1fM comments'%(m/1e6, i/1e6, n/1e6), file=sys.stderr)
            if len(lines) > 0:
                with open(path_out, 'a', encoding="utf-8") as f:
                    f.write('\n'.join(lines) + '\n')
                lines = []
        subreddit = ''
        domain = ''
        if sid in submissions.keys():
            subreddit = submissions[sid]['permalink'].split('/')[2].lower()
            domain = submissions[sid]['domain'].lower()
        info = subreddit + '\t' + domain
        #if args.bl_subreddits:
        #    if not subreddit:
        #        print("skip\tmissing\t%s\tN/A\tmissing submission: %s" % (info, sid), file=sys.stderr)
        #        continue
        #    if subreddit in bl_subreddits:
        #        print("skip\tbad_subreddit\t%s\tN/A\toffensive subreddit: %s" % (info, subreddit), file=sys.stderr)
        #        continue
        comment = comments[cid]
        if comment['score'] == 'None':
            score = 0
        else:
            score = int(comment['score'])
        if score < args.min_score:    # filter 1
            #print("skip\tlow_score\t%s\t%s\tscore %d < %d" % (info, comment['body'], score, args.min_score), file=sys.stderr)
            continue
        txts = get_convo(sid, cid, cid, submissions, comments)    # filter 2
        #print(len(txts))
        if len(txts) < args.min_depth:    # filter 3
            #print("skip\tmin_depth\t%s\t%s\tdepth %d < %d: %s" % (info, comment['body'], len(txts), args.min_depth, "|".join(txts)), file=sys.stderr)
            continue
        for i in range(len(txts)):
            txts[i] = norm_sentence(txts[i], False)
            if args.leaves_only and args.clean:
                sc = '1.0'
                skip_target = False
                if args.discard_tgt_keys:
                    tgt_h = hashlib.sha224(txts[i].encode("utf-8")).hexdigest()
                    if tgt_h in keys_rm.keys():
                        skip_target = True
                if bl_words.extract_keywords(txts[i]) or skip_target:
                    sc = '0.0'
                txts[i] = sc + ' ' + txts[i]
        src = ' EOS '.join(txts[:-1])
        tgt = txts[-1]
        header = ','.join([sid, pid, cid])
        lines.append(header + '\t' + src + '\t' + tgt)
        sum_resp_len += len(tgt.split())
        m += 1

    #avg_len = sum_resp_len/m
    with open(path_out, 'a', encoding="utf-8") as f:
        f.write('\n'.join(lines) + '\n')
    print('finally selected %i/%i'%(m, n))#, avg_len))
    return m, n, 1
import random
import threading
from time import sleep
def extract():
    makedirs(fld_split)
    print(threading.active_count())
    for sub in subreddits:
        #sleep(random.randint(0,1))
        t = threading.Thread(target=extract_submissions, args=(fld_root_in, fld_split, sub,))
        t.daemon = True
        t.start()
    done = False
    while done == False:
        sleep(1)
        print(threading.active_count())
        if threading.active_count() == 1:
            done = True
    #sids, ms, ns, mc, nc = extract_submissions(fld_root_in, fld_split, size=args.split_size)
    #mc, nc = extract_comments(fld_root_in, fld_split, sids)
    #with open(fld_split + '/stat.tsv', 'a') as f:
    #    f.write('\t'.join(map(str, [args.dump_name, mc, nc, ms, ns])) + '\n')
def build_conv(fld_out):
    makedirs(fld_out)
    path_out = fld_out + '/%s.tsv'%args.dump_name
    print(path_out)
    if args.parallel:
        fs = open(fld_out + '/' + args.dump_name + '.stat.tsv', 'w')
    else:
        fs = open(fld_out + '/stat.tsv', 'a')
    sub = 0
    sum_m = 0
    sum_n = 0
    while True:
        path_rs = fld_split + '/rs_sub%i.tsv.gz'%sub
        if not os.path.exists(path_rs):
            if sub == 0:
                print('no such file: '+path_rs)
            break
        print('-'*10 + ' sub%i '%sub + '-'*10)
        path_rc = path_rs.replace('/rs_', '/rc_')
        m, n, avg_len = save_convo(path_rs, path_rc, path_out)
        fs.write('\t'.join([args.dump_name, str(sub), str(m), str(n), '%.2f'%avg_len]) + '\n')
        sum_m += m
        sum_n += n
        sub += 1

    fs.write('\t'.join([args.dump_name, 'all', str(sum_m), str(sum_n), '']) + '\n')
    fs.close()
def load_keys(key_file):
    d = {}
    with gzip.open(key_file, 'rt', encoding="utf-8") as f:
        for line in f:
            k = line.rstrip()
            if args.task == 'conv' and k.endswith('\tdep'):
                continue
            d[k] = 1
    return d
if args.freq_words:
    with open(args.freq_words, 'rt', encoding="utf-8") as f:
        n = 0
        for line in f:
            n += 1
            w = line.rstrip().lower()
            args.freq_words[w] = n

if args.bl_words:
    with open(args.bl_words, 'rt', encoding="utf-8") as f:
        for line in f:
            if line[0] == '#':
                continue
            w = line.rstrip()
            bl_words.add_keyword(w)

if args.bl_subreddits:
    with open(args.bl_subreddits, 'rt', encoding="utf-8") as f:
        for line in f:
            if line[0] == '#':
                continue
            s = line.rstrip().lower()
            bl_subreddits[s] = 1

if args.ignore_keys:
    args.keep_keys = None
    args.discard_tgt_keys = None
else:
    if args.keep_keys:
        keys = load_keys(args.keep_keys)
    if args.discard_tgt_keys:
        keys_rm = load_keys(args.discard_tgt_keys)

fld_root_in = args.reddit_input
fld_root_out = args.reddit_output
fld_split = fld_root_out + '/extract/%s'%(args.dump_name)

if args.task == 'extract':
    extract()
elif args.task == 'conv':
    fld_out = fld_root_out + '/conv'
    build_conv(fld_out)
else:
    print("Unknown task: %s" % args.task, file=sys.stderr)
| [
"[email protected]"
] | |
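A hedged sketch of what the two normalizers above do to a raw comment; the exact token spacing depends on the installed nltk TweetTokenizer version, so the sample input and output are illustrative only:

raw = "Check https://example.com #selfpromo b/c it works"
print(minimal_norm_sentence(raw))  # only strips backslashes, newlines, and tabs
print(gpt_norm_sentence(raw))      # roughly: Check __url__ because it works
# the URL collapses to __url__, the #tag word is dropped, and b/c expands to because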
8556153eee128df8d4a4b2c68b116c9fc5edad6e | 67416177cd9e221db0b20332c02dcc7680fcdd0e | /이것이 취업을 위한 코딩 테스트다/Chapter05_DFS_BFS/Q04_S.py | 0e89d038a59dde7bce9a39cb6bf567d78488ff5a | [] | no_license | svclaw2000/Algorithm | 4fe5e3bf50888b974df4f3d87387a003b5249352 | b6d92cf0d18997e9e973d5f731ecb44a7935d93a | refs/heads/main | 2023-06-21T21:50:13.089719 | 2021-07-11T14:18:47 | 2021-07-11T14:18:47 | 363,825,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 739 | py | from collections import deque
N, M = map(int, input().split())
maze = [list(map(int, input())) for _ in range(N)]
ds = ((-1, 0), (0, 1), (1, 0), (0, -1))
def dfs(x, y):
    # despite the name, this is a BFS: it expands nodes from a FIFO queue
    queue = deque()
    queue.append((x, y))
    while queue:
        x, y = queue.popleft()
        for dx, dy in ds:
            nx, ny = x + dx, y + dy
            if not 0 <= nx < N or not 0 <= ny < M or maze[nx][ny] == 0:  # skip if out of bounds or blocked by a monster
                continue
            if maze[nx][ny] == 1:  # on first visit, record the shortest distance and keep exploring
                maze[nx][ny] = maze[x][y] + 1
                queue.append((nx, ny))
    return maze[-1][-1]
print(dfs(0, 0)) | [
"[email protected]"
] | |
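Against the well-known sample for this maze problem (a 4x6 grid of 0/1 characters), the search above should print 15. This is a usage sketch of the stdin format rather than new logic:

# stdin:
#   4 6
#   101111
#   101010
#   101011
#   111011
# expected output: 15 (length of the shortest path from (0, 0) to (N-1, M-1))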
dedc1fed02afc5cafde18f60d850ddf2a3f2c7d7 | a1cd1135cd7bc3255e29632fe6c025cffd231285 | /cluster/server_base.py | efe3f07db8830108d0bfc6e7cd4883872a50401e | [] | no_license | liguopeng80/gcommon.py27 | 5f8d3ac9fe85c7134cfbb557ec06a61184b58fd1 | 900cd0717c7a9db90793752fd5cbf9a576286497 | refs/heads/master | 2023-08-11T16:01:16.566945 | 2021-10-10T07:08:54 | 2021-10-10T07:08:54 | 404,542,040 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,940 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# created: 2015-05-04
import optparse
import traceback
import os
import sys
import logging
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, maybeDeferred
from gcommon import cluster
from gcommon.logger import log_util
from gcommon.logger import server_logger
from gcommon.cluster.cluster_manager import ClusterManager
from gcommon.cluster.server_init import create_zookeeper_service
from gcommon.cluster.zkmanager import SlimZookeeperManager, SlimHashLockManager
from gcommon.utils.rpcparams import get_rpc_param, get_rpc_routing_key
from gcommon.utils import env
from gcommon.utils import proc
from gcommon.utils.cfgfile import DefaultConfigParser
from gcommon.proto import init_thrift_protocol_stack
init_thrift_protocol_stack()
logger = logging.getLogger('server')
CONFIG_FILE_NAME = 'default.conf'
PROJECT_LOG_DIR = '../../../log/'
PROJECT_CONFIG_DIR = '../../../etc/'
ENV_CONFIG_DIR = 'SLIM_CONFIG_DIR'
ENV_LOG_DIR = 'SLIM_LOG_DIR'
def get_log_folder(options):
    """Return the current server's log folder. Create it if it does not exist."""
    if options.log_folder:
        log_base = options.log_folder
    else:
        log_base = env.get_env(ENV_LOG_DIR)
        if not log_base:
            log_base = env.get_relative_folder(__file__, PROJECT_LOG_DIR)

    log_folder = os.path.join(log_base, options.service, '%s' % options.instance)

    # create the log folder if it does not exist yet
    if not os.path.isdir(log_folder):
        os.makedirs(log_folder)

    return log_folder
def get_config_file(options):
    """Config file, in priority order: command-line option, environment variable, project folder."""
    if options.config_file:
        return options.config_file

    config_dir = env.get_env(ENV_CONFIG_DIR)
    if config_dir:
        return os.path.join(config_dir, CONFIG_FILE_NAME)

    project_cfg_dir = env.get_relative_folder(__file__, PROJECT_CONFIG_DIR)
    return os.path.join(project_cfg_dir, CONFIG_FILE_NAME)
def parse_command_line(service_name, parser, all_args):
    """Parse command-line arguments."""
    # set usage
    usage_text = """Start %(service)s server.
%(app)s [-c override_config_file] [-i instance] [-l log_folder] [-sid service_id]"""
    usage_param = {
        'app': all_args[0],
        'service': service_name,
    }
    print usage_param
    parser.set_usage(usage_text % usage_param)

    # add arguments
    parser.add_option('-c', '--config-file', dest='config_file',
                      action='store', default='', help='server config file')
    parser.add_option('-s', '--service', dest='service',
                      action='store', default='', help='service name')
    parser.add_option('-i', '--instance', dest='instance',
                      action='store', default=0, help='instance sequence')
    parser.add_option('-l', '--log-folder', dest='log_folder',
                      action='store', default='', help='log folder')
    parser.add_option('--sid', dest='service_id',
                      action='store', default='', help='service ID')
    parser.add_option('-d', '--debug', dest='debug',
                      action='store_true', default=False, help='enable debug')

    # parse command
    all_args = all_args[1:]
    return parser.parse_args(all_args)
class SlimServer(object):
STATUS_CONTROLLER_CLASS = None
controller = None
SERVICE_NAME = 'undefined'
INSTANCE = 0
VERSION = 'undefined'
DEFAULT_CONFIG = {}
def init_server(self):
"""初始化服务器"""
pass
@inlineCallbacks
def start_server(self):
"""启动服务器"""
raise NotImplementedError('for sub-class')
def _get_service_specific_confg(self):
"""服务器特定的配置参数"""
return None
def __init__(self):
self.options = None
self.args = None
self.config_file = ''
self.log_dir = ''
self.cfg = DefaultConfigParser(self.DEFAULT_CONFIG)
# 解析命令行
parser = optparse.OptionParser()
options, args = parse_command_line(self.SERVICE_NAME, parser, sys.argv)
self.options, self.args = options, args
self.verify_command_line(parser)
# 初始化 logger
self.init_logger()
# 加载配置项
self.load_server_config()
self.full_server_name = proc.get_process_id(self.SERVICE_NAME, int(self.options.instance))
self.unique_server_name = proc.get_process_unique_id(self.SERVICE_NAME, int(self.options.instance))
def _init_controller(self):
if self._is_zookeeper_enabled_in_cfg() and self.STATUS_CONTROLLER_CLASS:
# todo: load init status from config file
# todo: client failover and server failover
self.controller = self.STATUS_CONTROLLER_CLASS(self)
self.controller.subscribe(self._on_server_status_changed)
cluster.Failover_Enabled = True
else:
self.STATUS_CONTROLLER_CLASS = None
cluster.Failover_Enabled = False
def _is_zookeeper_enabled(self):
"""应用服务器支持 zookeeper"""
return self._is_zookeeper_enabled_in_cfg() and self._is_zookeeper_enabled_on_server()
def _is_zookeeper_enabled_on_server(self):
"""应用服务器支持 zookeeper"""
return self.STATUS_CONTROLLER_CLASS is not None
def _is_zookeeper_enabled_in_cfg(self):
"""部署环境支持 zookeeper"""
return self.cfg.get_bool('zookeeper.enabled')
def is_failover_enabled(self):
return self._is_zookeeper_enabled()
def is_my_resource(self, key):
if not self._is_zookeeper_enabled():
# for local debug
return True
else:
return self._hl_manager.is_my_resource(key)
def is_running(self):
"""服务是否正在运行"""
if self._is_zookeeper_enabled():
return self.controller.is_running()
else:
            # services without a status controller are always considered running
return True
def _on_server_status_changed(self, _controller):
"""服务器状态改变(停止/运行)"""
# raise NotImplementedError('for sub-class')
pass
def verify_command_line(self, parser):
# if self.args:
# parser.error('No arguments needed.')
if self.options.service:
if self.options.service != self.SERVICE_NAME:
parser.error('bad service name. expected: %s, got: %s.'
% (self.SERVICE_NAME, self.options.service))
else:
self.options.service = self.SERVICE_NAME
if not self.options.instance:
self.options.instance = self.INSTANCE
pass
def load_server_config(self):
self.config_file = get_config_file(self.options)
params = self.get_config_params()
if self.config_file:
self.cfg.read(self.config_file, params)
def init_logger(self):
log_folder = get_log_folder(self.options)
# TODO: stdio_handler should be False in production environment
server_logger.init_logger(log_folder, add_stdio_handler=True)
def get_config_params(self):
cfg_root = env.get_folder(self.config_file)
        service_config = self._get_service_specific_config()
params = {
'SERVICE': self.options.service,
'INSTANCE': self.options.instance,
'CFGROOT': cfg_root,
}
if service_config:
params.update(service_config)
return params
def main(self):
        # start the controller if needed
self._init_controller()
ClusterManager.reg_app_server(self)
        # log server startup information
log_util.log_server_started(logger, self.SERVICE_NAME, self.VERSION)
reactor.callLater(0, self._service_main)
if self.controller:
reactor.callLater(0, self.controller.start)
reactor.run()
@inlineCallbacks
def _service_main(self):
def __error_back(failure):
stack = ''.join(traceback.format_tb(failure.getTracebackObject()))
logger.error('failure: \n%s', stack)
return failure
try:
d = maybeDeferred(self._service_main_with_exception)
d.addErrback(__error_back)
yield d
        except Exception as e:
logger.error('server exception: %s', e)
reactor.stop()
def _init_zookeeper_client(self):
# init zookeeper client
self.zk_service = create_zookeeper_service()
zk_manager = SlimZookeeperManager(self.controller, self.zk_service)
zk_manager.start()
# todo: remove testing code
test_hash_lock = False
if test_hash_lock:
from zkhashlock import HashLockObserver
self.__class__ = type(self.__class__.__name__, (self.__class__, HashLockObserver), {})
self.start_hash_lock(self)
def start_hash_lock(self, observer):
"""
:type observer: HashLockObserver
"""
assert self._is_zookeeper_enabled()
self._hl_manager = SlimHashLockManager(self.controller, self.zk_service)
self._hl_manager.set_observer(observer)
self._hl_manager.start(default_service=False)
@inlineCallbacks
def _service_main_with_exception(self):
if self._is_zookeeper_enabled():
self._init_zookeeper_client()
yield maybeDeferred(self.init_server)
yield maybeDeferred(self.start_server)
logger.debug('--------- STARTED ---------')
def get_routine_key(self):
if cluster.Failover_Enabled:
key = get_rpc_routing_key(self.cfg, self.unique_server_name)
else:
key = get_rpc_param(self.cfg, 'server_key', self.SERVICE_NAME)
return key
| [
"[email protected]"
] | |
4ce810b7a98c0b77591ee5e277abc2db6860b0c2 | 556db265723b0cc30ad2917442ed6dad92fd9044 | /tensorflow/python/profiler/profiler_v2_test.py | 42fbeba1e98be36f2c2c401a3db60bfbe1f1b5a8 | [
"MIT",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | graphcore/tensorflow | c1669b489be0e045b3ec856b311b3139858de196 | 085b20a4b6287eff8c0b792425d52422ab8cbab3 | refs/heads/r2.6/sdk-release-3.2 | 2023-07-06T06:23:53.857743 | 2023-03-14T13:04:04 | 2023-03-14T13:48:43 | 162,717,602 | 84 | 17 | Apache-2.0 | 2023-03-25T01:13:37 | 2018-12-21T13:30:38 | C++ | UTF-8 | Python | false | false | 4,412 | py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf 2.x profiler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import socket
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
from tensorflow.python.profiler import profiler_v2 as profiler
from tensorflow.python.profiler import trace
class ProfilerTest(test_util.TensorFlowTestCase):
def test_profile_exceptions(self):
logdir = self.get_temp_dir()
profiler.start(logdir)
with self.assertRaises(errors.AlreadyExistsError):
profiler.start(logdir)
profiler.stop()
with self.assertRaises(errors.UnavailableError):
profiler.stop()
# Test with a bad logdir, and it correctly raises exception and deletes
# profiler.
# pylint: disable=anomalous-backslash-in-string
profiler.start('/\/\/:123')
# pylint: enable=anomalous-backslash-in-string
with self.assertRaises(Exception):
profiler.stop()
profiler.start(logdir)
profiler.stop()
def test_save_profile(self):
logdir = self.get_temp_dir()
profiler.start(logdir)
with trace.Trace('three_times_five'):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = three * five
self.assertAllEqual(15, product)
profiler.stop()
file_list = gfile.ListDirectory(logdir)
self.assertEqual(len(file_list), 2)
for file_name in gfile.ListDirectory(logdir):
if gfile.IsDirectory(os.path.join(logdir, file_name)):
self.assertEqual(file_name, 'plugins')
else:
self.assertTrue(file_name.endswith('.profile-empty'))
profile_dir = os.path.join(logdir, 'plugins', 'profile')
run = gfile.ListDirectory(profile_dir)[0]
hostname = socket.gethostname()
overview_page = os.path.join(profile_dir, run,
hostname + '.overview_page.pb')
self.assertTrue(gfile.Exists(overview_page))
input_pipeline = os.path.join(profile_dir, run,
hostname + '.input_pipeline.pb')
self.assertTrue(gfile.Exists(input_pipeline))
tensorflow_stats = os.path.join(profile_dir, run,
hostname + '.tensorflow_stats.pb')
self.assertTrue(gfile.Exists(tensorflow_stats))
kernel_stats = os.path.join(profile_dir, run, hostname + '.kernel_stats.pb')
self.assertTrue(gfile.Exists(kernel_stats))
trace_file = os.path.join(profile_dir, run, hostname + '.trace.json.gz')
self.assertTrue(gfile.Exists(trace_file))
def test_profile_with_options(self):
logdir = self.get_temp_dir()
options = profiler.ProfilerOptions(
host_tracer_level=3, python_tracer_level=1)
profiler.start(logdir, options)
with trace.Trace('three_times_five'):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = three * five
self.assertAllEqual(15, product)
profiler.stop()
file_list = gfile.ListDirectory(logdir)
self.assertEqual(len(file_list), 2)
def test_context_manager_with_options(self):
logdir = self.get_temp_dir()
options = profiler.ProfilerOptions(
host_tracer_level=3, python_tracer_level=1)
with profiler.Profile(logdir, options):
with trace.Trace('three_times_five'):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = three * five
self.assertAllEqual(15, product)
file_list = gfile.ListDirectory(logdir)
self.assertEqual(len(file_list), 2)
if __name__ == '__main__':
test.main()
| [
"[email protected]"
] | |
2a812d45d9da8870b85821736661cd9b51ad3c61 | c7cebec6209866b02ee654cffeafe0f2cf0646f1 | /implementation/oceangame.py | 8c2fcb4e93ffdc7cf0b2c1af8292a4326c4b7a2d | [] | no_license | dondon17/algorithm | 5492cf039a96ecf5a944816bdca9b5755e5a2623 | da4d6ca1c21c31c6521a62b38855e0b9cf4b0d91 | refs/heads/master | 2023-05-02T14:54:35.185914 | 2021-05-30T07:31:40 | 2021-05-30T07:31:40 | 323,802,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 941 | py | n, m = map(int, input().split())
x, y, _dir = map(int, input().split())
check = [[0]*m for _ in range(n)] # create an n x m grid filled with 0
check[x][y] = 1 # mark the start position as visited
_map = []
for i in range(n):
_map.append(list(map(int, input().split())))
count = 1
turn_time = 0 # counts consecutive turns into cells that are already visited or sea
dx = [-1, 0, 1, 0]
dy = [0, 1, 0, -1]
def turnleft():
global _dir
_dir -= 1
if _dir == -1:
_dir = 3
while True:
turnleft()
nx = x + dx[_dir]
ny = y + dy[_dir]
if check[nx][ny] == 0 and _map[nx][ny] == 0:
check[nx][ny] = 1
x, y = nx, ny
count += 1
turn_time = 0
continue
else:
turn_time+=1
if turn_time == 4:
nx = x-dx[_dir]
ny = y-dy[_dir]
if _map[nx][ny] == 0:
x, y = nx, ny
else: break
turn_time = 0
print(count) | [
"[email protected]"
] | |
f45b44a0ce075baa3867c855c1d857223d4631c4 | ef4a1748a5bfb5d02f29390d6a66f4a01643401c | /algorithm/algorithm_week/week3/problem_3.py | 1dfac10225edd5d46d7a5f1b0aee2c62ba86901b | [] | no_license | websvey1/TIL | aa86c1b31d3efc177df45503d705b3e58b800f8e | 189e797ba44e2fd22a033d1024633f9e0128d5cf | refs/heads/master | 2023-01-12T10:23:45.677578 | 2019-12-09T07:26:59 | 2019-12-09T07:26:59 | 162,102,142 | 0 | 1 | null | 2022-12-11T16:31:08 | 2018-12-17T08:57:58 | Python | UTF-8 | Python | false | false | 498 | py | import sys
sys.stdin = open("problem_3.txt", "r")
T = int(input())
for tc in range(1, T+1):
comp = input()
total = input()
len_comp = len(comp)
len_total = len(total)
empty_list = [0] * len_comp
result = 0
# print(empty_list)
for i in range(len_comp):
for j in range(len_total):
if total[j] == comp[i]:
empty_list[i] += 1
result = max(empty_list)
print(f'#{tc} {result}')
##################### try solving this with a dict
| [
"[email protected]"
] | |
d4263f84ee75cfae0c1c0448bd6e638c64abaaec | f68e0b205bd3eb036905c60bd03a8d9c7f3b1d88 | /gluon-tutorials-zh-master/chapter_optimization/adagrad-gluon.py | 9fe3fd6be483681cf7e1719444f2b19373cdcb0f | [
"Apache-2.0"
] | permissive | SleepyBag/TrivialPractice | c31458d0c28afba158cb4090cb7013267ff54bb2 | 8e006fbe1425f62b52b2a5fe5b6404ea1883f3ab | refs/heads/master | 2020-03-22T00:34:37.415074 | 2018-06-30T14:02:04 | 2018-06-30T14:02:04 | 139,253,389 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,188 | py | import mxnet as mx
from mxnet import gluon, nd
from mxnet.gluon import nn
import sys
sys.path.append('..')
import utils
# Generate the dataset.
num_inputs = 2
num_examples = 1000
true_w = [2, -3.4]
true_b = 4.2
features = nd.random.normal(scale=1, shape=(num_examples, num_inputs))
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += nd.random.normal(scale=0.01, shape=labels.shape)
# Linear regression model.
net = nn.Sequential()
net.add(nn.Dense(1))
learning_rate = .01
net.collect_params().initialize(mx.init.Normal(sigma=1), force_reinit=True)
trainer = gluon.Trainer(net.collect_params(), 'sgd',
{'learning_rate': learning_rate})
utils.optimize(batch_size=10, trainer=trainer, num_epochs=5, decay_epoch=None,
log_interval=10, features=features, labels=labels, net=net)
net.collect_params().initialize(mx.init.Normal(sigma=1), force_reinit=True)
trainer = gluon.Trainer(net.collect_params(), 'adagrad',
{'learning_rate': learning_rate})
utils.optimize(batch_size=10, trainer=trainer, num_epochs=5, decay_epoch=None,
log_interval=10, features=features, labels=labels, net=net)
| [
"[email protected]"
] | |
86fee19decbe6fbd99256621b5d77459a4c80b51 | a6cc157fdd1a15e9d451af653cf3eadbdac60885 | /cpp_develop/catkin_ws/src/ros_arduino_bridge/ros_arduino_python/src/ros_arduino_python/calibrate_linear.py | 3ab1f901ef9a959a737978f37d4c3bb5cebabeba | [] | no_license | miaoruonan/morn | 9b4f0b64241c12140e8adc571579974d9e35a14b | 88e353ce480265b0b0b12f22a67ce13dd2ff42f3 | refs/heads/master | 2021-06-26T05:18:58.321932 | 2021-01-28T07:21:51 | 2021-01-28T07:25:16 | 214,172,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,792 | py | #!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist, Point
from math import copysign, sqrt, pow
import tf
class CalibrateLinear():
def __init__(self):
#give the node a name
rospy.init_node('calibrate_linear', anonymous=False)
#set rospy to execute a shutdown function when terminating the script
rospy.on_shutdown(self.shutdown)
#How fast will we check the odometry values?
self.rate = 10
r = rospy.Rate(self.rate)
#set the distance to travel
self.test_distance = 1.5
self.speed = 0.2
self.tolerance = 0.01
self.odom_linear_scale_correction = 1.0
self.start_test = True
#Publisher to control the robot's speed
self.cmd_vel = rospy.Publisher('/cmd_vel', Twist, queue_size=5)
#The base frame is base_footprint for the robot
self.base_frame = rospy.get_param('~base_frame', '/base_footprint')
#The odom frame is usually just /odom
self.odom_frame = rospy.get_param('~odom_frame', '/odom')
#initialize the tf listener
self.tf_listener = tf.TransformListener()
#give tf some time to fill its buffer
rospy.sleep(2)
#make sure we see the odom and base frames
self.tf_listener.waitForTransform(self.odom_frame, self.base_frame, rospy.Time(), rospy.Duration(60.0))
self.position = Point()
#get the starting position from the tf transform between the odom and base frames
self.position = self.get_position()
x_start = self.position.x
y_start = self.position.y
move_cmd = Twist()
while not rospy.is_shutdown():
#Stop the robot by default
move_cmd = Twist()
if self.start_test:
#get the current position from the tf transform between the odom and base frames
self.position = self.get_position()
#compute the euclidean distance from the target point
distance = sqrt(pow((self.position.x - x_start), 2) +
pow((self.position.y - y_start), 2))
#correct the estimate distance by the correction factor
distance *= self.odom_linear_scale_correction
#How close are we?
error = distance - self.test_distance
#are we close enough?
if not self.start_test or abs(error) < self.tolerance:
self.start_test = False
                    # test finished: log a clear completion message instead of a bare boolean
                    rospy.loginfo("Test complete: error %.3f m is within tolerance" % error)
else:
#if not, move in the appropriate direction
move_cmd.linear.x = copysign(self.speed, -1*error)
else:
self.position = self.get_position()
x_start = self.position.x
y_start = self.position.y
self.cmd_vel.publish(move_cmd)
r.sleep()
#stop the robot
self.cmd_vel.publish(Twist())
def get_position(self):
#get the current transform between the odom and base frames
try:
(trans, rot) = self.tf_listener.lookupTransform(self.odom_frame, self.base_frame, rospy.Time(0))
except (tf.Exception, tf.ConnectivityException, tf.LookupException):
rospy.loginfo("TF exception")
return
return Point(*trans)
def shutdown(self):
#Always stop the robot when shutting down the node
rospy.loginfo("Stopping the robot")
self.cmd_vel.publish(Twist())
rospy.sleep(1)
if __name__ == '__main__':
try:
CalibrateLinear()
rospy.spin()
except:
rospy.loginfo("Calibration terminated.")
| [
"[email protected]"
] | |
7ffae845f088fb2c95c7c37d9f6e0559af611adc | 6d9fbe6e6a2abfd8455e92f6dba67a5f02d87f41 | /lib/phonenumbers/data/region_AC.py | 243d707a8213496a114091c66baa4551350d4629 | [] | no_license | JamesBrace/InfluenceUWebLaunch | 549d0b48ff3259b139cb891a19cb8b5382ffe2c8 | 332d25940e4b1b45a7a2a8200f77c8413543b199 | refs/heads/master | 2021-09-04T04:08:47.594900 | 2018-01-15T16:49:29 | 2018-01-15T16:49:29 | 80,778,825 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,094 | py | """Auto-generated file, do not edit by hand. AC metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_AC = PhoneMetadata(id='AC', country_code=247, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[46]\\d{4}|[01589]\\d{5}', possible_number_pattern='\\d{5,6}', possible_length=(5, 6)),
fixed_line=PhoneNumberDesc(national_number_pattern='6[2-467]\\d{3}', possible_number_pattern='\\d{5}', example_number='62889', possible_length=(5,)),
mobile=PhoneNumberDesc(national_number_pattern='4\\d{4}', possible_number_pattern='\\d{5}', example_number='40123', possible_length=(5,)),
toll_free=PhoneNumberDesc(),
premium_rate=PhoneNumberDesc(),
shared_cost=PhoneNumberDesc(),
personal_number=PhoneNumberDesc(),
voip=PhoneNumberDesc(),
pager=PhoneNumberDesc(),
uan=PhoneNumberDesc(national_number_pattern='[01589]\\d{5}', possible_number_pattern='\\d{6}', example_number='542011', possible_length=(6,)),
voicemail=PhoneNumberDesc(),
no_international_dialling=PhoneNumberDesc())
| [
"[email protected]"
] | |
5f751d070b7b8f37c7891268731da16b714824f3 | 066286643b062e681e3f35c83ac8fa9187a402db | /Section4/py_project/Lib/site-packages/django/core/management/commands/dumpdata.py | 31df5ac2440c294424bf6015d86f4adb5b3e1751 | [
"MIT"
] | permissive | PacktPublishing/Real-World-Projects-in-Python-3.x | fd4664ac70e67a922eab422a6c339d65ab1aee90 | ac1cddb400e4e8b9bad3eea0eac3c33f06867d7a | refs/heads/master | 2023-02-13T10:21:47.780189 | 2023-01-30T08:40:38 | 2023-01-30T08:40:38 | 183,572,451 | 18 | 9 | MIT | 2022-12-13T02:26:39 | 2019-04-26T06:40:50 | Python | UTF-8 | Python | false | false | 8,479 | py | import warnings
from collections import OrderedDict
from django.apps import apps
from django.core import serializers
from django.core.management.base import BaseCommand, CommandError
from django.core.management.utils import parse_apps_and_model_labels
from django.db import DEFAULT_DB_ALIAS, router
class ProxyModelWarning(Warning):
pass
class Command(BaseCommand):
help = (
"Output the contents of the database as a fixture of the given format "
"(using each model's default manager unless --all is specified)."
)
def add_arguments(self, parser):
parser.add_argument(
'args', metavar='app_label[.ModelName]', nargs='*',
help='Restricts dumped data to the specified app_label or app_label.ModelName.',
)
parser.add_argument(
'--format', default='json',
help='Specifies the output serialization format for fixtures.',
)
parser.add_argument(
'--indent', type=int,
help='Specifies the indent level to use when pretty-printing output.',
)
parser.add_argument(
'--database',
default=DEFAULT_DB_ALIAS,
help='Nominates a specific database to dump fixtures from. '
'Defaults to the "default" database.',
)
parser.add_argument(
'-e', '--exclude', action='append', default=[],
help='An app_label or app_label.ModelName to exclude '
'(use multiple --exclude to exclude multiple apps/models).',
)
parser.add_argument(
'--natural-foreign', action='store_true', dest='use_natural_foreign_keys',
help='Use natural foreign keys if they are available.',
)
parser.add_argument(
'--natural-primary', action='store_true', dest='use_natural_primary_keys',
help='Use natural primary keys if they are available.',
)
parser.add_argument(
'-a', '--all', action='store_true', dest='use_base_manager',
help="Use Django's base manager to dump all models stored in the database, "
"including those that would otherwise be filtered or modified by a custom manager.",
)
parser.add_argument(
'--pks', dest='primary_keys',
help="Only dump objects with given primary keys. Accepts a comma-separated "
"list of keys. This option only works when you specify one model.",
)
parser.add_argument(
'-o', '--output',
help='Specifies file to which the output is written.'
)
def handle(self, *app_labels, **options):
format = options['format']
indent = options['indent']
using = options['database']
excludes = options['exclude']
output = options['output']
show_traceback = options['traceback']
use_natural_foreign_keys = options['use_natural_foreign_keys']
use_natural_primary_keys = options['use_natural_primary_keys']
use_base_manager = options['use_base_manager']
pks = options['primary_keys']
if pks:
primary_keys = [pk.strip() for pk in pks.split(',')]
else:
primary_keys = []
excluded_models, excluded_apps = parse_apps_and_model_labels(excludes)
if not app_labels:
if primary_keys:
raise CommandError("You can only use --pks option with one model")
app_list = OrderedDict.fromkeys(
app_config for app_config in apps.get_app_configs()
if app_config.models_module is not None and app_config not in excluded_apps
)
else:
if len(app_labels) > 1 and primary_keys:
raise CommandError("You can only use --pks option with one model")
app_list = OrderedDict()
for label in app_labels:
try:
app_label, model_label = label.split('.')
try:
app_config = apps.get_app_config(app_label)
except LookupError as e:
raise CommandError(str(e))
if app_config.models_module is None or app_config in excluded_apps:
continue
try:
model = app_config.get_model(model_label)
except LookupError:
raise CommandError("Unknown model: %s.%s" % (app_label, model_label))
app_list_value = app_list.setdefault(app_config, [])
# We may have previously seen a "all-models" request for
# this app (no model qualifier was given). In this case
# there is no need adding specific models to the list.
if app_list_value is not None:
if model not in app_list_value:
app_list_value.append(model)
except ValueError:
if primary_keys:
raise CommandError("You can only use --pks option with one model")
# This is just an app - no model qualifier
app_label = label
try:
app_config = apps.get_app_config(app_label)
except LookupError as e:
raise CommandError(str(e))
if app_config.models_module is None or app_config in excluded_apps:
continue
app_list[app_config] = None
# Check that the serialization format exists; this is a shortcut to
# avoid collating all the objects and _then_ failing.
if format not in serializers.get_public_serializer_formats():
try:
serializers.get_serializer(format)
except serializers.SerializerDoesNotExist:
pass
raise CommandError("Unknown serialization format: %s" % format)
def get_objects(count_only=False):
"""
Collate the objects to be serialized. If count_only is True, just
count the number of objects to be serialized.
"""
models = serializers.sort_dependencies(app_list.items())
for model in models:
if model in excluded_models:
continue
if model._meta.proxy and model._meta.proxy_for_model not in models:
warnings.warn(
"%s is a proxy model and won't be serialized." % model._meta.label,
category=ProxyModelWarning,
)
if not model._meta.proxy and router.allow_migrate_model(using, model):
if use_base_manager:
objects = model._base_manager
else:
objects = model._default_manager
queryset = objects.using(using).order_by(model._meta.pk.name)
if primary_keys:
queryset = queryset.filter(pk__in=primary_keys)
if count_only:
yield queryset.order_by().count()
else:
yield from queryset.iterator()
try:
self.stdout.ending = None
progress_output = None
object_count = 0
# If dumpdata is outputting to stdout, there is no way to display progress
if output and self.stdout.isatty() and options['verbosity'] > 0:
progress_output = self.stdout
object_count = sum(get_objects(count_only=True))
stream = open(output, 'w') if output else None
try:
serializers.serialize(
format, get_objects(), indent=indent,
use_natural_foreign_keys=use_natural_foreign_keys,
use_natural_primary_keys=use_natural_primary_keys,
stream=stream or self.stdout, progress_output=progress_output,
object_count=object_count,
)
finally:
if stream:
stream.close()
except Exception as e:
if show_traceback:
raise
raise CommandError("Unable to serialize database: %s" % e)
| [
"[email protected]"
] | |
9f628a3fcb3ba15724f1abcb004ff2ed34f398a2 | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/nistData/atomic/nonNegativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonNegativeInteger-enumeration-3-2.py | b576ac7679de66f6d806e4fa821974abf9447594 | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 648 | py | from output.models.nist_data.atomic.non_negative_integer.schema_instance.nistschema_sv_iv_atomic_non_negative_integer_enumeration_3_xsd.nistschema_sv_iv_atomic_non_negative_integer_enumeration_3 import NistschemaSvIvAtomicNonNegativeIntegerEnumeration3
from output.models.nist_data.atomic.non_negative_integer.schema_instance.nistschema_sv_iv_atomic_non_negative_integer_enumeration_3_xsd.nistschema_sv_iv_atomic_non_negative_integer_enumeration_3 import NistschemaSvIvAtomicNonNegativeIntegerEnumeration3Type
obj = NistschemaSvIvAtomicNonNegativeIntegerEnumeration3(
value=NistschemaSvIvAtomicNonNegativeIntegerEnumeration3Type.VALUE_9176
)
| [
"[email protected]"
] | |
52164c85a80608ac931cec19edf6440abf757d17 | e48a43af1b285f19137cf1c839ea5836312c6793 | /toutiao.py | f330e711018b71ad4b9619c8c133b9cad3456107 | [] | no_license | willfengis/toutiao | 5f933aadebd6a24d5f4a69c162a61efb4dcd68d0 | 10fc31a46368f9cc08e5889f4a644a2e0ffc7028 | refs/heads/master | 2021-01-21T10:47:17.788795 | 2017-08-31T10:09:27 | 2017-08-31T10:09:27 | 101,987,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,909 | py | import requests
from urllib.parse import urlencode
from requests.exceptions import RequestException
import json
from bs4 import BeautifulSoup
import re
import pymongo
import os
from multiprocessing import Pool
from hashlib import md5
from com.it.ttconfig import *
client = pymongo.MongoClient(MONGO_URL,connect=False)
db = client[MONGO_DB]
def getHtml(Url):
try:
Myhtml = requests.get(Url,headers=Myheader)
if Myhtml.status_code == 200:
return Myhtml.text
return "getHtmlerror"
except RequestException:
return "getHtmlerror"
def getStr(Myhtml):
Data = json.loads(Myhtml)
if Data and "data" in Data.keys():
for url1 in Data.get("data"):
yield url1.get("share_url")
def getDetail(Url1):
try:
Myhtml = requests.get(Url1,headers=Myheader)
if Myhtml.status_code == 200:
return Myhtml.text
return "getDetailHtmlerror"
except RequestException:
print("Url1 error")
return "getDetailHtmlerror"
def getStr2(Myhtml2,Url1):
Bs4html = BeautifulSoup(Myhtml2,"lxml")
Mytitle = Bs4html.select("title")[0].get_text()
Myrule = re.compile("BASE_DATA.galleryInfo.*?gallery:(.*?),\n\s*?siblingList",re.S)
Mystr2 = re.search(Myrule,Myhtml2)
if Mystr2:
json_str2 = json.loads(Mystr2.group(1))
if json_str2 and "sub_images" in json_str2.keys():
sub_image = json_str2.get("sub_images")
image = [x.get("url") for x in sub_image]
for imageurlone in image:downLoad(imageurlone)
return {"title":Mytitle,"imageurl":image,"Url":Url1}
return "url2error"
return "url2error"
def saveMongo(imageurl):
if db[MONGO_TABLE].insert(imageurl):
print("url save to mongodb ok")
return True
return False
def downLoad(imageurlone):
try:
Myhtml = requests.get(imageurlone,headers=Myheader)
        if Myhtml.status_code == 200:
            saveImage(Myhtml.content)
            return None
        return "getHtmlerror"  # non-200 response
except RequestException:
print("downloadImagError")
return "getHtmlerror"
def saveImage(content):
path = "{0}/{1}.{2}".format(os.getcwd()+"/image",md5(content).hexdigest(),"jpg")
if not os.path.exists(path):
with open(path,"wb") as f:
f.write(content)
f.close()
print("downimage successful:"+ path)
def main(Offset):
Data = {'offset': Offset, 'format': 'json', 'keyword': Find, 'autoload': 'true', 'count': '20', 'cur_tab': '1'}
Url = "http://www.toutiao.com/search_content/?" + urlencode(Data)
Myhtml = getHtml(Url)
for Url1 in getStr(Myhtml):
Myhtml2 = getDetail(Url1)
imageurl = getStr2(Myhtml2,Url1)
if imageurl != "url2error":
saveMongo(imageurl)
if __name__ == "__main__":
page = [i*20 for i in range(0,6)]
pool = Pool()
pool.map(main,page)
| [
"[email protected]"
] | |
651465018b3370f246d12c0b45dead9a006898ac | 5488617b1b05c436b1f8c8642ea75ca754719f8d | /phenomenological/Single_TOP/select_scripts/script/script_1065.py | 07afd7a1b201fed85dd725acf70a9ea4ba7a34d7 | [] | no_license | wenxingfang/TW_Top | fdb1ba136be6ace8fdacaade58cb4ca4fcdc3c9e | 389e76c904d08a59d9141b9b66ec15d2583f8e9a | refs/heads/master | 2021-02-05T06:54:27.908688 | 2020-02-28T13:24:00 | 2020-02-28T13:24:00 | 243,754,087 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | import os
import ROOT
ROOT.gSystem.Load("/user/wenxing/ST_TW_channel/CMSSW_8_0_25/src/Phynomenological_study/Single_TOP/select_scripts/select_save_parton_C.so")
ROOT.gROOT.ProcessLine('select_save_parton("1065")')
print 'Done!' | [
"[email protected]"
] | |
781541a1f0b86a43ce728e170b53055a99749d93 | 12123592a54c4f292ed6a8df4bcc0df33e082206 | /py2/pgms/sec6/flask/db_create.py | 667c9a87acb3e16e1bbb26111a89d32eaae62ba8 | [] | no_license | alvinooo/advpython | b44b7322915f832c8dce72fe63ae6ac7c99ef3d4 | df95e06fd7ba11b0d2329f4b113863a9c866fbae | refs/heads/master | 2021-01-23T01:17:22.487514 | 2017-05-30T17:51:47 | 2017-05-30T17:51:47 | 92,860,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | #!venv/bin/python
# db_create.py - create database
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
from app import db
import os.path
db.create_all()
if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
else:
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, api.version(SQLALCHEMY_MIGRATE_REPO))
| [
"[email protected]"
] | |
f1b609c2eddd00d477feba9484dbec32eaf69cf5 | 6b4ab6543a3ead51a4b26dd750b01925c6a59a81 | /rawe/newton/nmpcMaps.py | 0115715b43911eb057a78714303a19406993d614 | [] | no_license | jaeandersson/rawesome | f5139f255d8de90467329e3d0d35b9f32e5d459b | 67e8eefd6eedd9df563084c9a3238733cfe21a47 | refs/heads/master | 2021-01-18T10:30:55.579069 | 2013-04-04T11:06:38 | 2013-04-04T11:06:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,039 | py | import numpy as np
import casadi as C
class VectorizedReadOnlyNmpcMap(object):
"""
Initialize this with a vector (like MX or numpy.array)
and it will provide efficient slices with xVec/uVec/pVec.
It will also provide lookup(name,timestep) functionality
"""
def __init__(self,dae,nk,vec):
self._nk = nk
self._xNames = dae.xNames()
self._uNames = dae.uNames()
self._pNames = dae.pNames()
self._vec = vec
xSize = len(self._xNames)
uSize = len(self._uNames)
pSize = len(self._pNames)
mapSize = xSize*(self._nk+1) + uSize*self._nk + pSize
if type(self._vec) in [C.MX,C.SXMatrix]:
assert (mapSize == self._vec.size()), "vector size is wrong"
elif type(self._vec) in [np.array,np.ndarray]:
assert (mapSize == self._vec.size), "vector size is wrong"
else:
raise ValueError("unrecognized type: "+str(type(self._vec)))
# set up xVec,uVec,pVec
vecIdx = 0
self._p = self._vec[vecIdx:vecIdx+pSize]
vecIdx += pSize
self._X = []
self._U = []
for ts in range(self._nk):
self._X.append(self._vec[vecIdx:vecIdx+xSize])
vecIdx += xSize
self._U.append(self._vec[vecIdx:vecIdx+uSize])
vecIdx += uSize
self._X.append(self._vec[vecIdx:vecIdx+xSize])
vecIdx += xSize
assert (vecIdx == mapSize)
# set up indexes
self._xIdx = {}
self._uIdx = {}
self._pIdx = {}
for k,name in enumerate(self._xNames):
self._xIdx[name] = k
for k,name in enumerate(self._uNames):
self._uIdx[name] = k
for k,name in enumerate(self._pNames):
self._pIdx[name] = k
def vectorize(self):
return self._vec
def xVec(self,timestep):
assert (timestep != None), "please set timestep"
assert (timestep <= self._nk), "timestep too large"
return self._X[timestep]
def uVec(self,timestep):
assert (timestep != None), "please set timestep"
assert (timestep < self._nk), "timestep too large"
return self._U[timestep]
def pVec(self):
return self._p
def lookup(self,name,timestep=None):
if name in self._xIdx:
return self.xVec(timestep)[self._xIdx[name]]
elif name in self._uIdx:
return self.uVec(timestep)[self._uIdx[name]]
elif name in self._pIdx:
assert (timestep == None), "don't set timestep for parameter"
return self.pVec()[self._pIdx[name]]
else:
raise NameError('unrecognized name "'+name+'"')
class WriteableNmpcMap(object):
"""
Initialize this with a dae and number of control intervals and
it will set all elements to None. Then you can call setVal() to set them
and lookup() or vectorize() to retrieve them.
You can also call getMissing() to get a summary of elements which haven't been set
"""
def __init__(self,dae,nk):
self._nk = nk
self._xNames = dae.xNames()
self._uNames = dae.uNames()
self._pNames = dae.pNames()
self._X = np.resize(np.array([None]),(self._nk+1,dae.xVec().size()))
self._U = np.resize(np.array([None]),(self._nk,dae.uVec().size()))
self._p = np.resize(np.array([None]),dae.pVec().size())
self._xIdx = {}
self._uIdx = {}
self._pIdx = {}
for k,name in enumerate(self._xNames):
self._xIdx[name] = k
for k,name in enumerate(self._uNames):
self._uIdx[name] = k
for k,name in enumerate(self._pNames):
self._pIdx[name] = k
def vectorize(self):
outs = [self.pVec()]
for k in range(self._nk):
outs.append(self.xVec(k))
outs.append(self.uVec(k))
outs.append(self.xVec(self._nk))
return np.concatenate(outs)
def xVec(self,timestep):
assert (timestep != None), "please set timestep"
assert (timestep <= self._nk), "timestep too large"
return self._X[timestep,:]
def uVec(self,timestep):
assert (timestep != None), "please set timestep"
assert (timestep < self._nk), "timestep too large"
return self._U[timestep,:]
def pVec(self):
return self._p
def lookup(self,name,timestep=None):
if name in self._xIdx:
assert (timestep != None), "please set timestep"
assert (timestep <= self._nk), "timestep too large"
return self._X[timestep][self._xIdx[name]]
elif name in self._uIdx:
assert (timestep != None), "please set timestep"
assert (timestep < self._nk), "timestep too large"
return self._U[timestep][self._uIdx[name]]
elif name in self._pIdx:
assert (timestep == None), "don't set timestep for parameter"
return self._p[self._pIdx[name]]
else:
raise NameError('unrecognized name "'+name+'"')
def setVal(self,name,val,timestep=None):
if name in self._xIdx:
if timestep == None:
for k in range(self._nk+1):
self.setVal(name,val,timestep=k)
return
assert (timestep <= self._nk), "timestep too large"
self._X[timestep,self._xIdx[name]] = val
elif name in self._uIdx:
if timestep == None:
for k in range(self._nk):
self.setVal(name,val,timestep=k)
return
assert (timestep < self._nk), "timestep too large"
self._U[timestep,self._uIdx[name]] = val
elif name in self._pIdx:
assert (timestep == None), "don't set timestep for parameter"
self._p[self._pIdx[name]] = val
else:
raise NameError('unrecognized name "'+name+'"')
def getMissing(self):
xuMissing = {}
for name in self._xNames:
missing = []
for k in range(self._nk+1):
if self.lookup(name,timestep=k) is None:
missing.append(k)
if len(missing)>0:
xuMissing[name] = missing
for name in self._uNames:
missing = []
for k in range(self._nk):
if self.lookup(name,timestep=k) is None:
missing.append(k)
if len(missing)>0:
xuMissing[name] = missing
pMissing = []
for name in self._pNames:
if self.lookup(name) is None:
pMissing.append(name)
return (xuMissing,pMissing)
class NmpcOutputMapGenerator(object):
"""
Something which will efficiently generate a map of all outputs.
    The outputs are all computed at once to ensure no (additional) CSEs are generated.
On initialization, the function which creates all the outputs from a dv vector is created.
Then you use it to initialize an OutputMap object
"""
def __init__(self,ocp):
(fAll,(f0,outputNames0)) = ocp.dae.outputsFun()
self._outputNames0 = outputNames0
self._outputNames = ocp.dae.outputNames()
assert (len(self._outputNames0) == f0.getNumOutputs())
assert (len(self._outputNames) == fAll.getNumOutputs())
self._nk = ocp.nk
outs = []
for timestepIdx in range(self._nk):
if f0 is not None:
outs += f0.call([ocp._dvMap.xVec(timestepIdx),
ocp._dvMap.uVec(timestepIdx),
ocp._dvMap.pVec()])
# make the function
self.fEveryOutput = C.MXFunction([ocp._dvMap.vectorize()],outs)
self.fEveryOutput.init()
class NmpcOutputMap(object):
"""
Initialize this with an outputMapGenerator and a vector of design vars.
If you pass a symbolic vector you get symbolic outputs with MXFunction.call().
If you pass a numeric vector you get numeric outputs with MXFunction.setInput(); MXFunction.evaluate(); ..
"""
def __init__(self,outputMapGenerator,dvs):
if type(dvs) == C.MX:
allOutputs = outputMapGenerator.fEveryOutput.call([dvs])
elif type(dvs) == C.SXMatrix:
allOutputs = outputMapGenerator.fEveryOutput.eval([dvs])
elif type(dvs) in [np.ndarray,C.DMatrix]:
outputMapGenerator.fEveryOutput.setInput(dvs,0)
outputMapGenerator.fEveryOutput.evaluate()
allOutputs = [np.array(outputMapGenerator.fEveryOutput.output(k)).squeeze()
for k in range(outputMapGenerator.fEveryOutput.getNumOutputs())]
else:
raise TypeError("OutputMap got unrecognized design vector type: "+str(type(dvs)))
self._outputNames0 = outputMapGenerator._outputNames0
self._outputNames = outputMapGenerator._outputNames
self._numOutputs0 = len(self._outputNames0)
self._numOutputs = len(self._outputNames)
self._nk = outputMapGenerator._nk
self._outputs0 = {}
for name in self._outputNames0:
self._outputs0[name] = np.resize(np.array([None]),self._nk)
outs = []
k = 0
for timestepIdx in range(self._nk):
# outputs defined at tau_i0
outs = allOutputs[k:k+self._numOutputs0]
k += self._numOutputs0
for name,val in zip(self._outputNames0,outs):
self._outputs0[name][timestepIdx] = val
def lookup(self,name,timestep):
if name not in self._outputNames:
raise NameError("couldn't find \""+name+"\"")
if name not in self._outputs0:
raise ValueError("sorry, \""+name+"\" depends on algebraic variable or ddt(differential variable) \
and Multiple Shooting cannot access it")
assert (timestep != None), "please set timestep"
return self._outputs0[name][timestep]
| [
"[email protected]"
] | |
c384ec15b71a910edb23a3ece597828f1918efc1 | ba80ca143ba35fd481730786a27ebdb1f88ce835 | /algorithm/Daily Coding Problem/전체탐색/5.py | b00f4a6021504dc364e8ea5b32dbdadd7ce6fbe1 | [] | no_license | uiandwe/TIL | c541020b65adc53578aeb1c3ba4c6770b3b2e8b3 | 186544469374dd0279099c6c6aa7555ee23e42fe | refs/heads/master | 2022-02-15T08:33:07.270573 | 2022-01-01T15:22:54 | 2022-01-01T15:22:54 | 63,420,931 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | # -*- coding: utf-8 -*-
"""
Given an array of integers where every integer occurs three times except for one integer,
which only occurs once, find and return the non-duplicated integer.
For example, given [6, 1, 3, 3, 3, 6, 6], return 1. Given [13, 19, 13, 13], return 19.
Do this in O(N) time and O(1) space.
"""
def getSingle(arr):
ones = 0
twos = 0
n = len(arr)
for i in range(n):
twos = twos | (ones & arr[i])
ones = ones ^ arr[i]
common_bit_mask = ~(ones & twos)
ones &= common_bit_mask
twos &= common_bit_mask
return ones
if __name__ == '__main__':
print(getSingle([13, 1, 13, 13]))
print(getSingle([13, 1, 13, 13, 2, 3, 2, 3, 2, 3]))
| [
"[email protected]"
] | |
bcc9c809b5e3b2f36a892d4ae81d9509a2aba905 | 4cbc8b81d197bc392d1b57856254300331b9738f | /python/voz.py | 9a63b10843050891efa76b73fcb08d8c72ccf6b6 | [
"MIT"
] | permissive | vcatafesta/chili | 87b9606f17cda645ba44cbf2bb4cc4637e18d211 | 5c734ac88454db76eb2f4e92c13364a5bbc7a93a | refs/heads/main | 2023-09-01T01:39:09.457448 | 2023-08-29T21:23:28 | 2023-08-29T21:23:28 | 171,972,556 | 2 | 2 | null | 2019-02-22T01:38:49 | 2019-02-22T01:26:46 | null | UTF-8 | Python | false | false | 260 | py | # coding: cp860
import speech_recognition as sr
r = sr.Recognizer()
with sr.Microphone() as s:
r.adjust_for_ambient_noise(s)
while True:
audio = r.listen(s)
print("Voce respondeu:", r.recognize_google(audio, language = 'pt'))
| [
"[email protected]"
] | |
00fc5380aacd4b854d68cc5cd1802c1879b9e2e4 | 260306e56beaaa5ecad8f783d094ecbabef4705b | /blog.py | 3bfbbfd1ea4129429f2520d6cd12680037202c45 | [] | no_license | xxnbyy/mytools | dd5f09033b2b794b3e56bf16b9d4f28fe1377503 | 88d99614f09dd7a96f787236d0bbf674dfc5fcf2 | refs/heads/master | 2021-04-29T10:13:07.230358 | 2016-12-29T13:16:56 | 2016-12-29T13:16:56 | 77,874,884 | 0 | 1 | null | 2017-01-03T01:49:32 | 2017-01-03T01:49:32 | null | UTF-8 | Python | false | false | 899 | py | #############################################################
###
### _|_|_| _| _| _| _|
### _| _| _| _|_|_| _|_| _| _| _|_|_|_|
### _|_| _|_| _| _| _| _| _| _| _|
### _| _| _| _| _| _| _| _| _| _|
### _|_|_| _| _| _|_|_| _| _| _| _|_|
### _|
### _|
###
### name: blog.py
### function: write blog
### date: 2016-11-02
### author: quanyechavshuo
### blog: https://3xp10it.cc
#############################################################
import time
from exp10it import figlet2file
figlet2file("3xp10it",0,True)
time.sleep(1)
| [
"[email protected]"
] | |
7428a6f9ed8f18d1d5b40f66c207c09dbccfea2e | 58ee1dc37b57e0b4f06cf383c6a9e0654f490150 | /python-zict/lilac.py | 74172c63bc984bdbddbadf099461698472befd29 | [] | no_license | MikeyBaldinger/arch4edu | f3af87ef3a8d4cd78fde7e0ef75658c17dbe8c06 | c1775bf7fe0ffc87f3c8b4109fb1e8acde12a430 | refs/heads/master | 2022-12-23T16:40:55.513537 | 2020-09-28T21:00:59 | 2020-09-28T21:00:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | #!/usr/bin/env python3
from lilaclib import *
maintainers = [{'github': 'petronny', 'email': 'Jingbei Li <[email protected]>'}]
update_on = [{'aur': None}, {'alias': 'python'}]
build_prefix = 'extra-x86_64'
repo_depends = ['python-heapdict']
pre_build = aur_pre_build
post_build = aur_post_build
if __name__ == '__main__':
single_main(build_prefix)
| [
"[email protected]"
] | |
5cf4e3cccdec114e403ab352d32e8640f5a6250b | b99bbc50ab1d039948ccf853963ae044a97498fb | /src/api/symbols/views/__init__.py | 9f0254365c9be66a905516f608870a81172d33c2 | [] | no_license | fan1018wen/Alpha | 26899cc0eb6761bf6bd8089e7d12716c9e7ae01e | c50def8cde58fd4663032b860eb058302cbac6da | refs/heads/master | 2021-05-12T12:54:15.747220 | 2017-10-11T10:58:51 | 2017-10-11T10:58:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,736 | py | from django.core.paginator import Paginator
from django.db.models import Q
from rest_framework.views import APIView
from common.models import BackstageHTTPResponse
from common.utils import log_exception
from symbols.filters import SymbolFilter
from symbols.models import Symbol
from symbols.serializers import SymbolSerializer
class SymbolListAPI(APIView):
@log_exception
def get(self, request, *args, **kwargs):
"""
        List the data points available to formulas
---
parameters:
- name: index
              description: page number
type: integer
paramType: query
required: false
- name: number
              description: items per page
type: integer
paramType: query
required: false
- name: table_name
              description: table name
type: string
paramType: query
required: false
- name: classification_1
              description: first classification dimension
type: string
paramType: query
required: false
- name: classification_2
              description: second classification dimension
type: string
paramType: query
required: false
"""
symbols = Symbol.objects.all()
symbols = SymbolFilter(request.GET, queryset=symbols).qs
paginator = Paginator(symbols, request.GET.get('number', 100))
page = paginator.page(request.GET.get('index', 1))
serializer = SymbolSerializer(page, many=True)
return BackstageHTTPResponse(
code=BackstageHTTPResponse.API_HTTP_CODE_NORMAL,
data=serializer.data,
pageinfo=page
).to_response()
class TableListAPI(APIView):
@log_exception
def get(self, request, *args, **kwargs):
"""
        List the tables available to formulas
---
parameters:
- name: index
              description: page number
type: integer
paramType: query
required: false
- name: number
              description: items per page
type: integer
paramType: query
required: false
"""
table_names = list(Symbol.objects.exclude(
Q(table_name__isnull=True)|(Q(table_name=''))
).values_list('table_name', flat=True).order_by('table_name').distinct())
return BackstageHTTPResponse(
code=BackstageHTTPResponse.API_HTTP_CODE_NORMAL,
data=table_names,
).to_response()
class SymbolClassificationListAPI(APIView):
@log_exception
def get(self, request, classification, *args, **kwargs):
"""
        List the classification dimension values available to formulas
---
parameters:
- name: classification
              description: which classification dimension (ordinal index)
type: integer
paramType: path
required: true
"""
column_name = 'classification_%s' % classification
if column_name not in [i.attname for i in Symbol._meta.fields]:
return BackstageHTTPResponse(
code=BackstageHTTPResponse.API_HTTP_CODE_NOT_FOUND,
                message='Data not found'
).to_response()
query_dict_1 = {'%s__isnull' % column_name: True}
query_dict_2 = {column_name: ''}
column_values = list(Symbol.objects.exclude(
Q(**query_dict_1)|(Q(**query_dict_2))
).values_list(column_name, flat=True).order_by(column_name).distinct())
return BackstageHTTPResponse(
code=BackstageHTTPResponse.API_HTTP_CODE_NORMAL,
data=column_values,
).to_response()
| [
"[email protected]"
] | |
9ae6c6e071cb9074c94b0058901696c52d416298 | a0eb6744e6f7f509b96d21f0bc8b3f8387f6861c | /notebook/union_find_basic_usage.py | 01c45b5560292afba9712dec8f754ebe83733393 | [
"MIT"
] | permissive | nkmk/python-snippets | a6c66bdf999502e52f4795a3074ced63bf440817 | f9dd286a9cf93f474e20371f8fffc4732cb3c4d5 | refs/heads/master | 2023-08-03T04:20:05.606293 | 2023-07-26T13:21:11 | 2023-07-26T13:21:11 | 98,900,570 | 253 | 77 | MIT | 2020-10-25T01:12:53 | 2017-07-31T14:54:47 | Jupyter Notebook | UTF-8 | Python | false | false | 1,721 | py | from union_find_basic import UnionFindBasic, UnionFindPathCompression, UnionFindByRank, UnionFindBySize, UnionFind
ufb = UnionFindBasic(5)
print(ufb.parents)
# [0, 1, 2, 3, 4]
ufb.union(3, 4)
print(ufb.parents)
ufb.union(2, 3)
print(ufb.parents)
ufb.union(1, 2)
print(ufb.parents)
ufb.union(0, 4)
print(ufb.parents)
# [0, 1, 2, 3, 3]
# [0, 1, 2, 2, 3]
# [0, 1, 1, 2, 3]
# [0, 0, 1, 2, 3]
print([ufb.find(i) for i in range(5)])
# [0, 0, 0, 0, 0]
ufpc = UnionFindPathCompression(5)
print(ufpc.parents)
# [0, 1, 2, 3, 4]
ufpc.union(3, 4)
print(ufpc.parents)
ufpc.union(2, 3)
print(ufpc.parents)
ufpc.union(1, 2)
print(ufpc.parents)
ufpc.union(0, 4)
print(ufpc.parents)
# [0, 1, 2, 3, 3]
# [0, 1, 2, 2, 3]
# [0, 1, 1, 2, 3]
# [0, 0, 1, 1, 1]
print([ufpc.find(i) for i in range(5)])
# [0, 0, 0, 0, 0]
ufbr = UnionFindByRank(5)
print(ufbr.parents)
# [0, 1, 2, 3, 4]
ufbr.union(3, 4)
print(ufbr.parents)
ufbr.union(2, 3)
print(ufbr.parents)
ufbr.union(1, 2)
print(ufbr.parents)
ufbr.union(0, 4)
print(ufbr.parents)
# [0, 1, 2, 3, 3]
# [0, 1, 3, 3, 3]
# [0, 3, 3, 3, 3]
# [3, 3, 3, 3, 3]
ufbs = UnionFindBySize(5)
print(ufbs.parents)
# [0, 1, 2, 3, 4]
ufbs.union(3, 4)
print(ufbs.parents)
ufbs.union(2, 3)
print(ufbs.parents)
ufbs.union(1, 2)
print(ufbs.parents)
ufbs.union(0, 4)
print(ufbs.parents)
# [0, 1, 2, 3, 3]
# [0, 1, 3, 3, 3]
# [0, 3, 3, 3, 3]
# [3, 3, 3, 3, 3]
print(ufbs.size)
# [1, 1, 1, 5, 1]
print(ufbs.size[ufbs.find(0)])
# 5
uf = UnionFind(5)
print(uf.parents)
# [-1, -1, -1, -1, -1]
uf.union(3, 4)
print(uf.parents)
uf.union(2, 3)
print(uf.parents)
uf.union(1, 2)
print(uf.parents)
uf.union(0, 4)
print(uf.parents)
# [-1, -1, -1, -2, 3]
# [-1, -1, 3, -3, 3]
# [-1, 3, 3, -4, 3]
# [3, 3, 3, -5, 3]
| [
"[email protected]"
] | |
5be778cd62c0fc4fb164b11572b2864f06dd6ffe | 4a0f8c5c0e8324fa614da776f2a704b5c369ccbb | /topologyTest/GetDDIs_150_250Examples_WithDifferentDomainNames.py | cafde521581bfddd4fd57d7b907fcb2ae3e1149d | [] | no_license | magic2du/contact_matrix | 9f8ae868d71e7e5c8088bf22a9407ea3eb073be6 | 957e2ead76fabc0299e36c1435162edd574f4fd5 | refs/heads/master | 2021-01-18T21:15:07.341341 | 2015-09-16T02:14:53 | 2015-09-16T02:14:53 | 24,237,641 | 0 | 0 | null | 2015-09-10T19:58:24 | 2014-09-19T16:48:37 | null | UTF-8 | Python | false | false | 1,903 | py | import _mysql
from dealFile import *
# Get DDIs whose two domains differ, with at least 2 distinct interfaces and 150-250 examples
db=_mysql.connect(host="localhost",user="root",passwd="zxcv4321",db="DDI")
#db.query("""select COUNT(*) from PPI inner join example on (ID = PPI_ID) where domain1="ACT" and domain2="ACT" and topology_1 = 6 and topology_2 = 6""")
#db.query("""select * from PPI inner join example on (ID = PPI_ID) where domain1="ACT" and domain2="ACT" """)
ddiList=readDDIsFile('listOfFolders15OCT.txt')
ddis=[]
# For each DDI, count its distinct interface topologies and its total examples
for ddi in ddiList:
[domain1,domain2]=ddi
if domain1 == domain2:
continue
#print i
#print domain1
#print domain2
#query='SELECT DISTINCT topology_1,topology_2 from DDItopology WHERE domain1="'+domain1+'" AND domain2="'+domain2+'"'
#query='SELECT DISTINCT topology_1,topology_2 from DDItopology WHERE domain1="'+domain1+'" AND domain2="'+domain2+'"'
query='SELECT COUNT(DISTINCT topology_1,topology_2) from DDItopology WHERE domain1="'+domain1+'" AND domain2="'+domain2+'"'
#print query
#query='select domain1,domain2 from DDI1'
db.query(query)
result=db.store_result()
numTopology=result.fetch_row(0)
print numTopology[0][0]
if numTopology[0][0]<2:
break
try:
query='SELECT COUNT(*) from DDItopology WHERE domain1="'+domain1+'" AND domain2="'+domain2+'"'
#print query
db.query(query)
result=db.store_result()
numExample=result.fetch_row(0)
print int(numExample[0][0])
if int(numExample[0][0])>150 and int(numExample[0][0])<250:
ddis.append(domain1+'_int_'+domain2)
except:
print 'error'
break
writeListFile('listOfDDIsHaveOver2InterfacesHave150-250Examples.txt',ddis)
| [
"[email protected]"
] | |
b5ab5cda1555793b46c2e5542858767a98e8ef6e | 658e2e3cb8a4d5343a125f7deed19c9ebf06fa68 | /course_DE/udacity-data-engineering-projects-master/Project 5 - Data Pipelines with Airflow/exercises/dags/3_ex3_subdags/dag.py | bc6617c67ca71e8cfaa562b89339fc7fdf1fc524 | [] | no_license | yennanliu/analysis | 3f0018809cdc2403f4fbfe4b245df1ad73fa08a5 | 643ad3fed41961cddd006fadceb0e927f1db1f23 | refs/heads/master | 2021-01-23T21:48:58.572269 | 2020-10-13T22:47:12 | 2020-10-13T22:47:12 | 57,648,676 | 11 | 9 | null | null | null | null | UTF-8 | Python | false | false | 2,352 | py | # Instructions
# In this exercise, we’ll place our S3 to RedShift Copy operations into a SubDag.
# 1 - Consolidate HasRowsOperator into the SubDag
# 2 - Reorder the tasks to take advantage of the SubDag Operators
import datetime
from airflow import DAG
from airflow.operators.postgres_operator import PostgresOperator
from airflow.operators.subdag_operator import SubDagOperator
from airflow.operators.udacity_plugin import HasRowsOperator
from lesson3.exercise3.subdag import get_s3_to_redshift_dag
import sql_statements
start_date = datetime.datetime.utcnow()
dag = DAG(
"lesson3.exercise3",
start_date=start_date,
)
trips_task_id = "trips_subdag"
trips_subdag_task = SubDagOperator(
subdag=get_s3_to_redshift_dag(
"lesson3.exercise3",
trips_task_id,
"redshift",
"aws_credentials",
"trips",
sql_statements.CREATE_TRIPS_TABLE_SQL,
s3_bucket="udac-data-pipelines",
s3_key="divvy/unpartitioned/divvy_trips_2018.csv",
start_date=start_date,
),
task_id=trips_task_id,
dag=dag,
)
stations_task_id = "stations_subdag"
stations_subdag_task = SubDagOperator(
subdag=get_s3_to_redshift_dag(
"lesson3.exercise3",
stations_task_id,
"redshift",
"aws_credentials",
"stations",
sql_statements.CREATE_STATIONS_TABLE_SQL,
s3_bucket="udac-data-pipelines",
s3_key="divvy/unpartitioned/divvy_stations_2017.csv",
start_date=start_date,
),
task_id=stations_task_id,
dag=dag,
)
#
# TODO: Consolidate check_trips and check_stations into a single check in the subdag
# as we did with the create and copy in the demo
#
check_trips = HasRowsOperator(
task_id="check_trips_data",
dag=dag,
redshift_conn_id="redshift",
table="trips"
)
check_stations = HasRowsOperator(
task_id="check_stations_data",
dag=dag,
redshift_conn_id="redshift",
table="stations"
)
location_traffic_task = PostgresOperator(
task_id="calculate_location_traffic",
dag=dag,
postgres_conn_id="redshift",
sql=sql_statements.LOCATION_TRAFFIC_SQL
)
#
# TODO: Reorder the Graph once you have moved the checks
#
trips_subdag_task >> check_trips
stations_subdag_task >> check_stations
check_stations >> location_traffic_task
check_trips >> location_traffic_task
| [
"[email protected]"
] | |
a0fcd76bb531bd1b8db92bfd0f143b1ac789e17f | f983d2fc949bc0de944755a19e57e5d15466dd98 | /homeads/mails.py | a837ea5fa7f51062eae6d086a491ffecd86079ce | [] | no_license | wd5/localized_classified_ads | 2c523a58372a3963d15f01e52709e1923df20ca7 | 49414088a8ba7f09da35f005b15652efd2bcdb18 | refs/heads/master | 2020-12-25T15:30:40.113192 | 2012-11-01T15:29:02 | 2012-11-01T15:29:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,646 | py | #-*- coding: utf-8 -*-
from django.contrib.sites.models import Site
from utils.mails import AdEmailMultiAlternatives
class HomeEmail(AdEmailMultiAlternatives):
"""
    Class used to send multi-alternatives email (text + html)
for AcheterSansCom and LouerSansCom
"""
def get_default_context(self):
if self.ad:
if self.ad.__class__.__name__ == "HomeForSaleAd":
self.default_context = {'linkColor': '#20B2AB',
'secondColor': '#FFB82E'}
if self.ad.__class__.__name__ == "HomeForRentAd":
self.default_context = {'linkColor': '#9D81A1',
'secondColor': 'Pink'}
else:
domain = Site.objects.get_current().domain
if domain == 'achetersanscom.com':
self.default_context = {'linkColor': '#20B2AB',
'secondColor': '#FFB82E'}
if domain == 'louersanscom.com':
self.default_context = {'linkColor': '#9D81A1',
'secondColor': 'Pink'}
return self.default_context
def get_default_images(self):
if self.ad:
if self.ad.__class__.__name__ == "HomeForSaleAd":
self.default_files = (('img/home.png', 'logo'),
('img/shadow_bottom.jpg', 'shadow'))
if self.ad.__class__.__name__ == "HomeForRentAd":
self.default_files = (('img/apartment.png', 'logo'),
('img/shadow_bottom.jpg', 'shadow'))
else:
domain = Site.objects.get_current().domain
if domain == 'achetersanscom.com':
self.default_files = (('img/home.png', 'logo'),
('img/shadow_bottom.jpg', 'shadow'))
if domain == 'louersanscom.com':
self.default_files = (('img/apartment.png', 'logo'),
('img/shadow_bottom.jpg', 'shadow'))
return self.default_files
class UserSignIn(HomeEmail):
"""
User Sign In
"""
subject = u"[{{ site.name }}] Validation de votre inscription"
template_name = 'emails/user_sign_in/body'
class HomeAdCreatedMessageEmail(HomeEmail):
"""
Home Ad Created Message Email
    Sent when a user creates a new ad
"""
subject = u"[{{ site.name }}] Annonce créée"
template_name = 'emails/home_ad_created/body'
class HomeAdUpdatedMessageEmail(HomeEmail):
"""
Home Ad Update Message Email
    Sent when a user updates an ad
"""
subject = u"[{{ site.name }}] Annonce mise à jour"
template_name = 'emails/home_ad_updated/body'
class BuyerToVendorMessageEmail(HomeEmail):
"""
User message email from buyer to vendor for an Ad
"""
subject = u'[{{ site.name }}] Nouveau message à propos de votre bien'
template_name = 'emails/to_vendor_message/body'
class VendorToBuyerMessageEmail(HomeEmail):
"""
User message email from vendor to buyer for an Ad
"""
subject = u'[{{ site.name }}] Nouveau message à propos de votre recherche'
template_name = 'emails/to_buyer_message/body'
class NewPotentialBuyerToVendorMessageEmail(HomeEmail):
"""
    Mail sent to the vendor when a user's search matches the vendor's ad
"""
subject = u'[{{ site.name }}] Une nouvelle personne pourrait être interessée par votre bien'
template_name = 'emails/to_vendor_potential_buyer/body'
class NewAdToBuyerMessageEmail(HomeEmail):
"""
    Mail sent to inform a user that a new ad matches their search
"""
subject = u'[{{ site.name }}] Un nouveau bien correspond à votre recherche'
template_name = 'emails/to_buyer_potential_ad/body'
| [
"[email protected]"
] | |
2d91f87c27aff3220f48df0e44ec5d65370af653 | c831e7f6c434900d817f59a11b25e78a1a5090ad | /Calibration/CalibConfigFiles/MuonCalibration/CalibConfig_DetModel89_RecoStage38.py | 4f213b2a710dcce9269a2f654daa625b68761bfc | [] | no_license | StevenGreen1/OptimisationStudies | 8cca03f57d2cbf81e5fb609f13e2fa4b9c9880f6 | c5741e8d2fab4752ceca8b10cc5f2bbc1a7fafa9 | refs/heads/master | 2021-01-18T21:30:51.418785 | 2017-02-21T16:27:50 | 2017-02-21T16:27:50 | 44,306,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,896 | py | # Digitisation Constants - ECal
CalibrECal = 42.121331495
# Digitisation Constants ILDCaloDigi - HCal
CalibrHCalBarrel = 47.5716455642
CalibrHCalEndcap = 53.3873293873
CalibrHCalOther = 29.2886957667
# Digitisation Constants NewLDCCaloDigi - HCal
CalibrHCal = -1
# Digitisation Constants - Muon Chamber
CalibrMuon = 56.7
# MIP peak position in direction-corrected SimCaloHit energy distributions
# used for realistic ECal and HCal digitisation options
CalibrECalMIP = 0.0001475
CalibrHCalMIP = 0.0004925
# MIP peak position in direction-corrected CaloHit energy distributions
# used for MIP definition in PandoraPFA
ECalToMIPCalibration = 153.846
HCalToMIPCalibration = 41.841
MuonToMIPCalibration = 10.3093
# EM and Had Scale Settings
ECalToEMGeVCalibration = 1.01529193221
HCalToEMGeVCalibration = 1.12124159762
ECalToHadGeVCalibration = 1.08839104614
HCalToHadGeVCalibration = 1.12124159762
# Pandora Threshold Cuts
ECalMIPThresholdPandora = 0.5
HCalMIPThresholdPandora = 0.3
# Hadronic Energy Truncation in HCal PandoraPFA
MaxHCalHitHadronicEnergy = 1
# Timing ECal
ECalBarrelTimeWindowMax = 1000000
ECalEndcapTimeWindowMax = 1000000
# Timing HCal
HCalBarrelTimeWindowMax = 1000000
HCalEndcapTimeWindowMax = 1000000
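# Illustrative grouping (added; not part of the original config): collecting
# related constants into a dict can make them easier to pass around in
# analysis scripts. The dict name below is an assumption.
PANDORA_MIP_CALIBRATION = {
    'ECal': ECalToMIPCalibration,
    'HCal': HCalToMIPCalibration,
    'Muon': MuonToMIPCalibration,
}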
| [
"[email protected]"
] | |
965503ab5aa40de1f3305cdc5d07646e13c4cb78 | c60c199410289c1d7ec4aea00833b461e1f08f88 | /.history/older-than/day2/func1.py | 13cbbeac3c8a8aec182f78f63a9acaf9f1149a4d | [] | no_license | ver007/pythonjumpstart | 66fb111e6af197fad3e853b2c2d712a1b57a7d59 | 5b1f52479abd07456e2da494149e491d398f3b7d | refs/heads/master | 2021-01-21T01:34:35.501870 | 2015-05-13T14:10:13 | 2015-05-13T14:10:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | #!/usr/bin/env python
# NB: this definition deliberately shadows the built-in sum() for the exercise.
def sum(a, b):
    return a + b
# print(sum(2, 3))
func = sum  # a function is just an object, so it can be bound to a new name
print(func(10, 100))
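# Added illustration: assignment does not copy the function object;
# both names refer to the same object.
print(func is sum)  # True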
| [
"[email protected]"
] | |
836dfafcfdee968679acbc1cd37e6add131774e2 | 994a82e4d859e605cf67736446aadcaf3cca2ec8 | /examples/query_horizon.py | f5d72107849b8b207524dd159b162cfb44653d09 | [
"Apache-2.0"
] | permissive | kingdavid6336/py-stellar-base | fe7a5af576b7f03f7d36badca6a540232719e7cc | 5e22370113e81eca1096ae62d58a5e663ffebca7 | refs/heads/master | 2021-12-18T14:20:01.146139 | 2020-06-21T07:55:59 | 2020-06-21T07:58:57 | 231,694,118 | 1 | 0 | Apache-2.0 | 2020-06-21T12:04:49 | 2020-01-04T01:52:57 | Python | UTF-8 | Python | false | false | 464 | py | from stellar_sdk import Server
server = Server(horizon_url="https://horizon-testnet.stellar.org")
# get a list of transactions that occurred in ledger 1400
transactions = server.transactions().for_ledger(1400).call()
print(transactions)
# get a list of transactions submitted by a particular account
transactions = server.transactions() \
.for_account(account_id="GASOCNHNNLYFNMDJYQ3XFMI7BYHIOCFW3GJEOWRPEGK2TDPGTG2E5EDW") \
.call()
print(transactions)
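# Illustrative extension (added; not in the original example): call builders
# also support paging, so large result sets can be walked a page at a time.
page = server.transactions() \
    .for_account(account_id="GASOCNHNNLYFNMDJYQ3XFMI7BYHIOCFW3GJEOWRPEGK2TDPGTG2E5EDW") \
    .limit(10) \
    .order(desc=True) \
    .call()
for record in page["_embedded"]["records"]:
    print(record["hash"])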
| [
"[email protected]"
] | |
b6d49962e507d1202269880c14641540b5bffc8d | b22778ed4a21cc1102512ae7da7e8225b5f5299e | /examples/vector_v3.py | b5c655b305449e8e06758c7fe3ff2eeb22089f93 | [
"MIT"
] | permissive | afcarl/pythonic-api | 4722358935075878ff91a640174a2e5d0ae5764d | 764cb9dba9418c591d6d0cef20401b58d8ce0b1b | refs/heads/master | 2020-03-18T16:41:40.297453 | 2016-07-30T19:45:41 | 2016-07-30T19:45:41 | 134,980,879 | 1 | 0 | null | 2018-05-26T17:18:08 | 2018-05-26T17:18:08 | null | UTF-8 | Python | false | false | 1,369 | py | """
A multi-dimensional ``Vector`` class, take 3
"""
from array import array
import math
import reprlib
import numbers
class Vector:
typecode = 'd'
def __init__(self, components):
self._components = array(self.typecode, components)
def __len__(self):
return len(self._components)
def __iter__(self):
return iter(self._components)
def __abs__(self):
return math.sqrt(sum(x * x for x in self))
def __eq__(self, other):
return (len(self) == len(other) and
all(a == b for a, b in zip(self, other)))
def __str__(self):
return str(tuple(self))
def __repr__(self):
components = reprlib.repr(self._components)
components = components[components.find('['):-1]
return 'Vector({})'.format(components)
def __getitem__(self, index):
cls = type(self)
if isinstance(index, slice):
return cls(self._components[index])
elif isinstance(index, numbers.Integral):
return self._components[index]
else:
msg = '{cls.__name__} indices must be integers'
raise TypeError(msg.format(cls=cls))
# ...
def __mul__(self, scalar):
if isinstance(scalar, numbers.Real):
return Vector(n * scalar for n in self)
else:
return NotImplemented
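    # Added sketch (not in the original example): __mul__ alone only handles
    # `v * scalar`; delegating __rmul__ to it makes `scalar * v` work as well.
    def __rmul__(self, scalar):
        return self * scalar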
| [
"[email protected]"
] | |
daebceb3498025be0ba64616015b483e5246c793 | 3c54f853a782e07675b809cada049debe3d415b1 | /main/rates/management/commands/get_rate_data.py | a5e02efdf4c86d7cd7a4d9b3c79e36f983ab86be | [
"MIT"
] | permissive | Hawk94/coin_tracker | ebf82a17aff1ae84aa7de872734dbf1616022de5 | 082909e17308a8dd460225c1b035751d12a27106 | refs/heads/master | 2021-01-24T08:12:37.041745 | 2017-08-10T11:01:19 | 2017-08-10T11:01:19 | 93,378,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 786 | py | from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from main.rates.models import Rate
import requests
import datetime
import decimal
class Command(BaseCommand):
    help = "Fetches today's EUR and GBP exchange rates and saves them to the database"
def handle(self, *args, **options):
base_url = 'https://openexchangerates.org/api/latest.json?app_id={}'
request_json = requests.get(base_url.format(settings.OPEN_EXCHANGE_APP_ID)).json()['rates']
eur_rate = 1 / request_json['EUR']
gbp_rate = 1 / request_json['GBP']
date = datetime.date.today()
Rate.objects.create(date=date, eur_rate=eur_rate, gbp_rate=gbp_rate)
self.stdout.write(self.style.SUCCESS('Successfully created exchange rate records!'))
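        # Usage note (added; illustrative): this command is typically
        # scheduled to run daily, e.g. via cron:
        #   0 6 * * * /path/to/venv/bin/python manage.py get_rate_data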
| [
"[email protected]"
] | |
123ff2163d0dea2759b84eef2ebe3fab6a5fdbff | 7cc53a80f8ca9716e2e6893b6fd98ddab326061c | /iHome/web_html.py | 453a18b89d1a6db8eec578debdba818165338f0e | [] | no_license | zengsiquan/ihome | 4b98c476fdf381ad18113b070a44e48432b51a58 | 40926f74d46bc76de4aecd98cfc52302ecf72f1b | refs/heads/master | 2020-03-10T00:55:06.484107 | 2018-04-10T13:22:52 | 2018-04-10T13:22:52 | 129,093,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | # -*- coding:utf-8 -*-
from flask import Blueprint, current_app, make_response
from flask_wtf import csrf
html = Blueprint('html', __name__)
@html.route('/<re(".*"):file_name>')
def get_html(file_name):
    if not file_name:
        file_name = 'index.html'
    if file_name != "favicon.ico":
        file_name = 'html/' + file_name
    response = make_response(current_app.send_static_file(file_name))
    # Attach a CSRF token cookie so front-end forms can send it back.
    csrf_token = csrf.generate_csrf()
    response.set_cookie('csrf_token', csrf_token)
    return response
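# Wiring note (added; illustrative): the blueprint is registered on the app
# elsewhere, roughly:
#   app.register_blueprint(html)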
"[email protected]"
] | |
718743c7da0e3030a59f7358c4988be1a2d87356 | 534570bbb873293bd2646a1567b63d162fbba13c | /Python/Data Structure/Binary Tree/Serilization:Deserialization/Verify Preorder Serialization of a Binary Tree.py | 9a2274a297c9e044d6cc6fe2a8830f27f34a8bea | [] | no_license | XinheLIU/Coding-Interview | fa3df0f7167fb1bc6c8831748249ebaa6f164552 | d6034c567cef252cfafca697aa316c7ad4e7d128 | refs/heads/master | 2022-09-17T14:30:54.371370 | 2022-08-19T15:53:35 | 2022-08-19T15:53:35 | 146,382,499 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 836 | py | class Solution:
def isValidSerialization(self, preorder: str) -> bool:
stack = []
top = -1
preorder = preorder.split(',')
for s in preorder:
stack.append(s)
top += 1
while self.endsWithTwoHashes(stack,top):
h = stack.pop()
top -= 1
h = stack.pop()
top -= 1
if top < 0:
return False
h = stack.pop()
stack.append('#')
#print stack
if len(stack) == 1:
if stack[0] == '#':
return True
return False
def endsWithTwoHashes(self,stack,top):
if top < 1:
return False
if stack[top]=='#' and stack[top-1]=='#':
return True
return False | [
"[email protected]"
] | |
1cae18ce8bce6554011b5d4dd4091266f3224738 | 4d9b7b5f12b343e515609b063bdf5c31fe89a4f9 | /asynchttp/websocket.py | ad23d2e17d1975f029217e182f7e79048da1b7f1 | [
"BSD-3-Clause"
] | permissive | oohlaf/asynchttp | 6aa956695dd82a60854d98afbf09741ce5c1fee9 | 2fb6a3b321c130e7b87cf1de03f042b89579a702 | refs/heads/master | 2021-01-16T19:47:18.929253 | 2013-10-10T23:36:13 | 2013-10-10T23:36:13 | 13,546,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,484 | py | """WebSocket protocol versions 13 and 8."""
__all__ = ['WebSocketParser', 'WebSocketWriter', 'do_handshake',
'Message', 'WebSocketError',
'MSG_TEXT', 'MSG_BINARY', 'MSG_CLOSE', 'MSG_PING', 'MSG_PONG']
import base64
import binascii
import collections
import hashlib
import struct
from asynchttp import errors
# Frame opcodes defined in the spec.
OPCODE_CONTINUATION = 0x0
MSG_TEXT = OPCODE_TEXT = 0x1
MSG_BINARY = OPCODE_BINARY = 0x2
MSG_CLOSE = OPCODE_CLOSE = 0x8
MSG_PING = OPCODE_PING = 0x9
MSG_PONG = OPCODE_PONG = 0xa
WS_KEY = b'258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
WS_HDRS = ('UPGRADE', 'CONNECTION',
'SEC-WEBSOCKET-VERSION', 'SEC-WEBSOCKET-KEY')
Message = collections.namedtuple('Message', ['tp', 'data', 'extra'])
class WebSocketError(Exception):
"""WebSocket protocol parser error."""
def WebSocketParser(out, buf):
while True:
message = yield from parse_message(buf)
out.feed_data(message)
if message.tp == MSG_CLOSE:
out.feed_eof()
break
def parse_frame(buf):
"""Return the next frame from the socket."""
# read header
data = yield from buf.read(2)
first_byte, second_byte = struct.unpack('!BB', data)
fin = (first_byte >> 7) & 1
rsv1 = (first_byte >> 6) & 1
rsv2 = (first_byte >> 5) & 1
rsv3 = (first_byte >> 4) & 1
opcode = first_byte & 0xf
# frame-fin = %x0 ; more frames of this message follow
# / %x1 ; final frame of this message
# frame-rsv1 = %x0 ; 1 bit, MUST be 0 unless negotiated otherwise
# frame-rsv2 = %x0 ; 1 bit, MUST be 0 unless negotiated otherwise
# frame-rsv3 = %x0 ; 1 bit, MUST be 0 unless negotiated otherwise
if rsv1 or rsv2 or rsv3:
raise WebSocketError('Received frame with non-zero reserved bits')
if opcode > 0x7 and fin == 0:
raise WebSocketError('Received fragmented control frame')
if fin == 0 and opcode == OPCODE_CONTINUATION:
raise WebSocketError(
'Received new fragment frame with non-zero opcode')
has_mask = (second_byte >> 7) & 1
length = (second_byte) & 0x7f
# Control frames MUST have a payload length of 125 bytes or less
if opcode > 0x7 and length > 125:
raise WebSocketError(
"Control frame payload cannot be larger than 125 bytes")
# read payload
if length == 126:
data = yield from buf.read(2)
length = struct.unpack_from('!H', data)[0]
elif length > 126:
data = yield from buf.read(8)
length = struct.unpack_from('!Q', data)[0]
if has_mask:
mask = yield from buf.read(4)
if length:
payload = yield from buf.read(length)
else:
payload = b''
if has_mask:
payload = bytes(b ^ mask[i % 4] for i, b in enumerate(payload))
return fin, opcode, payload
def parse_message(buf):
fin, opcode, payload = yield from parse_frame(buf)
if opcode == OPCODE_CLOSE:
if len(payload) >= 2:
close_code = struct.unpack('!H', payload[:2])[0]
close_message = payload[2:]
return Message(OPCODE_CLOSE, close_code, close_message)
elif payload:
raise WebSocketError(
'Invalid close frame: {} {} {!r}'.format(fin, opcode, payload))
return Message(OPCODE_CLOSE, '', '')
elif opcode == OPCODE_PING:
return Message(OPCODE_PING, '', '')
elif opcode == OPCODE_PONG:
return Message(OPCODE_PONG, '', '')
elif opcode not in (OPCODE_TEXT, OPCODE_BINARY):
raise WebSocketError("Unexpected opcode={!r}".format(opcode))
# load text/binary
data = [payload]
while not fin:
fin, _opcode, payload = yield from parse_frame(buf)
if _opcode != OPCODE_CONTINUATION:
raise WebSocketError(
'The opcode in non-fin frame is expected '
'to be zero, got {!r}'.format(opcode))
else:
data.append(payload)
if opcode == OPCODE_TEXT:
return Message(OPCODE_TEXT, b''.join(data).decode('utf-8'), '')
else:
return Message(OPCODE_BINARY, b''.join(data), '')
class WebSocketWriter:
def __init__(self, transport):
self.transport = transport
def _send_frame(self, message, opcode):
"""Send a frame over the websocket with message as its payload."""
header = bytes([0x80 | opcode])
msg_length = len(message)
if msg_length < 126:
header += bytes([msg_length])
elif msg_length < (1 << 16):
header += bytes([126]) + struct.pack('!H', msg_length)
else:
header += bytes([127]) + struct.pack('!Q', msg_length)
self.transport.write(header + message)
def pong(self):
"""Send pong message."""
self._send_frame(b'', OPCODE_PONG)
    def ping(self):
        """Send ping message."""
self._send_frame(b'', OPCODE_PING)
def send(self, message, binary=False):
"""Send a frame over the websocket with message as its payload."""
if isinstance(message, str):
message = message.encode('utf-8')
if binary:
self._send_frame(message, OPCODE_BINARY)
else:
self._send_frame(message, OPCODE_TEXT)
def close(self, code=1000, message=b''):
"""Close the websocket, sending the specified code and message."""
if isinstance(message, str):
message = message.encode('utf-8')
self._send_frame(
struct.pack('!H%ds' % len(message), code, message),
opcode=OPCODE_CLOSE)
def do_handshake(method, headers, transport):
"""Prepare WebSocket handshake. It return http response code,
response headers, websocket parser, websocket writer. It does not
perform any IO."""
# WebSocket accepts only GET
if method.upper() != 'GET':
raise errors.HttpErrorException(405, headers=(('Allow', 'GET'),))
headers = dict(((hdr, val) for hdr, val in headers if hdr in WS_HDRS))
if 'websocket' != headers.get('UPGRADE', '').lower().strip():
raise errors.BadRequestException(
'No WebSocket UPGRADE hdr: {}\n'
'Can "Upgrade" only to "WebSocket".'.format(
headers.get('UPGRADE')))
if 'upgrade' not in headers.get('CONNECTION', '').lower():
raise errors.BadRequestException(
'No CONNECTION upgrade hdr: {}'.format(
headers.get('CONNECTION')))
# check supported version
version = headers.get('SEC-WEBSOCKET-VERSION')
if version not in ('13', '8', '7'):
raise errors.BadRequestException(
'Unsupported version: {}'.format(version))
# check client handshake for validity
key = headers.get('SEC-WEBSOCKET-KEY')
try:
if not key or len(base64.b64decode(key)) != 16:
raise errors.BadRequestException(
'Handshake error: {!r}'.format(key))
except binascii.Error:
raise errors.BadRequestException(
'Handshake error: {!r}'.format(key)) from None
# response code, headers, parser, writer
return (101,
(('UPGRADE', 'websocket'),
('CONNECTION', 'upgrade'),
('TRANSFER-ENCODING', 'chunked'),
('SEC-WEBSOCKET-ACCEPT', base64.b64encode(
hashlib.sha1(key.encode() + WS_KEY).digest()).decode())),
WebSocketParser,
WebSocketWriter(transport))
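# Usage note (added; illustrative): a server calls do_handshake() with the
# request method/headers, writes the returned status and headers, then reads
# incoming Messages through WebSocketParser and replies via the returned
# WebSocketWriter.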
| [
"[email protected]"
] | |
67624c3041be101cc92c160d6d7e7fd3442377f3 | 832f86e052d90916fb0c8156825c87dc13c0443e | /imported-from-gmail/2020-05-03-invert-a-binary-tree.py | 3afada0e040cf5b62868c3542fa850c26f003171 | [] | no_license | johncornflake/dailyinterview | 292615849cea62cb945ecc7039c594b6966a81f3 | 91bb0edb9e25255e6222279109c15ae9d203970c | refs/heads/master | 2022-12-09T21:02:12.204755 | 2021-06-07T13:09:34 | 2021-06-07T13:09:34 | 225,059,833 | 0 | 0 | null | 2022-12-08T11:27:38 | 2019-11-30T19:24:58 | Python | UTF-8 | Python | false | false | 1,226 | py | Hi, here's your problem today. (You've reached the end of the problems for now - in the meanwhile, here is a random question. And visit
CoderPro for more practice!) This problem was recently asked by Twitter:
You are given the root of a binary tree. Invert the binary tree in place. That is, all left children should become right children, and all right children should become left children.
Example:
     a
   /   \
  b     c
 / \   /
d   e f
The inverted version of this tree is as follows:
     a
   /   \
  c     b
   \   / \
    f e   d
Here is the function signature:
class Node:
    def __init__(self, value):
        self.left = None
        self.right = None
        self.value = value
    def preorder(self):
        print self.value,
        if self.left:
            self.left.preorder()
        if self.right:
            self.right.preorder()
def invert(node):
    # Fill this in.
    pass
root = Node('a')
root.left = Node('b')
root.right = Node('c')
root.left.left = Node('d')
root.left.right = Node('e')
root.right.left = Node('f')
root.preorder()
# a b d e c f
print "\n"
invert(root)
root.preorder()
# a c f b e d
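# One possible solution sketch (added; not part of the original email):
# def invert(node):
#     if node:
#         invert(node.left)
#         invert(node.right)
#         node.left, node.right = node.right, node.left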
| [
"[email protected]"
] | |
ca092fc09e899aefc6a21b81dd2fa026594f71d9 | ff0c17789badd75559eb834fe039d4b4ab175ba8 | /pythonscript/x11-64-cpython/lib/python3.6/site-packages/zmq/green/core.py | a86c455dce0748319cf9d86431b9b202ef28f26b | [
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-free-unknown",
"Python-2.0",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unicode",
"BSD-3-Clause",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"OpenSSL",
"MIT"
] | permissive | studioschade/notebook_graph | 3f7555ab46167b050e461164c6b4a1525dc7df0c | 0fd159855fdd9c38a6dd293e5ec6164986ad6209 | refs/heads/master | 2022-10-23T23:54:54.467050 | 2018-10-14T08:10:18 | 2018-10-14T08:10:18 | 148,099,361 | 9 | 2 | MIT | 2022-10-10T20:16:48 | 2018-09-10T04:32:44 | Python | UTF-8 | Python | false | false | 10,619 | py | #-----------------------------------------------------------------------------
# Copyright (C) 2011-2012 Travis Cline
#
# This file is part of pyzmq
# It is adapted from upstream project zeromq_gevent under the New BSD License
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
"""This module wraps the :class:`Socket` and :class:`Context` found in :mod:`pyzmq <zmq>` to be non blocking
"""
from __future__ import print_function
import sys
import time
import warnings
import zmq
from zmq import Context as _original_Context
from zmq import Socket as _original_Socket
from .poll import _Poller
import gevent
from gevent.event import AsyncResult
from gevent.hub import get_hub
if hasattr(zmq, 'RCVTIMEO'):
TIMEOS = (zmq.RCVTIMEO, zmq.SNDTIMEO)
else:
TIMEOS = ()
def _stop(evt):
"""simple wrapper for stopping an Event, allowing for method rename in gevent 1.0"""
try:
evt.stop()
    except AttributeError:
# gevent<1.0 compat
evt.cancel()
class _Socket(_original_Socket):
"""Green version of :class:`zmq.Socket`
The following methods are overridden:
* send
* recv
To ensure that the ``zmq.NOBLOCK`` flag is set and that sending or receiving
is deferred to the hub if a ``zmq.EAGAIN`` (retry) error is raised.
The `__state_changed` method is triggered when the zmq.FD for the socket is
marked as readable and triggers the necessary read and write events (which
are waited for in the recv and send methods).
Some double underscore prefixes are used to minimize pollution of
:class:`zmq.Socket`'s namespace.
"""
__in_send_multipart = False
__in_recv_multipart = False
__writable = None
__readable = None
_state_event = None
_gevent_bug_timeout = 11.6 # timeout for not trusting gevent
_debug_gevent = False # turn on if you think gevent is missing events
_poller_class = _Poller
def __init__(self, *a, **kw):
super(_Socket, self).__init__(*a, **kw)
self.__in_send_multipart = False
self.__in_recv_multipart = False
self.__setup_events()
def __del__(self):
self.close()
def close(self, linger=None):
super(_Socket, self).close(linger)
self.__cleanup_events()
def __cleanup_events(self):
# close the _state_event event, keeps the number of active file descriptors down
if getattr(self, '_state_event', None):
_stop(self._state_event)
self._state_event = None
# if the socket has entered a close state resume any waiting greenlets
self.__writable.set()
self.__readable.set()
def __setup_events(self):
self.__readable = AsyncResult()
self.__writable = AsyncResult()
self.__readable.set()
self.__writable.set()
try:
self._state_event = get_hub().loop.io(self.getsockopt(zmq.FD), 1) # read state watcher
self._state_event.start(self.__state_changed)
except AttributeError:
# for gevent<1.0 compatibility
from gevent.core import read_event
self._state_event = read_event(self.getsockopt(zmq.FD), self.__state_changed, persist=True)
def __state_changed(self, event=None, _evtype=None):
if self.closed:
self.__cleanup_events()
return
try:
# avoid triggering __state_changed from inside __state_changed
events = super(_Socket, self).getsockopt(zmq.EVENTS)
except zmq.ZMQError as exc:
self.__writable.set_exception(exc)
self.__readable.set_exception(exc)
else:
if events & zmq.POLLOUT:
self.__writable.set()
if events & zmq.POLLIN:
self.__readable.set()
def _wait_write(self):
assert self.__writable.ready(), "Only one greenlet can be waiting on this event"
self.__writable = AsyncResult()
# timeout is because libzmq cannot be trusted to properly signal a new send event:
# this is effectively a maximum poll interval of 1s
tic = time.time()
dt = self._gevent_bug_timeout
if dt:
timeout = gevent.Timeout(seconds=dt)
else:
timeout = None
try:
if timeout:
timeout.start()
self.__writable.get(block=True)
except gevent.Timeout as t:
if t is not timeout:
raise
toc = time.time()
# gevent bug: get can raise timeout even on clean return
# don't display zmq bug warning for gevent bug (this is getting ridiculous)
if self._debug_gevent and timeout and toc-tic > dt and \
self.getsockopt(zmq.EVENTS) & zmq.POLLOUT:
print("BUG: gevent may have missed a libzmq send event on %i!" % self.FD, file=sys.stderr)
finally:
if timeout:
timeout.cancel()
self.__writable.set()
def _wait_read(self):
assert self.__readable.ready(), "Only one greenlet can be waiting on this event"
self.__readable = AsyncResult()
# timeout is because libzmq cannot always be trusted to play nice with libevent.
# I can only confirm that this actually happens for send, but lets be symmetrical
# with our dirty hacks.
# this is effectively a maximum poll interval of 1s
tic = time.time()
dt = self._gevent_bug_timeout
if dt:
timeout = gevent.Timeout(seconds=dt)
else:
timeout = None
try:
if timeout:
timeout.start()
self.__readable.get(block=True)
except gevent.Timeout as t:
if t is not timeout:
raise
toc = time.time()
# gevent bug: get can raise timeout even on clean return
# don't display zmq bug warning for gevent bug (this is getting ridiculous)
if self._debug_gevent and timeout and toc-tic > dt and \
self.getsockopt(zmq.EVENTS) & zmq.POLLIN:
print("BUG: gevent may have missed a libzmq recv event on %i!" % self.FD, file=sys.stderr)
finally:
if timeout:
timeout.cancel()
self.__readable.set()
def send(self, data, flags=0, copy=True, track=False, **kwargs):
"""send, which will only block current greenlet
state_changed always fires exactly once (success or fail) at the
end of this method.
"""
# if we're given the NOBLOCK flag act as normal and let the EAGAIN get raised
if flags & zmq.NOBLOCK:
try:
msg = super(_Socket, self).send(data, flags, copy, track, **kwargs)
finally:
if not self.__in_send_multipart:
self.__state_changed()
return msg
# ensure the zmq.NOBLOCK flag is part of flags
flags |= zmq.NOBLOCK
while True: # Attempt to complete this operation indefinitely, blocking the current greenlet
try:
# attempt the actual call
msg = super(_Socket, self).send(data, flags, copy, track)
except zmq.ZMQError as e:
# if the raised ZMQError is not EAGAIN, reraise
if e.errno != zmq.EAGAIN:
if not self.__in_send_multipart:
self.__state_changed()
raise
else:
if not self.__in_send_multipart:
self.__state_changed()
return msg
# defer to the event loop until we're notified the socket is writable
self._wait_write()
def recv(self, flags=0, copy=True, track=False):
"""recv, which will only block current greenlet
state_changed always fires exactly once (success or fail) at the
end of this method.
"""
if flags & zmq.NOBLOCK:
try:
msg = super(_Socket, self).recv(flags, copy, track)
finally:
if not self.__in_recv_multipart:
self.__state_changed()
return msg
flags |= zmq.NOBLOCK
while True:
try:
msg = super(_Socket, self).recv(flags, copy, track)
except zmq.ZMQError as e:
if e.errno != zmq.EAGAIN:
if not self.__in_recv_multipart:
self.__state_changed()
raise
else:
if not self.__in_recv_multipart:
self.__state_changed()
return msg
self._wait_read()
def send_multipart(self, *args, **kwargs):
"""wrap send_multipart to prevent state_changed on each partial send"""
self.__in_send_multipart = True
try:
msg = super(_Socket, self).send_multipart(*args, **kwargs)
finally:
self.__in_send_multipart = False
self.__state_changed()
return msg
def recv_multipart(self, *args, **kwargs):
"""wrap recv_multipart to prevent state_changed on each partial recv"""
self.__in_recv_multipart = True
try:
msg = super(_Socket, self).recv_multipart(*args, **kwargs)
finally:
self.__in_recv_multipart = False
self.__state_changed()
return msg
def get(self, opt):
"""trigger state_changed on getsockopt(EVENTS)"""
if opt in TIMEOS:
warnings.warn("TIMEO socket options have no effect in zmq.green", UserWarning)
optval = super(_Socket, self).get(opt)
if opt == zmq.EVENTS:
self.__state_changed()
return optval
def set(self, opt, val):
"""set socket option"""
if opt in TIMEOS:
warnings.warn("TIMEO socket options have no effect in zmq.green", UserWarning)
result = super(_Socket, self).set(opt, val)
if opt in (zmq.SUBSCRIBE, zmq.UNSUBSCRIBE):
self.__state_changed()
return result
class _Context(_original_Context):
"""Replacement for :class:`zmq.Context`
Ensures that the greened Socket above is used in calls to `socket`.
"""
_socket_class = _Socket
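# Usage note (added; illustrative): zmq.green is meant as a drop-in
# replacement import when running under gevent, e.g.:
#   import zmq.green as zmq
#   ctx = zmq.Context()
#   sock = ctx.socket(zmq.PUSH)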
| [
"[email protected]"
] | |
892eeecf50de0754f98741f00a9c62aa1017f5c9 | 91214eaa804c0673c4ef476be99331ee745af352 | /application/models/piece.py | 558ad924ced777003c1b2c77b38ec9534659d9f9 | [] | no_license | Spike774/1jingdian | 52a534bae08b162800e038e28c70d2a352155e38 | 7648bf8adb8964220b25f483f92d3e66f0b33ba8 | refs/heads/master | 2021-01-18T01:41:34.546533 | 2015-04-21T15:31:30 | 2015-04-21T15:31:30 | 34,445,222 | 1 | 0 | null | 2015-04-23T08:55:20 | 2015-04-23T08:55:20 | null | UTF-8 | Python | false | false | 10,707 | py | # coding: utf-8
import qrcode
import math
from flask import g
from urlparse import urlparse
from datetime import datetime, date, timedelta
from ._base import db
from ..utils.uploadsets import qrcodes, save_image
from ..utils.helpers import absolute_url_for
class Piece(db.Model):
"""Model for text piece"""
id = db.Column(db.Integer, primary_key=True)
content = db.Column(db.Text)
content_length = db.Column(db.Integer, default=0)
original = db.Column(db.Boolean, default=False)
author = db.Column(db.String(100))
source = db.Column(db.String(100))
source_link = db.Column(db.String(200))
source_link_title = db.Column(db.String(200))
clicks_count = db.Column(db.Integer, default=0)
votes_count = db.Column(db.Integer, default=0)
qrcode = db.Column(db.String(200))
created_at = db.Column(db.DateTime, default=datetime.now)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
user = db.relationship('User', backref=db.backref('pieces',
lazy='dynamic',
order_by='desc(Piece.created_at)'))
def __setattr__(self, name, value):
        # Keep content_length in sync whenever content is assigned.
if name == 'content':
super(Piece, self).__setattr__('content_length', Piece.calculate_content_length(value))
super(Piece, self).__setattr__(name, value)
@property
def source_link_favicon(self):
result = urlparse(self.source_link)
host = "%s://%s" % (result.scheme or "http", result.netloc)
return "http://g.soz.im/%s" % host
@property
def qrcode_url(self):
return qrcodes.url(self.qrcode) if self.qrcode else ""
@property
def source_string(self):
if self.original:
return ""
result_str = ""
if self.author:
result_str += self.author
if self.source:
result_str += "《%s》" % self.source
return result_str
@property
def weibo_share_url(self):
template = "http://service.weibo.com/share/share.php?searchPic=false&title=%s&url=%s"
title = self.content
if self.source_string:
title += " —— %s" % self.source_string
url = absolute_url_for('piece.view', uid=self.id)
return template % (title, url)
@property
def root_comments(self):
return self.comments.filter(PieceComment.root_comment_id == None)
@staticmethod
def calculate_content_length(content):
        # Heuristic length: each CJK character counts as 1 and every two
        # ASCII characters count as 1 (assumes a 2-byte encoding for CJK).
        cn_length = (len(bytes(content)) - len(content)) / 2
        en_length = len(content) - cn_length
        return cn_length + int(math.ceil(en_length / 2.0))
def voted_by_user(self):
if not g.user:
return False
return g.user.voted_pieces.filter(PieceVote.piece_id == self.id).count() > 0
def make_qrcode(self):
qr = qrcode.QRCode(box_size=10, border=0)
qr.add_data(absolute_url_for('piece.view', uid=self.id))
qr.make(fit=True)
img = qr.make_image()
self.qrcode = save_image(img, qrcodes, 'png')
@staticmethod
def get_pieces_data_by_day(day):
"""获取某天的pieces"""
SHOW_PIECES_COUNT = 20
pieces_count = Piece.query.filter(db.func.date(Piece.created_at) == day).count()
hide_pieces_count = pieces_count - SHOW_PIECES_COUNT if pieces_count > SHOW_PIECES_COUNT \
else 0
if hide_pieces_count:
hide_pieces = pieces = Piece.query.filter(
db.func.date(Piece.created_at) == day).order_by(
Piece.votes_count.desc()).offset(SHOW_PIECES_COUNT)
else:
hide_pieces = None
pieces = Piece.query.filter(db.func.date(Piece.created_at) == day). \
order_by(Piece.votes_count.desc()). \
order_by(Piece.created_at.desc()). \
limit(SHOW_PIECES_COUNT)
if day == date.today():
date_string = '今天'
elif day == date.today() - timedelta(days=1):
date_string = '昨天'
else:
date_string = "%s年%s月%s日" % (day.year, day.month, day.day)
return {
'date': day,
'date_string': date_string,
'pieces': pieces,
'hide_pieces': hide_pieces,
'hide_pieces_count': hide_pieces_count
}
class PieceVote(db.Model):
"""每日文字的投票(顶)"""
id = db.Column(db.Integer, primary_key=True)
created_at = db.Column(db.DateTime, default=datetime.now)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
user = db.relationship('User', backref=db.backref('voted_pieces',
lazy='dynamic',
order_by='desc(PieceVote.created_at)'))
piece_id = db.Column(db.Integer, db.ForeignKey('piece.id'))
piece = db.relationship('Piece', backref=db.backref('voters',
lazy='dynamic',
order_by='asc(PieceVote.created_at)'))
class PieceComment(db.Model):
"""文字评论"""
id = db.Column(db.Integer, primary_key=True)
content = db.Column(db.Text)
created_at = db.Column(db.DateTime, default=datetime.now)
votes_count = db.Column(db.Integer, default=0)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
user = db.relationship('User',
foreign_keys=[user_id],
backref=db.backref('piece_comments',
lazy='dynamic',
order_by='desc(PieceComment.created_at)'))
target_user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
target_user = db.relationship('User', foreign_keys=[target_user_id])
root_comment_id = db.Column(db.Integer, db.ForeignKey('piece_comment.id'))
root_comment = db.relationship('PieceComment',
remote_side=[id],
backref=db.backref('sub_comments',
lazy='dynamic',
order_by='asc(PieceComment.created_at)'))
piece_id = db.Column(db.Integer, db.ForeignKey('piece.id'))
piece = db.relationship('Piece', backref=db.backref('comments',
lazy='dynamic',
order_by='asc(PieceComment.created_at)'))
def voted_by_user(self):
return g.user and g.user.voted_piece_comments.filter(
PieceCommentVote.piece_comment_id == self.id).count() > 0
class PieceCommentVote(db.Model):
"""针对文字评论的赞"""
id = db.Column(db.Integer, primary_key=True)
created_at = db.Column(db.DateTime, default=datetime.now)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
user = db.relationship('User', backref=db.backref('voted_piece_comments',
lazy='dynamic',
order_by='desc(PieceCommentVote.created_at)'))
piece_comment_id = db.Column(db.Integer, db.ForeignKey('piece_comment.id'))
piece_comment = db.relationship('PieceComment',
backref=db.backref('votes',
lazy='dynamic',
order_by='asc(PieceCommentVote.created_at)'))
class PieceSource(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(200))
count = db.Column(db.Integer, default=1)
created_at = db.Column(db.DateTime, default=datetime.now)
class PieceAuthor(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(200))
count = db.Column(db.Integer, default=1)
created_at = db.Column(db.DateTime, default=datetime.now)
class PIECE_EDIT_KIND(object):
# create
CREATE = 15
# collection
ADD_TO_COLLECTION = 1
REMOVE_FROM_COLLECTION = 2
# content
UPDATE_CONTENT = 3
# original
CHANGE_TO_ORIGINAL = 13
CHANGE_TO_NON_ORIGINAL = 14
# author
ADD_AUTHOR = 4
UPDATE_AUTHOR = 5
REMOVE_AUTHOR = 6
# source
ADD_SOURCE = 7
UPDATE_SOURCE = 8
REMOVE_SOURCE = 9
# source link
ADD_SOURCE_LINK = 10
UPDATE_SOURCE_LINK = 11
REMOVE_SOURCE_LINK = 12
class PieceEditLog(db.Model):
id = db.Column(db.Integer, primary_key=True)
created_at = db.Column(db.DateTime, default=datetime.now)
kind = db.Column(db.Integer, nullable=False)
before = db.Column(db.String(200))
before_id = db.Column(db.Integer)
after = db.Column(db.String(200))
after_id = db.Column(db.Integer)
compare = db.Column(db.String(500))
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
user = db.relationship('User',
backref=db.backref('edited_pieces',
lazy='dynamic',
order_by='desc(PieceEditLog.created_at)'))
piece_id = db.Column(db.Integer, db.ForeignKey('piece.id'))
piece = db.relationship('Piece',
backref=db.backref('logs',
lazy='dynamic',
order_by='desc(PieceEditLog.created_at)'))
def reported_by_user(self):
return g.user and g.user.reported_piece_edit_logs.filter(
PieceEditLogReport.log_id == self.id).count() > 0
class PieceEditLogReport(db.Model):
id = db.Column(db.Integer, primary_key=True)
processed = db.Column(db.Boolean, default=False)
created_at = db.Column(db.DateTime, default=datetime.now)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
user = db.relationship('User',
backref=db.backref('reported_piece_edit_logs',
lazy='dynamic',
order_by='desc(PieceEditLogReport.created_at)'))
log_id = db.Column(db.Integer, db.ForeignKey('piece_edit_log.id'))
log = db.relationship('PieceEditLog',
backref=db.backref('reports',
lazy='dynamic',
order_by='desc('
'PieceEditLogReport.created_at)'))
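# Usage note (added; illustrative): Piece.get_pieces_data_by_day is typically
# called from a view, e.g.:
#   data = Piece.get_pieces_data_by_day(date.today())
#   pieces, hidden = data['pieces'], data['hide_pieces_count']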
| [
"[email protected]"
] | |
5e45efaf0a3d7732008c6b31f63ea03a3f44c0fe | d75359fde22b08a4109b30bb39c9db27961fa417 | /loginpass/github.py | 794d62418c269dca14a72d1f15a6c4568fd4dea5 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | authlib/loginpass | 58f0881b4e5975c305e633337d1b86657bea907b | 635823a78a2a92cf8630f9935aebb9afcccb8656 | refs/heads/master | 2022-06-08T13:08:09.271879 | 2020-12-08T06:04:39 | 2020-12-08T06:04:39 | 128,506,236 | 280 | 95 | BSD-3-Clause | 2022-05-13T19:30:54 | 2018-04-07T07:26:46 | Python | UTF-8 | Python | false | false | 1,775 | py | """
loginpass.github
~~~~~~~~~~~~~~~~
Loginpass Backend of GitHub (https://github.com).
Useful Links:
- Create App: https://github.com/settings/developers
- API documentation: https://developer.github.com/v3/
:copyright: (c) 2018 by Hsiaoming Yang
:license: BSD, see LICENSE for more details.
"""
from authlib.oidc.core import UserInfo
class GitHub(object):
NAME = 'github'
OAUTH_CONFIG = {
'api_base_url': 'https://api.github.com/',
'access_token_url': 'https://github.com/login/oauth/access_token',
'authorize_url': 'https://github.com/login/oauth/authorize',
'client_kwargs': {'scope': 'user:email'},
'userinfo_endpoint': 'https://api.github.com/user',
}
def userinfo(self, **kwargs):
resp = self.get(self.OAUTH_CONFIG['userinfo_endpoint'], **kwargs)
data = resp.json()
params = {
'sub': str(data['id']),
'name': data['name'],
'email': data.get('email'),
'preferred_username': data['login'],
'profile': data['html_url'],
'picture': data['avatar_url'],
'website': data.get('blog'),
}
        # The email can be None despite the scope being 'user:email',
        # because a user can choose to keep their email address private.
        # In that case we fetch all of the user's emails (private or not)
        # and use the one marked as `primary`.
if params.get('email') is None:
resp = self.get('user/emails', **kwargs)
resp.raise_for_status()
data = resp.json()
params["email"] = next(email['email'] for email in data if email['primary'])
return UserInfo(params)
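# Wiring sketch (added; illustrative, details vary by loginpass version):
#   from authlib.integrations.flask_client import OAuth
#   from loginpass import create_flask_blueprint, GitHub
#   oauth = OAuth(app)
#   def handle_authorize(remote, token, user_info):
#       ...  # log the user in
#   bp = create_flask_blueprint([GitHub], oauth, handle_authorize)
#   app.register_blueprint(bp, url_prefix='/')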
| [
"[email protected]"
] | |
67fd4e81addbef4bfb8d19272ec99dbf5c5362c6 | 1d27decdb5207616837f03a36741947a46e8852d | /py/hscTools/pipe_test/stacker.py | 54af743c04e6fd06668d3410c5a7ec4f07cf2be9 | [] | no_license | dr-guangtou/hs_hsc | 12d7a83ee9898f6d9fb5cf5dc85fa682d50578e8 | 865abc0ba5337d3a085efa99b87ebfcfdd9710af | refs/heads/master | 2021-01-17T03:20:03.900050 | 2019-06-19T05:26:34 | 2019-06-19T05:26:34 | 23,053,990 | 0 | 2 | null | 2015-12-02T15:52:47 | 2014-08-18T00:47:48 | Python | UTF-8 | Python | false | false | 14,441 | py | #!/usr/bin/env python
#
# LSST Data Management System
# Copyright 2008, 2009, 2010 LSST Corporation.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program. If not,
# see <http://www.lsstcorp.org/LegalNotices/>.
#
# -*- python -*-
"""
Tests for Stack
Run with:
./Stacker.py
or
python
>>> import Stacker; Stacker.run()
"""
##########################
# simpleStacker.py
# Steve Bickerton
# An example executable which calls the example 'stack' code
import unittest
import numpy
import lsst.afw.image as afwImage
import lsst.afw.math as afwMath
import lsst.afw.geom as afwGeom
import lsst.utils.tests as utilsTests
import lsst.pex.exceptions as pexEx
import lsst.afw.display.ds9 as ds9
try:
type(display)
except NameError:  # 'display' is only defined when set interactively
display = False
######################################
# main body of code
######################################
class StackTestCase(utilsTests.TestCase):
def setUp(self):
self.nImg = 10
self.nX, self.nY = 64, 64
self.values = [1.0, 2.0, 2.0, 3.0, 8.0 ]
def testMean(self):
""" Test the statisticsStack() function for a MEAN"""
knownMean = 0.0
imgList = afwImage.vectorImageF()
for iImg in range(self.nImg):
imgList.push_back(afwImage.ImageF(afwGeom.Extent2I(self.nX, self.nY), iImg))
knownMean += iImg
imgStack = afwMath.statisticsStack(imgList, afwMath.MEAN)
knownMean /= self.nImg
self.assertEqual(imgStack.get(self.nX/2, self.nY/2), knownMean)
# Test in-place stacking
afwMath.statisticsStack(imgStack, imgList, afwMath.MEAN)
self.assertEqual(imgStack.get(self.nX/2, self.nY/2), knownMean)
def testStatistics(self):
""" Test the statisticsStack() function """
imgList = afwImage.vectorImageF()
for val in self.values:
imgList.push_back(afwImage.ImageF(afwGeom.Extent2I(self.nX, self.nY), val))
imgStack = afwMath.statisticsStack(imgList, afwMath.MEAN)
mean = reduce(lambda x, y: x+y, self.values)/float(len(self.values))
self.assertAlmostEqual(imgStack.get(self.nX/2, self.nY/2), mean)
imgStack = afwMath.statisticsStack(imgList, afwMath.MEDIAN)
median = sorted(self.values)[len(self.values)//2]
self.assertEqual(imgStack.get(self.nX/2, self.nY/2), median)
def testWeightedStack(self):
""" Test statisticsStack() function when weighting by a variance plane"""
sctrl = afwMath.StatisticsControl()
sctrl.setWeighted(True)
mimgList = afwImage.vectorMaskedImageF()
for val in self.values:
mimg = afwImage.MaskedImageF(afwGeom.Extent2I(self.nX, self.nY))
mimg.set(val, 0x0, val)
mimgList.push_back(mimg)
mimgStack = afwMath.statisticsStack(mimgList, afwMath.MEAN, sctrl)
wvalues = [1.0/q for q in self.values]
wmean = float(len(self.values)) / reduce(lambda x, y: x + y, wvalues)
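        # Note (added): with per-pixel weights w_i = 1/variance_i and pixel
        # values equal to their variances, the weighted mean reduces to the
        # harmonic mean N / sum(1/v_i), which is what the line above computes.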
self.assertAlmostEqual(mimgStack.getImage().get(self.nX/2, self.nY/2), wmean)
# Test in-place stacking
afwMath.statisticsStack(mimgStack, mimgList, afwMath.MEAN, sctrl)
self.assertAlmostEqual(mimgStack.getImage().get(self.nX/2, self.nY/2), wmean)
def testConstantWeightedStack(self):
""" Test statisticsStack() function when weighting by a vector of weights"""
sctrl = afwMath.StatisticsControl()
imgList = afwImage.vectorImageF()
weights = afwMath.vectorF()
for val in self.values:
img = afwImage.ImageF(afwGeom.Extent2I(self.nX, self.nY), val)
imgList.push_back(img)
weights.push_back(val)
imgStack = afwMath.statisticsStack(imgList, afwMath.MEAN, sctrl, weights)
wsum = reduce(lambda x, y: x + y, self.values)
wvalues = [x*x for x in self.values]
wmean = reduce(lambda x, y: x + y, wvalues)/float(wsum)
self.assertAlmostEqual(imgStack.get(self.nX/2, self.nY/2), wmean)
def testRequestMoreThanOneStat(self):
""" Make sure we throw an exception if someone requests more than one type of statistics. """
sctrl = afwMath.StatisticsControl()
imgList = afwImage.vectorImageF()
for val in self.values:
img = afwImage.ImageF(afwGeom.Extent2I(self.nX, self.nY), val)
imgList.push_back(img)
def tst():
imgStackBad = afwMath.statisticsStack(imgList, afwMath.MEAN | afwMath.MEANCLIP, sctrl)
utilsTests.assertRaisesLsstCpp(self, pexEx.InvalidParameterException, tst)
def testReturnInputs(self):
""" Make sure that a single file put into the stacker is returned unscathed"""
imgList = afwImage.vectorMaskedImageF()
img = afwImage.MaskedImageF(afwGeom.Extent2I(10, 20))
for y in range(img.getHeight()):
simg = img.Factory(
img,
afwGeom.Box2I(afwGeom.Point2I(0, y), afwGeom.Extent2I(img.getWidth(), 1)),
afwImage.LOCAL)
simg.set(y)
imgList.push_back(img)
imgStack = afwMath.statisticsStack(imgList, afwMath.MEAN)
if display:
ds9.mtv(img, frame=1, title="input")
ds9.mtv(imgStack, frame=2, title="stack")
self.assertEqual(img.get(0, 0)[0], imgStack.get(0, 0)[0])
def testStackBadPixels(self):
"""Check that we properly ignore masked pixels, and set noGoodPixelsMask where there are
no good pixels"""
mimgVec = afwImage.vectorMaskedImageF()
DETECTED = afwImage.MaskU_getPlaneBitMask("DETECTED")
EDGE = afwImage.MaskU_getPlaneBitMask("EDGE")
INTRP = afwImage.MaskU_getPlaneBitMask("INTRP")
SAT = afwImage.MaskU_getPlaneBitMask("SAT")
sctrl = afwMath.StatisticsControl()
sctrl.setNanSafe(False)
sctrl.setAndMask(INTRP | SAT)
sctrl.setNoGoodPixelsMask(EDGE)
edgeBBox = afwGeom.Box2I(afwGeom.Point2I(0, 0), afwGeom.Extent2I(20, 20)) # set these pixels to EDGE
width, height = 512, 512
dim=afwGeom.Extent2I(width, height)
val, maskVal = 10, DETECTED
for i in range(4):
mimg = afwImage.MaskedImageF(dim)
mimg.set(val, maskVal, 1)
#
# Set part of the image to NaN (with the INTRP bit set)
#
llc = afwGeom.Point2I(width//2*(i//2), height//2*(i%2))
bbox = afwGeom.Box2I(llc, dim/2)
smimg = mimg.Factory(mimg, bbox, afwImage.LOCAL)
#smimg.set(numpy.nan, INTRP, numpy.nan)
del smimg
#
# And the bottom corner to SAT
#
smask = mimg.getMask().Factory(mimg.getMask(), edgeBBox, afwImage.LOCAL)
smask |= SAT
del smask
mimgVec.push_back(mimg)
if display > 1:
ds9.mtv(mimg, frame=i, title=str(i))
mimgStack = afwMath.statisticsStack(mimgVec, afwMath.MEAN, sctrl)
if display:
i += 1
ds9.mtv(mimgStack, frame=i, title="Stack")
i += 1
ds9.mtv(mimgStack.getVariance(), frame=i, title="var(Stack)")
#
# Check the output, ignoring EDGE pixels
#
sctrl = afwMath.StatisticsControl()
sctrl.setAndMask(afwImage.MaskU_getPlaneBitMask("EDGE"))
stats = afwMath.makeStatistics(mimgStack, afwMath.MIN | afwMath.MAX, sctrl)
self.assertEqual(stats.getValue(afwMath.MIN), val)
self.assertEqual(stats.getValue(afwMath.MAX), val)
#
# We have to clear EDGE in the known bad corner to check the mask
#
smask = mimgStack.getMask().Factory(mimgStack.getMask(), edgeBBox, afwImage.LOCAL)
self.assertEqual(smask.get(edgeBBox.getMinX(), edgeBBox.getMinY()), EDGE)
smask &= ~EDGE
del smask
self.assertEqual(afwMath.makeStatistics(mimgStack.getMask(), afwMath.SUM, sctrl).getValue(), maskVal)
def testTicket1412(self):
"""Ticket 1412: ignored mask bits are propegated to output stack."""
mimg1 = afwImage.MaskedImageF(afwGeom.Extent2I(1, 1))
mimg1.set(0, 0, (1, 0x4, 1)) # set 0100
mimg2 = afwImage.MaskedImageF(afwGeom.Extent2I(1, 1))
mimg2.set(0, 0, (2, 0x3, 1)) # set 0010 and 0001
imgList = afwImage.vectorMaskedImageF()
imgList.push_back(mimg1)
imgList.push_back(mimg2)
sctrl = afwMath.StatisticsControl()
sctrl.setAndMask(0x1) # andmask only 0001
# try first with no sctrl (no andmask set), should see 0x0111 for all output mask pixels
imgStack = afwMath.statisticsStack(imgList, afwMath.MEAN)
self.assertEqual(imgStack.get(0, 0)[1], 0x7)
# now try with sctrl (andmask = 0x0001), should see 0x0100 for all output mask pixels
imgStack = afwMath.statisticsStack(imgList, afwMath.MEAN, sctrl)
self.assertEqual(imgStack.get(0, 0)[1], 0x4)
def test2145(self):
"""The how-to-repeat from #2145"""
Size = 5
statsCtrl = afwMath.StatisticsControl()
statsCtrl.setCalcErrorFromInputVariance(True)
maskedImageList = afwImage.vectorMaskedImageF()
weightList = []
for i in range(3):
mi = afwImage.MaskedImageF(Size, Size)
imArr, maskArr, varArr = mi.getArrays()
imArr[:] = numpy.random.normal(10, 0.1, (Size, Size))
varArr[:] = numpy.random.normal(10, 0.1, (Size, Size))
maskedImageList.append(mi)
weightList.append(1.0)
stack = afwMath.statisticsStack(maskedImageList, afwMath.MEAN, statsCtrl, weightList)
if False:
print "image=", stack.getImage().getArray()
print "variance=", stack.getVariance().getArray()
self.assertNotEqual(numpy.sum(stack.getVariance().getArray()), 0.0)
def testRejectedMaskPropagation(self):
"""Test that we can propagate mask bits from rejected pixels, when the amount
of rejection crosses a threshold."""
rejectedBit = 1 # use this bit to determine whether to reject a pixel
propagatedBit = 2 # propagate this bit if a pixel with it set is rejected
statsCtrl = afwMath.StatisticsControl()
statsCtrl.setMaskPropagationThreshold(propagatedBit, 0.3)
statsCtrl.setAndMask(1 << rejectedBit)
statsCtrl.setWeighted(True)
maskedImageList = afwImage.vectorMaskedImageF()
# start with 4 images with no mask bits set
partialSum = numpy.zeros((1, 4), dtype=numpy.float32)
finalImage = numpy.array([12.0, 12.0, 12.0, 12.0], dtype=numpy.float32)
for i in range(4):
mi = afwImage.MaskedImageF(4, 1)
imArr, maskArr, varArr = mi.getArrays()
imArr[:,:] = numpy.ones((1, 4), dtype=numpy.float32)
maskedImageList.append(mi)
partialSum += imArr
# add one more image with all permutations of the first two bits set in different pixels
mi = afwImage.MaskedImageF(4, 1)
imArr, maskArr, varArr = mi.getArrays()
imArr[0,:] = finalImage
maskArr[0,1] |= (1 << rejectedBit)
maskArr[0,2] |= (1 << propagatedBit)
maskArr[0,3] |= (1 << rejectedBit)
maskArr[0,3] |= (1 << propagatedBit)
maskedImageList.append(mi)
# these will always be rejected
finalImage[1] = 0.0
finalImage[3] = 0.0
# Uniform weights: we should only see pixel 2 set with propagatedBit, because it's not rejected;
# pixel 3 is rejected, but its weight (0.2) below the propagation threshold (0.3)
stack1 = afwMath.statisticsStack(maskedImageList, afwMath.MEAN, statsCtrl, [1.0, 1.0, 1.0, 1.0, 1.0])
self.assertEqual(stack1.get(0,0)[1], 0x0)
self.assertEqual(stack1.get(1,0)[1], 0x0)
self.assertEqual(stack1.get(2,0)[1], 1 << propagatedBit)
self.assertEqual(stack1.get(3,0)[1], 0x0)
self.assertClose(stack1.getImage().getArray(),
(partialSum + finalImage) / numpy.array([5.0, 4.0, 5.0, 4.0]),
rtol=1E-7)
# Give the masked image more weight: we should see pixel 2 and pixel 3 set with propagatedBit,
# pixel 2 because it's not rejected, and pixel 3 because the weight of the rejection (0.3333)
# is above the threshold (0.3)
# Note that rejectedBit is never propagated, because we didn't include it in statsCtrl (of course,
# normally the bits we'd propagate and the bits we'd reject would be the same)
stack2 = afwMath.statisticsStack(maskedImageList, afwMath.MEAN, statsCtrl, [1.0, 1.0, 1.0, 1.0, 2.0])
self.assertEqual(stack2.get(0,0)[1], 0x0)
self.assertEqual(stack2.get(1,0)[1], 0x0)
self.assertEqual(stack2.get(2,0)[1], 1 << propagatedBit)
self.assertEqual(stack2.get(3,0)[1], 1 << propagatedBit)
self.assertClose(stack2.getImage().getArray(),
(partialSum + 2*finalImage) / numpy.array([6.0, 4.0, 6.0, 4.0]),
rtol=1E-7)
#################################################################
# Test suite boiler plate
#################################################################
def suite():
"""Returns a suite containing all the test cases in this module."""
utilsTests.init()
suites = []
suites += unittest.makeSuite(StackTestCase)
suites += unittest.makeSuite(utilsTests.MemoryTestCase)
return unittest.TestSuite(suites)
def run(shouldExit = False):
"""Run the tests"""
utilsTests.run(suite(), shouldExit)
if __name__ == "__main__":
run(True)
| [
"[email protected]"
] | |
dcfd46cd7f64e2d713c1cee6172c58fb7d16cbea | 9bb5241e1e48ec6d4a879ab53ec3976d747a39e0 | /test/test_fx.py | 18f42c03af0242a570b774d6b70e3b6b4d6f200e | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | yupeifengyannis/pytorch | 9dd4c2cebeb119ed57178f09e76366d17d27169b | c97dc9286dbea2cd3b3ca407e35cffdb3b181fdb | refs/heads/master | 2021-12-06T13:27:47.115070 | 2021-12-05T03:42:55 | 2021-12-05T03:44:07 | 357,236,572 | 0 | 0 | NOASSERTION | 2021-04-12T15:03:16 | 2021-04-12T15:03:15 | null | UTF-8 | Python | false | false | 139,137 | py | # Owner(s): ["oncall: fx"]
import builtins
import contextlib
import copy
import functools
import inspect
import math
import numbers
import operator
import os
import pickle
import sys
import torch
import traceback
import typing
import types
import warnings
import unittest
from math import sqrt
from torch.multiprocessing import Process
from torch.testing import FileCheck
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests
import torch.utils._pytree as pytree
import torch.fx._pytree as fx_pytree
from torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap, PH
from torch.fx.node import Target, Argument
from torch.fx.passes import shape_prop
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx.operator_schemas import get_signature_for_torch_op
from copy import deepcopy
from collections import namedtuple
from torch.fx.proxy import TraceError
from torch.fx._compatibility import _BACK_COMPAT_OBJECTS, _MARKED_WITH_COMATIBLITY
from fx.test_subgraph_rewriter import TestSubgraphRewriter # noqa: F401
from fx.test_dce_pass import TestDCE # noqa: F401
from fx.test_fx_const_fold import TestConstFold # noqa: F401
from fx.test_fx_param_shape_control_flow import TestConstParamShapeInControlFlow # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import AnnotationsTest # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import TypeCheckerTest # noqa: F401
from typing import Any, Callable, Dict, NamedTuple, List, Optional, Tuple, Union
from torch.testing._internal.common_utils import (
IS_FBCODE,
IS_MACOS,
IS_WINDOWS,
TEST_WITH_ROCM,
find_library_location,
run_tests,
)
from torch.testing._internal.jit_utils import JitTestCase
from fx.named_tup import MyNamedTup
try:
from torchvision import models as torchvision_models
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class SimpleTest(torch.nn.Module):
def forward(self, x):
return torch.relu(x + 3.0)
def a_non_torch_leaf(a, b):
return a + b
# Used for test_autowrap_function. Autowrapped functions need to be global
def fx_int(x: float) -> int:
return int(x)
def fx_int_x2(x: float) -> int:
return int(x) * 2
# used in test_pytree. It's all the way out here because pickling a GraphModule
# that uses Point errors out if Point is local to the function
Point = namedtuple('Point', ['x', 'y'])
# Test wrap() passing both a function name as well as a function
# directly
def a_lifted_leaf(a, b):
return a[0] + a[1] + b
wrap('a_lifted_leaf')
# Test wrapping twice doesn't break anything
wrap('a_lifted_leaf')
def a_lifted_leaf2(a, b):
return a[0] + a[1] + b
wrap(a_lifted_leaf2)
wrap('len')
wrap('getattr')
@wrap
def wrapped_via_decorator(a):
return a + 1
wrap('wrapped_with_submodule')
def wrapped_with_submodule(x: torch.Tensor, batchnorm1d: torch.nn.BatchNorm1d):
return batchnorm1d(x)
real_wrapped_via_decorator = wrapped_via_decorator
real_a_lifed_leaf = a_lifted_leaf
real_a_lifed_leaf2 = a_lifted_leaf2
_sqrt = sqrt
wrap('wrapper_fn')
def wrapper_fn(x):
return torch.foo(x)
class Pair(NamedTuple):
x : torch.Tensor
y : torch.Tensor
# for testing pytrees
class Foo(object): # noqa: B209
def __init__(self, a, b):
self.a = a
self.b = b
class TestFX(JitTestCase):
def setUp(self):
        # Checking for mutable operations while tracing is feature-flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
if not (TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS):
lib_file_path = find_library_location('libtorchbind_test.so')
torch.ops.load_library(str(lib_file_path))
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None):
"""Check that an nn.Module's results match the GraphModule version
for a given set of args/kwargs.
"""
kwargs = kwargs if kwargs else {}
ref_outs = m(*args, **kwargs)
gm = symbolic_trace(m)
gm.graph.lint()
test_outs = gm(*args, **kwargs)
self.assertEqual(ref_outs, test_outs)
def test_graph_module(self):
class MySub(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.w + x
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin = torch.nn.Linear(4, 3)
self.sub_mod = MySub()
self.w = torch.nn.Parameter(torch.rand(3))
def forward(self, A, B, c):
t = torch.sigmoid(A) + self.lin(c)
return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3))
m = MyModule()
gm = symbolic_trace(m)
ms = torch.jit.script(gm)
class M2(torch.nn.Module):
def forward(self, A):
m, idx = torch.max(A, 0)
return m + 1, idx + 1
m2 = M2()
gm2 = symbolic_trace(m2)
class T(torch.nn.Module):
def forward(self, A, b=4, *args, c=5, **kwargs):
x = A + 1 + args[0] + kwargs['3']
return x
t = T()
symbolic_trace(t)
# test for issue described at https://github.com/pytorch/pytorch/issues/63883
class M3(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
m3 = M3()
gm3 = symbolic_trace(m3)
new_instance = gm3.__new__(type(gm3))
new_instance.__init__(gm3, gm3.graph)
x = torch.randn(5, 3)
torch.testing.assert_allclose(new_instance(x), torch.relu(x))
def test_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(torch.sin(x + y), gm(x, y))
def test_args_kwargs(self):
class T(torch.nn.Module):
def forward(self, *args, **kwargs):
x = args[0] + kwargs['foo']
return x
t = T()
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_args_kwargs_no_self(self):
class T(torch.nn.Module):
def forward(*args, **kwargs): # noqa: B902
self = args[0]
return torch.relu(args[1])
t = T()
with self.assertRaisesRegex(RuntimeError, r'cannot be part of \*args expansion'):
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_fx_shifts(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x << 3, x >> 3
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_fx_and_or(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x & x, x | x
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_dict(self):
class MyDictMod(torch.nn.Module):
def forward(self, d):
return d['3'].relu(), {'4' : d['3'].neg()}
input_dict = {'3': torch.rand(3, 4)}
m = MyDictMod()
self.checkGraphModule(m, (input_dict,))
def test_matmul_tracing(self):
const = torch.randn(3)
def matmul_f(x):
return x @ const
mod = symbolic_trace(matmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), matmul_f(inp))
def rmatmul_f(x):
return const @ x
mod = symbolic_trace(rmatmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), rmatmul_f(inp))
def test_disallow_override(self):
# Custom delegate to disallow in-place tensor operations
class NoMutableCallTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
name = target if isinstance(target, str) else torch.typename(target)
if name[-1] == '_':
raise RuntimeError('In-place operations are not supported')
return super().create_node(kind, target, args, kwargs, name)
# Test method
class MyInplaceMod(torch.nn.Module):
def forward(self, x):
x.add_(3.0)
return x
m = MyInplaceMod()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m)
# Test free function
class MyInplaceMod2(torch.nn.Module):
def forward(self, x):
torch.log_(x)
return x
m2 = MyInplaceMod2()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m2)
# Test symbolic node as an arg
class MyInplaceMod3(torch.nn.Module):
def forward(self, x):
y = torch.ones(3, 4)
y.add_(x)
return x
m3 = MyInplaceMod3()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m3)
def test_leaf_module(self):
        # Custom delegate so that there are no leaf modules; everything
        # should get traced through
class NoLeafModulesTracer(Tracer):
def is_leaf_module(self, m, qualname):
return False
class MyReluMod(torch.nn.Module):
def __init__(self):
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x)
mrm = MyReluMod()
sym = NoLeafModulesTracer().trace(mrm)
for node in sym.nodes:
self.assertNotEqual(node.op, 'call_module')
sym.lint()
def test_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf2', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_wrapped_via_decorator(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(m).transform()
self.assertIn('wrapped_via_decorator', transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
m = symbolic_trace(M())
self.assertIn("wrapped_with_submodule", m.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), m(input))
def test_wrapped_retrace(self):
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
retraced = symbolic_trace(m)
self.assertIn('wrapped_via_decorator', retraced.code)
self.assertEqual(retraced(0), 1)
def test_graph_edit_with_proxy(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
gm.graph.lint()
self.assertEqual(gm(3, 4), 14)
def test_graph_unique_names(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
seen_names : Set[str] = set()
for node in gm.graph.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_stack_traces(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
tracer = torch.fx.Tracer()
tracer.record_stack_traces = True
graph = tracer.trace(M())
for node in graph.nodes:
if node.op == 'output':
continue
self.assertTrue(node.stack_trace is not None)
assert 'test_fx.py' in node.stack_trace
def test_graph_unique_names_manual(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1')
c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
graph2 = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
graph2.graph_copy(graph, val_map)
seen_names : Set[str] = set()
for node in graph2.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_unpack(self):
class M(torch.nn.Module):
def forward(self, a, b):
c, d = a
return c + d + b
a = (torch.rand(1), torch.rand(1))
b = torch.rand(1)
m = M()
self.checkGraphModule(m, (a, b))
def test_native_callable(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
raise unittest.SkipTest("non-portable load_library call used in test")
# This test exercises the case where we use FX to translate from Python
# code to some native callable object
#
# For the purposes of testing, we use ElementwiseInterpreter defined
# in test_custom_class.cpp.
#
# We test that we can
# 1) Construct a native callable from FX IR
# 2) Construct a drop-in replacement module that delegates to the
# native callable rather than the original code
# 3) Run both the original code and native callable wrapper with
# equivalent results
# 4) TorchScript compile the native callable wrapper and confirm
# equivalent results with the reference
# 5) TorchScript serialize and deserialize the native callable
# and confirm equivalent results with the reference
# We use this simple Module as a reference computation
class MySimpleMod(torch.nn.Module):
def forward(self, x):
return 3.0 * x + x
msm = MySimpleMod()
# This is what a lowering pass might look like: a function that takes
# a valid nn.Module, symbolically traces it, lowers the Module to some
# representation, and wraps that representation up into another
# nn.Module instance that handles dispatch to the compiled/lowered code.
def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module:
# ===== Stage 1: Symbolic trace the module =====
mod = symbolic_trace(orig_mod)
# ===== Stage 2: Lower GraphModule representation to the C++
# interpreter's instruction format ======
instructions = []
constant_idx = 0
constants = {}
fn_input_names = []
target_to_name = {
operator.add : "add",
operator.mul : "mul"
}
output_node : Optional[Node] = None
# For each instruction, create a triple
# (instruction_name : str, inputs : List[str], output : str)
# to feed into the C++ interpreter
for n in mod.graph.nodes:
target, args, out_name = n.target, n.args, n.name
assert len(n.kwargs) == 0, "kwargs currently not supported"
if n.op == 'placeholder':
# Placeholders specify function argument names. Save these
# for later when we generate the wrapper GraphModule
fn_input_names.append(target)
elif n.op == 'call_function':
                    assert target in target_to_name, "Unsupported call target " + str(target)
arg_names = []
for arg in args:
if not isinstance(arg, Node):
# Pull out constants. These constants will later be
# fed to the interpreter C++ object via add_constant()
arg_name = f'constant_{constant_idx}'
constants[arg_name] = torch.tensor(
[arg] if isinstance(arg, numbers.Number) else arg)
arg_names.append(arg_name)
constant_idx += 1
else:
arg_names.append(arg.name)
instructions.append((target_to_name[target], arg_names, out_name))
elif n.op == 'output':
if output_node is not None:
raise RuntimeError('Multiple output nodes!')
output_node = n
else:
raise RuntimeError('Unsupported opcode ' + n.op)
interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter()
# Load constants
for k, v in constants.items():
interpreter.add_constant(k, v)
# Specify names for positional input arguments
interpreter.set_input_names(fn_input_names)
# Load instructions
interpreter.set_instructions(instructions)
# Specify name for single output
assert isinstance(output_node.args[0], torch.fx.Node)
interpreter.set_output_name(output_node.args[0].name)
# ===== Stage 3: Create a wrapper GraphModule around the interpreter =====
class WrapperModule(torch.nn.Module):
def __init__(self, interpreter):
super().__init__()
self.interpreter = interpreter
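            # Storing the interpreter as an attribute lets the handcrafted graph
            # below reference it via a `get_attr` node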
wrapper = WrapperModule(interpreter)
# Create a graph that: 1) Takes function arguments 2) Invokes the interpreter
            # 3) Returns the specified return value
# FIXME: The following code could be greatly simplified by symbolic_trace'ing
# the wrapper with a Tracer that considers the Wrapper instance a root
# module, however, I can't get `__call__` exposed on TorchBind classes
# without it messing up Python `hasattr` for some reason. More digging
# into CPython's implementation of hasattr is probably in order...
graph = torch.fx.Graph()
# Add placeholders for fn inputs
placeholder_nodes = []
for name in fn_input_names:
placeholder_nodes.append(graph.create_node('placeholder', name))
# Get the interpreter object
interpreter_node = graph.create_node('get_attr', 'interpreter')
# Add a node to call the interpreter instance
output_node = graph.create_node(
op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes))
# Register output
graph.output(output_node)
graph.lint()
# Return final GraphModule!!!
return GraphModule(wrapper, graph)
# Lower GraphModule to C++ interpreter
lowered = lower_to_elementwise_interpreter(msm)
# Compare correctness with original module
x = torch.rand(3, 4)
ref_out = msm(x)
test_out = lowered(x)
torch.testing.assert_close(test_out, ref_out)
# Test TorchScript compilation
scripted_lowered = torch.jit.script(lowered)
script_out = scripted_lowered(x)
torch.testing.assert_close(script_out, ref_out)
# Test TorchScript ser/de
import_copy = self.getExportImportCopy(scripted_lowered)
imported_out = import_copy(x)
torch.testing.assert_close(imported_out, ref_out)
def test_reserved_getattr(self):
"""Ensure that we do not name any nodes with a reserved builtin like `getattr`"""
class M(torch.nn.Module):
def forward(self, a):
return a.foo.bar.baz
m = M()
m_g = symbolic_trace(m)
m_g.graph.lint()
for node in m_g.graph.nodes:
self.assertTrue(node.name != "getattr")
def test_node_tagging(self):
class TaggingTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
n = super().create_node(kind, target, args, kwargs, name)
n.tag = 'foo'
return n
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = TaggingTracer().trace(m)
g.lint()
for n in g.nodes:
self.assertTrue(hasattr(n, 'tag'))
self.assertEqual(n.tag, 'foo')
def test_tensor_attribute(self):
class TensorAttribute(torch.nn.Module):
def __init__(self):
super().__init__()
self.tensor = torch.rand(3, 4)
def forward(self, x):
return torch.nn.functional.linear(x, self.tensor)
ta = TensorAttribute()
traced = symbolic_trace(ta)
traced(torch.rand(4, 4))
class WrapperForQualname(torch.nn.Module):
def __init__(self):
super().__init__()
self.ta = TensorAttribute()
def forward(self, x):
return torch.nn.functional.linear(x, self.ta.tensor)
wfq = WrapperForQualname()
traced2 = symbolic_trace(wfq)
traced2.graph.lint()
traced2(torch.rand(4, 4))
def test_tensor_attribute_coalseced(self):
def count_attrs(fx_module):
targets = set()
            for node in fx_module.graph.nodes:
if node.op == 'get_attr':
targets.add(node.target)
return len(targets)
val = torch.tensor(5)
def f(x):
return x + val + val
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 1)
val2 = torch.tensor(5)
def f(x):
val = torch.tensor(5)
return x + val + val2
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 2)
def test_symbolic_trace_sequential(self):
class Simple(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
seq = torch.nn.Sequential(
Simple(),
Simple(),
Simple()
)
traced = symbolic_trace(seq)
traced.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(traced(x), seq(x))
def test_tensor_constant(self):
class ConstTensor(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.linear(x, torch.zeros(3, 4))
ct = ConstTensor()
traced = symbolic_trace(ct)
traced.graph.lint()
traced(torch.rand(4, 4))
def test_pickle_graphmodule(self):
class Nested(torch.nn.Module):
def __init__(self):
super().__init__()
self.st = torch.nn.Linear(4, 4)
def forward(self, x):
return self.st(x)
n = Nested()
traced = symbolic_trace(n)
traced.graph.lint()
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(loaded(x), traced(x))
def test_pickle_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(loaded(x, y), gm(x, y))
def test_all_input_nodes(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.placeholder('x')
b : torch.fx.Node = graph.call_module('linear_mod', args=(a,))
c : torch.fx.Node = graph.get_attr('y_attr')
d : torch.fx.Node = graph.call_function(operator.add, args=(b, c))
e : torch.fx.Node = graph.call_function(torch.unsqueeze, args=(d, 0))
graph.output(e)
graph.lint()
self.assertEqual(b.all_input_nodes, [a])
self.assertEqual(c.all_input_nodes, [])
self.assertEqual(d.all_input_nodes, [b, c])
self.assertEqual(e.all_input_nodes, [d])
def test_deepcopy_graphmodule_with_transform(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
def transform(traced):
new_graph = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_value = new_graph.graph_copy(traced.graph, val_map)
relu_out = new_graph.create_node(
op='call_method', target='neg', args=(output_value,), kwargs={})
new_graph.output(relu_out)
return GraphModule(traced, new_graph)
transformed = transform(traced)
transformed.graph.lint()
copied = copy.deepcopy(transformed)
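        # Each GraphModule gets its own generated class, so the deepcopy
        # should have a distinct type from the original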
self.assertNotEqual(id(type(transformed)), id(type(copied)))
x = torch.randn(3, 4)
self.assertEqual(copied(x), transformed(x))
def test_deepcopy_with_submods_params(self):
class Bar(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
def forward(self, x):
return torch.relu(x) + self.param
class Baz(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.bar = Bar()
def forward(self, x):
return self.bar(x) - self.param
baz = Baz()
traced = symbolic_trace(baz)
traced.graph.lint()
copied = copy.deepcopy(traced)
copied.graph.lint()
def test_deepcopy_graph_with_tracer_cls(self):
class TestTracer(Tracer):
def is_leaf_module(self, module, name):
return True
g = Graph(tracer_cls=TestTracer)
x = g.placeholder("x")
g.output(x)
h = copy.deepcopy(g)
self.assertIsNotNone(h._tracer_cls)
self.assertTrue(g._tracer_cls == h._tracer_cls)
def test_unpack_list_better_error(self):
class SomeArgs(torch.nn.Module):
def forward(self, a, b):
return torch.rand(3, 4)
class UnpacksList(torch.nn.Module):
def __init__(self):
super().__init__()
self.sa = SomeArgs()
def forward(self, x : list):
return self.sa(*x)
ul = UnpacksList()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ul)
def test_unpack_dict_better_error(self):
class SomeKwargs(torch.nn.Module):
def forward(self, x=3, y=4):
return torch.rand(3, 4)
class UnpacksDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.sk = SomeKwargs()
def forward(self, x : dict):
return self.sk(**x)
ud = UnpacksDict()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ud)
def test_pretty_print_targets(self):
# Test that Graph pretty-print prints friendly name for targets
# in `operator` and `builtins`
class SomeMod(torch.nn.Module):
def forward(self, x):
return torch.add(x.foo + x.bar, 3.0)
traced = symbolic_trace(SomeMod())
graph_str = str(traced.graph)
self.assertIn('builtins.getattr', graph_str)
self.assertIn('operator.add', graph_str)
self.assertIn('torch.add', graph_str)
def test_pretty_print_node(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.param: torch.nn.Parameter = torch.nn.Parameter(
torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x: torch.Tensor, y: int = 2):
return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0)
traced = symbolic_trace(M())
all_formatted = "\n".join([n.format_node() for n in traced.graph.nodes])
FileCheck().check("x").check("placeholder") \
.check("y").check("placeholder") \
.check("getitem").check("call_function") \
.check("param").check("get_attr") \
.check("add").check("call_function") \
.check("linear").check("call_module") \
.check("clamp").check("call_method") \
.run(all_formatted)
def test_script_tensor_constant(self):
# TorchScript seems to ignore attributes that start with `__`.
# We used to call anonymous Tensor values `__tensor_constant*`, but
# they were getting ignored by script. Now they're called
# `_tensor_constant*`
class IHaveATensorConstant(torch.nn.Module):
def forward(self, x):
return x + torch.rand(3, 4)
traced = torch.fx.symbolic_trace(IHaveATensorConstant())
torch.jit.script(traced)
def test_autowrap_functions(self):
class AutowrapFnTest(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2)
class AutowrapFnTest2(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2) + fx_int_x2(x.shape[0] / 2)
# Check function(s) are wrapped
# `int` would normally throw a TypeError as argument can't be `Proxy`
tracer = Tracer(autowrap_functions=(fx_int,))
graph = tracer.trace(AutowrapFnTest())
traced = GraphModule(tracer.root, graph, 'test')
tracer_2 = Tracer(autowrap_functions=(fx_int, fx_int_x2))
tracer_2.trace(AutowrapFnTest2())
# Test scriptability
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(4)), 2)
def test_torch_fx_len(self):
class FXLenTest(torch.nn.Module):
def forward(self, x):
return len(x)
traced = symbolic_trace(FXLenTest())
self.assertEqual(traced(torch.rand(3, 4)), 3)
# Test scriptability
scripted = torch.jit.script(FXLenTest())
self.assertEqual(scripted(torch.rand(3)), 3)
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(3)), 3)
# Test non-proxy len
class FXLenTest2(torch.nn.Module):
def __init__(self):
super().__init__()
self.l = [3, 4, 5]
def forward(self, x):
return x + len(self.l)
traced2 = symbolic_trace(FXLenTest2())
inp = torch.rand(3, 4)
self.assertEqual(traced2(inp), inp + 3.0)
self.assertIs(len, builtins.len)
def test_torch_fx_getattr(self):
class FXGetattrTest(torch.nn.Module):
def forward(self, x):
return getattr(x, 'nonexistent_attr', torch.Tensor([2, 3]))
traced = symbolic_trace(FXGetattrTest())
self.assertEqual(traced(torch.rand(3, 4)), torch.Tensor([2, 3]))
def test_sqrt(self):
class Sqrt1(torch.nn.Module):
def forward(self, x):
return sqrt(x.size(0))
class Sqrt2(torch.nn.Module):
def forward(self, x):
return math.sqrt(x.size(0))
class Sqrt3(torch.nn.Module):
def forward(self, x):
return x + math.sqrt(2) + sqrt(2)
self.checkGraphModule(Sqrt1(), [torch.zeros(8)])
self.checkGraphModule(Sqrt2(), [torch.zeros(8)])
self.checkGraphModule(Sqrt3(), [torch.zeros(8)])
self.assertIs(sqrt, _sqrt)
self.assertIs(math.sqrt, _sqrt)
def test_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
out = gm(input)
self.assertEqual(out, ref_out)
def test_pickle_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
self.assertEqual(loaded(input), gm(input))
def test_pretty_print(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
printed = str(traced)
assert 'SimpleTest()' in printed
assert 'torch.relu' in printed
def test_pretty_print_graph(self):
class KwargPrintTest(torch.nn.Module):
def forward(self, x):
return torch.squeeze(x + 3.0, dim=2)
st = KwargPrintTest()
traced = symbolic_trace(st)
traced.graph.lint()
stringed = str(traced.graph)
for s in ['args', 'kwargs', '#users']:
assert s in stringed
def test_custom_proxy_type(self):
class TensorPair:
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair(x : TensorPair, y : TensorPair):
s = x.add(y)
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair(x, y)
traced = symbolic_trace(use_tensor_pair)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_type_literal(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_literal(x : TensorPair):
s = x.add(TensorPair(torch.zeros(5, 3), torch.zeros(5, 3)))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair_literal(x)
traced = symbolic_trace(use_tensor_pair_literal)
traced_out = traced(x)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_dynamic_value(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor):
s = x.add(TensorPair(y, y))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = torch.randn(5, 3)
ref_out = use_tensor_pair_ctor(x, y)
traced = symbolic_trace(use_tensor_pair_ctor)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_input_dependent_control_flow(self):
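        # With ProxyableClassMeta, constructing ZeroTensor from Proxy inputs
        # records a call_function node instead of running __init__, so the
        # input-dependent branching executes at runtime on real tensors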
class ZeroTensor(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, inp):
if inp.sum() == 0:
self.is_zero = True
self.tensor = torch.tensor([])
else:
self.is_zero = False
self.tensor = inp
def add(self, other):
if self.is_zero:
return ZeroTensor(other.tensor)
elif other.is_zero:
return self
def use_zero_tensor(x : torch.Tensor, y : torch.Tensor):
return ZeroTensor(x + y)
x, y = torch.randn(5, 3), torch.randn(5, 3)
ref_out = use_zero_tensor(x, y)
traced = symbolic_trace(use_zero_tensor)
traced_out = traced(x, y)
self.assertEqual(traced_out.is_zero, ref_out.is_zero)
self.assertEqual(traced_out.tensor, ref_out.tensor)
def test_graph_fns(self):
g = Graph()
a = g.placeholder('a')
b = g.call_module('linear', (a,))
c = g.get_attr('bias')
d = g.call_method('add', (b, c))
e = g.call_function(torch.sin, (d,))
g.output(e)
mod = torch.nn.Module()
mod.linear = torch.nn.Linear(3, 4)
mod.bias = torch.rand(4)
gm = GraphModule(mod, g)
gm.graph.lint()
input = torch.rand(3)
r = gm(input)
ref = torch.sin(mod.linear(input) + mod.bias)
self.assertEqual(r, ref)
def test_remove_uses(self):
g : torch.fx.Graph = Graph()
x : torch.fx.Node = g.placeholder('x')
relu : torch.fx.Node = g.call_function(torch.relu, (x,))
neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
g.output(neg)
neg.replace_all_uses_with(relu)
g.erase_node(neg)
self.assertTrue(neg not in relu.users)
def test_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(3, 4)
symbolic_trace(eb)
def test_pickle_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(10, 3, mode='sum')
traced = symbolic_trace(eb)
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.LongTensor([0, 4])
self.assertEqual(loaded(input, offsets), traced(input, offsets))
def test_return_tuple(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return (x, x + x)
original = M()
traced = symbolic_trace(original)
self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1)))
def test_construct_root_dict(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
linear_mod : torch.nn.Module = torch.nn.Linear(3, 4)
add_param : torch.Tensor = torch.rand(3, 4)
gm : torch.fx.GraphModule = torch.fx.GraphModule(
{'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph)
gm.graph.lint()
assert 'self.foo.bar.baz' in gm.code
x : torch.Tensor = torch.rand(3, 3)
out : torch.Tensor = gm(x)
ref_out : torch.Tensor = linear_mod(x) + add_param
self.assertEqual(out, ref_out)
def test_symbolic_trace_assert(self):
class AssertsTensorShape(torch.nn.Module):
def forward(self, x):
torch._assert(x.shape[1] > 4, "assert_foobar")
return x
m = AssertsTensorShape()
# verify traceability
traced = symbolic_trace(m)
# verify assertion on traced model works correctly at runtime
traced(torch.rand(4, 5))
with self.assertRaisesRegex(AssertionError, "assert_foobar"):
traced(torch.rand(4, 3))
# verify the symbolically traced module is scriptable
ms = torch.jit.script(m)
with self.assertRaisesRegex(torch.jit.Error, "assert_foobar"):
ms(torch.rand(4, 3))
def test_fx_create_arg(self):
class CustomArgObject:
def __init__(self, x, y):
self.x = x
self.y = y
def __fx_create_arg__(self, tracer: torch.fx.Tracer):
return tracer.create_node(
"call_function",
CustomArgObject,
args=(
tracer.create_arg(self.x),
tracer.create_arg(self.y),
),
kwargs={},
)
class HasCustomArgObjectWhenLeaf(torch.nn.Module):
def forward(self, o: CustomArgObject):
# Not normally traceable; good reason to make
# this module a leaf.
for x in o.x:
o.y += x
return o.y
class Root(torch.nn.Module):
def __init__(self):
super().__init__()
self.inner = HasCustomArgObjectWhenLeaf()
def forward(self, x, y):
o = CustomArgObject(x, y)
return self.inner(o)
class CreateArgTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is HasCustomArgObjectWhenLeaf
m = Root()
graph = CreateArgTracer().trace(m)
gm = torch.fx.GraphModule(m, graph)
assert "CustomArgObject(" in gm.code
def test_trace_fn_constant(self):
some_constant = torch.rand(3, 4)
def add_const(x):
return some_constant + x
traced = symbolic_trace(add_const)
input = torch.rand(3, 4)
self.assertEqual(traced(input), add_const(input))
def test_copy_no_remap(self):
traced = symbolic_trace(SimpleTest())
g = traced.graph
copied = torch.fx.Graph()
for node in g.nodes:
copied.node_copy(node)
with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'):
copied.lint()
def test_wrong_topo(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
nodes = list(graph.nodes)
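        # Move the get_attr node after the add that consumes it, creating a
        # use-before-definition that lint() should flag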
nodes[3].append(nodes[2])
with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'):
graph.lint()
def test_wrong_target_type(self):
graph : torch.fx.Graph = torch.fx.Graph()
with self.assertRaises(ValueError):
n = torch.fx.Node(graph=graph, name='foo', op='call_function', target='foo',
args=(), kwargs={})
def test_example_shape_prop(self):
class TestCase(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr = torch.randn(3, 4)
self.submod = torch.nn.Linear(4, 4)
def forward(self, x):
return torch.neg(self.submod(x.relu() + self.attr))
tc = TestCase()
tc_traced = symbolic_trace(tc)
ref_out = tc_traced(torch.rand(3, 4))
shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4))
# Make sure we're testing all opcodes
opcodes = set()
        output_shape : Optional[torch.Size] = None
        output_stride : Optional[Tuple[int, ...]] = None
for node in tc_traced.graph.nodes:
opcodes.add(node.op)
if node.op == 'output':
output_shape = node.args[0].meta['tensor_meta'].shape
output_stride = node.args[0].meta['tensor_meta'].stride
self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method',
'call_module', 'output']))
        # Test shape propagation and make sure results match actual
self.assertEqual(output_shape, ref_out.shape)
self.assertEqual(output_stride, ref_out.stride())
def test_shape_prop_layout(self):
class ConvTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv2d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
# contiguous layout
test_mod = ConvTest()
traced = symbolic_trace(test_mod)
x = torch.randn(5, 5, 224, 224)
shape_prop.ShapeProp(traced).propagate(x)
        assert all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
                   for node in traced.graph.nodes)
x_channels_last = x.contiguous(memory_format=torch.channels_last)
traced.to(memory_format=torch.channels_last)
shape_prop.ShapeProp(traced).propagate(x_channels_last)
for node in traced.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last)
def test_shape_prop_aggregate(self):
class ReturnTwo(torch.nn.Module):
def forward(self, x):
return (3, torch.sum(x))
class UnderTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.rt = ReturnTwo()
def forward(self, x):
return self.rt(x)
ut = UnderTest()
class RTTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is ReturnTwo
graph = RTTracer().trace(ut)
mod = torch.fx.GraphModule(ut, graph)
shape_prop.ShapeProp(mod).propagate(torch.rand(3, 4))
for node in mod.graph.nodes:
if node.op == 'call_module':
assert 'tensor_meta' in node.meta
tensor_meta = node.meta['tensor_meta']
assert tensor_meta[0] == 3
assert tensor_meta[1].shape == torch.Size([])
def test_shape_prop_layout_3d(self):
class ConvTest3d(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv3d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
test_mod_3d = ConvTest3d()
traced_3d = symbolic_trace(test_mod_3d)
x_3d = torch.randn(5, 5, 224, 224, 15)
shape_prop.ShapeProp(traced_3d).propagate(x_3d)
        assert all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
                   for node in traced_3d.graph.nodes)
x_channels_last_3d = x_3d.contiguous(memory_format=torch.channels_last_3d)
traced_3d.to(memory_format=torch.channels_last_3d)
shape_prop.ShapeProp(traced_3d).propagate(x_channels_last_3d)
for node in traced_3d.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last_3d)
def test_interpreter(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
interpreter = Interpreter(gm)
input = torch.randn(3, 4)
self.assertEqual(interpreter.run(input), gm(input))
self.assertEqual(interpreter.run(input), m(input))
def test_interpreter_run_node_override(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
class RunNodeInterpreter(Interpreter):
def __init__(self, module):
super().__init__(module)
def run_node(self, n : Node) -> Any:
result = super().run_node(n)
n.cached_value = result
return result
input = torch.randn(3, 4)
RunNodeInterpreter(gm).run(input)
for node in gm.graph.nodes:
assert hasattr(node, 'cached_value')
def test_interpreter_onthefly_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapInterpreter(Interpreter):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
                return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
                return super().call_method(target, args, kwargs)
input = torch.randn(3, 4)
result = NegSigmSwapInterpreter(gm).run(input)
self.assertEqual(result, torch.neg(input).sigmoid())
def test_interpreter_partial_eval(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
gm = torch.fx.symbolic_trace(MyModule())
interp = Interpreter(gm)
env = {}
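        # Pre-seed the environment with a value for the `linear` node so the
        # interpreter treats it as already computed and skips executing it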
for node in gm.graph.nodes:
if node.op == 'call_module' and node.target == 'linear':
env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0
break
assert len(env) == 1
x = torch.randn(3, 4)
result = interp.run(x, initial_env=env)
self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0))
def test_interpreter_star_args(self):
def with_star_args(x, *args):
return x + args[0]
gm = torch.fx.symbolic_trace(with_star_args)
interp = Interpreter(gm)
result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4))
self.assertEqual(result, torch.ones(3, 4) * 2.0)
@skipIfNoTorchVision
def test_interpreter_noop_resnet18(self):
rn18 = torchvision_models.resnet18()
transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform()
inp = torch.randn(5, 3, 224, 224)
self.assertEqual(transformed(inp), rn18(inp))
@skipIfNoTorchVision
def test_interpreter_gc_values(self):
rn18 = torchvision_models.resnet18()
interp = Interpreter(symbolic_trace(rn18))
inp = torch.rand(5, 3, 224, 224)
out = interp.run(inp)
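        # The interpreter should free intermediate values once their last use
        # passes, leaving only the output in the environment after run()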
env_key_names = set(n.name for n in interp.env.keys())
self.assertEqual(env_key_names, set(['output']))
def test_transformer_noop(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_transformer_op_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapXformer(Transformer):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
                return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
                return super().call_method(target, args, kwargs)
transformed = NegSigmSwapXformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(transformed(input), torch.neg(input).sigmoid())
def test_transformer_multi_outputs(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
x = x + self.param
out = self.linear(x)
return x, out
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_fn_type_annotations(self):
class Foo(torch.nn.Module):
def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]:
return {'a': p.x + p.y + z + i}
foo_scripted = torch.jit.script(Foo())
foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
fxed = symbolic_trace(Foo())
fxed_scripted = torch.jit.script(fxed)
fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
def test_fn_type_annotation_empty(self):
def forward(a : List[torch.Tensor]):
return a[0]
torch.jit.script(symbolic_trace(forward))
def test_wrapped_method(self):
def wrap_with_relu(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
return torch.relu(fn(*args, **kwargs))
return wrapper
class Foo(torch.nn.Module):
@wrap_with_relu
def forward(self, x, w):
return torch.matmul(x, w)
f = Foo()
traced = symbolic_trace(f)
        x, w = torch.rand(3, 4), torch.rand(4, 4)
        self.assertEqual(traced(x, w), f(x, w))
        self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes))
def test_empty_graph_codegen(self):
graph = torch.fx.Graph()
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(gm(), None)
def test_sequential(self):
m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1))
gm = torch.fx.symbolic_trace(m)
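        # Smoke test: deepcopy of a traced GraphModule should not raise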
gm_copy = copy.deepcopy(gm)
def test_ctx_mgr(self):
@contextlib.contextmanager
def do_nothing():
yield
class M(torch.nn.Module):
def __init__(self):
super().__init__()
@do_nothing()
def forward(self, x):
return torch.relu(x)
m = M()
self.checkGraphModule(m, (torch.rand(3, 4),))
def test_typename_print(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,),
type_expr=List[float])
output : torch.fx.Node = graph.output(b)
self.assertTrue('typing.List[float]' in str(graph))
def test_layout(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return torch.empty_like(x, layout=torch.strided, pin_memory=False).fill_(0)
traced = symbolic_trace(M())
x = torch.rand(5, 9, 3, 4)
self.assertEqual(traced(x), torch.zeros_like(x))
def test_ellipsis(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
return x + y[:, 1:10, ...]
traced = symbolic_trace(M())
x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4)
self.assertEqual(traced(x, y), x + y[:, 1:10, ...])
def test_inf_nan(self):
class FooMod(torch.nn.Module):
def forward(self, x):
return x + float('inf'), x + float('-inf'), x + float('nan')
fm = FooMod()
self.checkGraphModule(fm, (torch.rand(3, 4),))
def test_inf_nan_kwds(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('inf')), {}, name='inf')
c : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('nan')), {}, name='nan')
graph.output((b, c))
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
x = torch.rand(3, 4)
self.assertEqual(gm(x), (x + float('inf'), x + float('nan')))
def test_deepcopy_recursion_depth(self):
depth = sys.getrecursionlimit() + 20
g = torch.fx.Graph()
x = g.placeholder('x')
for i in range(depth):
x = g.call_function(torch.relu, (x,))
g.output(x)
copied_graph = copy.deepcopy(g)
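        # Map original nodes to their copies so user sets can be compared
        # structurally across the two graphs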
val_map = {}
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
val_map[orig_node] = new_node
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
orig_users = set(orig_node.users.keys())
orig_users_equiv = set(val_map[u] for u in orig_users)
new_users = set(new_node.users.keys())
self.assertEqual(orig_users_equiv, new_users)
@skipIfNoTorchVision
def test_replace_uses(self):
rn18 = torchvision_models.resnet18()
class LowerReluTracer(torch.fx.Tracer):
def is_leaf_module(self, m : torch.nn.Module, qualname : str):
if isinstance(m, torch.nn.ReLU):
return False
return super().is_leaf_module(m, qualname)
rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18))
to_erase = []
for node in rn18_traced.graph.nodes:
if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]:
                kwargs = node.kwargs.copy()
                # torch.neg has no `inplace` kwarg, so drop it from the copied kwargs
                kwargs.pop('inplace')
                with rn18_traced.graph.inserting_before(node):
                    new_node = rn18_traced.graph.call_function(
                        the_function=torch.neg, args=node.args, kwargs=kwargs)
                node.replace_all_uses_with(replace_with=new_node)
to_erase.append(node)
for node in to_erase:
rn18_traced.graph.erase_node(node)
def test_replace_input(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
b.replace_input_with(x, y)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input_x = torch.randn(33, 44)
input_y = torch.randn(11, 22)
self.assertEqual(gm(input_x, input_y), torch.relu(input_y))
def test_insertion_point(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
with graph.inserting_before(b):
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_update_args_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_arg(0, y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_update_kwargs_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, kwargs={'input': x})
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_kwarg('input', y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_move_before(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
b.prepend(neg)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_prepend_self(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
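        # Prepending a node before itself should be a no-op, leaving the
        # graph's three nodes intact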
b.prepend(b)
x.append(b)
self.assertEqual(len(graph.nodes), 3)
def test_erase_node_error(self):
st = SimpleTest()
traced = symbolic_trace(st)
for node in traced.graph.nodes:
# Test deleting with uses both in another Node and at the output
if node.target in [operator.add, torch.relu]:
with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'):
traced.graph.erase_node(node)
def test_copy_it(self):
d = immutable_dict([(3, 4), (5, 6)])
l = immutable_list([(3, 4), (5, 6)])
self.assertEqual(d, deepcopy(d))
self.assertEqual(l, deepcopy(l))
def test_get_torch_func_signature(self):
for key in dir(torch):
obj = getattr(torch, key)
if callable(obj):
schemas = get_signature_for_torch_op(obj)
def test_find_uses(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
y = torch.relu(x)
z = x + x
u = torch.neg(x)
graph.output((y + z + u).node)
graph.lint()
users_of_x = x.node.users
self.assertEqual(len(users_of_x), 3)
expected_ops = set(['relu', 'add', 'neg'])
for use in users_of_x:
assert any(use.name.startswith(prefix) for prefix in expected_ops)
def test_inline_graph(self):
class InlineInto(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class ToInline(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
inline_into = symbolic_trace(InlineInto())
to_inline = symbolic_trace(ToInline())
combined_graph = torch.fx.Graph()
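        # Stitch the two graphs together: copy `inline_into`, then copy
        # `to_inline` with its placeholder mapped to the first graph's output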
output_node = combined_graph.graph_copy(inline_into.graph, {})
input_node = list(to_inline.graph.nodes)[0]
assert input_node and input_node.op == 'placeholder'
val_map = {input_node : output_node}
output = combined_graph.graph_copy(to_inline.graph, val_map)
combined_graph.output(output)
combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph)
input = torch.rand(3, 4)
self.assertEqual(combined_module(input), input.relu().neg())
def test_multi_insert_point(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
relu = torch.relu(x)
with graph.inserting_before(relu.node):
y = torch.neg(x)
z = torch.tanh(y)
graph.output((relu.node, z.node))
graph.lint()
expected_ops = ['x', 'neg', 'tanh', 'relu']
for node, expected in zip(graph.nodes, expected_ops):
assert expected in node.name
def test_reassign_args_kwargs_uses(self):
graph = torch.fx.Graph()
x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y'))
z = x + y
zed = z + z + z
graph.output(zed.node)
graph.lint()
# zed = z + z + z -> zed = z + z + x
zed.node.args = (zed.node.args[0], x.node)
self.assertEqual(x.node.users.keys(), [z.node, zed.node])
# z = x + y -> z = y + y
z.node.args = (y.node, y.node)
self.assertEqual(x.node.users.keys(), [zed.node])
def test_trace_function(self):
def foo(x, y):
return torch.relu(x) + y
x, y = torch.randn(3, 4), torch.randn(3, 4)
self.checkGraphModule(foo, (x, y))
def test_trace_dict_int_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[int, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({42: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
traced_graph = MyTracer().trace(CallsModWithDict())
def test_trace_dict_proxy_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[torch.Tensor, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({x: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'):
traced_graph = MyTracer().trace(CallsModWithDict())
def test_module_deepcopy_edit_nodes(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
traced1 = symbolic_trace(Foo())
copied = copy.deepcopy(traced1)
for node in copied.graph.nodes:
if node.target == torch.relu:
node.target = torch.neg
copied.recompile()
traced1.recompile()
x = torch.randn(15, 15)
torch.testing.assert_allclose(traced1(x), torch.relu(x))
torch.testing.assert_allclose(copied(x), torch.neg(x))
def test_direct_param_use(self):
class TransposeTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.b = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.b
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
self.a = TransposeTest()
def forward(self, x):
return self.a.b, self.a.b.t(), self.a.b.view(12)
traced = torch.fx.symbolic_trace(Foo())
        assert all('constant' not in node.target for node in traced.graph.nodes)
def test_single_default_arg(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1):
return y
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
def test_multiple_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1, z=2):
return y + z
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
self.checkGraphModule(m, (3, 4))
def test_regular_and_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y=1):
return x + y
m = M()
self.checkGraphModule(m, (2,))
self.checkGraphModule(m, (2, 3))
def test_string_literal_return(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self):
return "foo"
m = M()
self.checkGraphModule(m, ())
def test_namedtuple_return_qualname(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return MyNamedTup(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), MyNamedTup(input, input))
def test_update_args_kwargs_yells_at_you(self):
symtraced = symbolic_trace(SimpleTest())
node = next(iter(symtraced.graph.nodes))
with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'):
node.__update_args_kwargs((), {})
def test_torchbind_class_attribute_in_fx(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._StackString is registered, skipping")
class FooBar1234(torch.nn.Module):
def __init__(self):
super(FooBar1234, self).__init__()
self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"])
def forward(self):
return self.f.top()
m = FooBar1234()
self.checkGraphModule(m, ())
def test_torchbind_class_attribute_in_fx_tensor_arg(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._ReLUClass is registered, skipping")
class FooBar2341(torch.nn.Module):
def __init__(self):
super(FooBar2341, self).__init__()
self.f = torch.classes._TorchScriptTesting._ReLUClass()
def forward(self, x):
return self.f.run(x)
m = FooBar2341()
traced = symbolic_trace(m)
input = torch.randn(3, 4)
self.assertEqual(traced(input), m(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_script_method_trace(self):
class Scripted(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class Holder(torch.nn.Module):
def __init__(self):
super().__init__()
self.s = torch.jit.script(Scripted())
def forward(self, x):
return self.s(x)
h = Holder()
traced = symbolic_trace(h)
input = torch.randn(3, 4)
self.assertEqual(traced(input), h(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_namedtuple_return_trace(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return Pair(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), Pair(input, input))
def test_return_type_exists(self):
class ReturnTypeModule(torch.nn.Module):
def other(self, x: List[str]) -> List[str]:
return x
def forward(self, x: List[str]) -> List[str]:
return self.other(x)
traced = symbolic_trace(ReturnTypeModule())
self.assertIn("-> typing_List[str]", traced._code)
scripted = torch.jit.script(traced)
self.assertIn("-> List[str]", scripted.code)
def getitem_inner(self):
class GetItemBase(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer('pe', torch.randn(8, 8))
class GetItem1(GetItemBase):
def forward(self, x):
return self.pe[:, :x.size(0)]
class GetItem2(GetItemBase):
def forward(self, x):
return self.pe[x.size(0)]
class GetItem3(GetItemBase):
def forward(self, x):
return self.pe[4] # fx creates `self._tensor_constant0` here
self.checkGraphModule(GetItem1(), [torch.zeros(4)])
self.checkGraphModule(GetItem2(), [torch.zeros(4)])
self.checkGraphModule(GetItem3(), [torch.zeros(4)])
@unittest.skipUnless(os.environ.get("FX_PATCH_GETITEM") == "1",
"Will be checked in test_getitem_subproc")
def test_getitem(self):
self.getitem_inner()
def test_getitem_subproc(self):
# need to run this test in a subproc to work around:
# https://github.com/pytorch/pytorch/issues/50710
proc = Process(target=run_getitem_target)
proc.start()
proc.join()
self.assertEqual(proc.exitcode, 0)
def test_user_friendly_call_provenance_with_function(self):
def fn(x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(fn)
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'fn.forward'"):
scripted = torch.jit.script(traced)
def test_user_friendly_call_provenance_with_module(self):
class M(torch.nn.Module):
def forward(self, x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(M())
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'M.forward'"):
scripted = torch.jit.script(traced)
def test_snake_case(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.activations = torch.nn.ModuleDict([
["snake_case", torch.nn.ReLU()],
["PascalCase", torch.nn.LeakyReLU()],
["ALL_CAPS", torch.nn.PReLU()]
])
def forward(self, x):
a = self.activations["snake_case"](x)
b = self.activations["PascalCase"](x)
c = self.activations["ALL_CAPS"](x)
return a, b, c
traced = symbolic_trace(M())
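        # Node names are normalized to snake_case, while targets keep the
        # original qualified attribute names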
check = [
("activations_snake_case", "activations.snake_case"),
("activations_pascal_case", "activations.PascalCase"),
("activations_all_caps", "activations.ALL_CAPS")
]
i = 0
for node in traced.graph.nodes:
if node.op == "placeholder" or node.op == "output":
continue
name = check[i][0]
target = check[i][1]
self.assertEqual(name, node.name)
self.assertEqual(target, node.target)
i += 1
self.assertEqual(i, 3)
def test_no_mutation(self):
from torch.fx.immutable_collections import immutable_list
x = immutable_list([3, 4])
with self.assertRaisesRegex(NotImplementedError, "new_args"):
x[0] = 4
def test_partial_trace(self):
class Foo(torch.nn.Module):
def forward(self, x, y):
if y:
return 2 * x
else:
return x
mod = Foo()
mod_true = symbolic_trace(mod, concrete_args={'y': True})
mod_false = symbolic_trace(mod, concrete_args={'y': False})
self.assertEqual(mod_true(3, True), 6)
        assert any(n.target == torch._assert for n in mod_true.graph.nodes)
with self.assertRaises(AssertionError):
mod_true(3, False)
self.assertEqual(mod_false(3, False), 3)
with self.assertRaises(AssertionError):
mod_false(3, True)
def f_higher(a, f):
return f(a)
nf = symbolic_trace(f_higher, concrete_args={'f': lambda x: x * 2})
self.assertEqual(nf(3, lambda x: x * 2), 6)
def test_custom_traceback_raised_when_exception_source_is_graphmodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.W = torch.nn.Parameter(torch.randn(5))
def forward(self, x):
return torch.dot(self.W, x)
traced = torch.fx.symbolic_trace(M())
out = [n for n in traced.graph.nodes if n.op == "output"][-1]
with traced.graph.inserting_before(out):
relu_out = traced.graph.call_method(method_name='relu',
args=(out.args[0],))
out.args = (relu_out,)
traced.recompile()
with self.capture_stderr() as captured:
with self.assertRaises(TypeError):
traced(5)
self.assertRegex(captured[0],
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_custom_traceback_not_raised_when_exception_source_is_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(3, 4)
def forward(self, x):
return self.linear(x)
traced = torch.fx.symbolic_trace(M())
# Do not change this to `capture_stderr` or another context
# manager without ensuring that the output is as expected
try:
traced(torch.rand(5, 5))
except RuntimeError:
captured = traceback.format_exc()
self.assertNotRegex(captured,
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_graph_module_replicate_for_dp(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
gm = torch.fx.symbolic_trace(Foo())
x = torch.randn(5, 3)
out = gm(x)
replica = gm._replicate_for_data_parallel()
out_replica = replica(x)
torch.testing.assert_allclose(out_replica, out)
def test_ast_rewriter_rewrites_assert(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_rewrites_assert_with_message(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z, "msg"
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_throw_out_variant(self):
def foo(x):
y = torch.rand_like(x)
torch.sigmoid(x, out=y)
return y
class MyTracer(torch.fx.Tracer):
check_mutable_operations = True
tracer = MyTracer()
with self.assertRaisesRegex(RuntimeError, 'mutable operation aten::sigmoid.out'):
traced_graph = tracer.trace(foo)
def test_ast_rewriter_reassigns_submodules(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.bn = torch.nn.BatchNorm2d(100)
def forward(self, x: torch.Tensor):
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf((4, y), 3)
+ a_lifted_leaf((3, 4), 5)
+ a_lifted_leaf((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_ast_rewriter_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf2((4, y), 3)
+ a_lifted_leaf2((3, 4), 5)
+ a_lifted_leaf2((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf2", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_profiler_ranges_side_effect(self):
g = torch.fx.Graph()
handle = g.call_function(torch.ops.profiler._record_function_enter, ('test_range',))
g.call_function(torch.ops.profiler._record_function_exit, (handle,))
g.output(None)
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
        self.assertEqual(
            list(found_targets.keys()),
            [torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit])
g.eliminate_dead_code()
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
        self.assertEqual(
            list(found_targets.keys()),
            [torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit])
def test_ast_rewriter_wrapped_via_decorator(self):
class F(torch.nn.Module):
def forward(self, x):
return wrapped_via_decorator(x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(F())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(traced).transform()
self.assertIn("wrapped_via_decorator", transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_with_submodule", traced.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), traced(input))
def test_submodule_manipulation_API(self):
class C(torch.nn.Module):
def __init__(self):
super(C, self).__init__()
self.conv = torch.nn.Conv2d(16, 33, 3, stride=2)
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.conv(torch.cat([self.param, x]))
class B(torch.nn.Module):
def __init__(self):
super(B, self).__init__()
self.linear = torch.nn.Linear(100, 200)
self.register_buffer("buf", torch.randn(2, 3))
self.net_c = C()
def forward(self, x):
return self.linear(torch.cat([self.buf, self.net_c(x)]))
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
self.net_b = B()
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.net_b(x) + self.param
a = symbolic_trace(A())
a.add_submodule("net_b.net_c.dropout", torch.nn.Dropout(p=0.2))
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"][-1]
with a.graph.inserting_before(conv):
with warnings.catch_warnings(record=True) as w:
dropout = a.graph.call_module(module_name="net_b.net_c.dropout",
args=conv.args)
self.assertEqual(len(w), 0)
conv.replace_all_uses_with(dropout)
a.graph.erase_node(conv)
a.recompile()
def module_exists(gm: GraphModule, path: str) -> bool:
return any(path == name for name, _ in gm.named_modules())
def parameter_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_parameters())
and any(path == name for name in gm.state_dict().keys()))
def buffer_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_buffers())
and any(path == name for name in gm.state_dict().keys()))
# Test that we added the "dropout" submodule
self.assertTrue(module_exists(a, "net_b.net_c.dropout"))
# Test `get_submodule` with an added submodule
self.assertIsNotNone(a.get_submodule("net_b.net_c.dropout"))
# Test that the "conv" submodule is still there
self.assertTrue(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with an original module
self.assertIsNotNone(a.get_submodule("net_b.net_c.conv"))
# Test that the "conv" node is NOT still there
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"]
self.assertEqual(conv, [])
a.delete_submodule("net_b.net_c.conv")
# Test that the "conv" submodule is now gone
self.assertFalse(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with a deleted submodule
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`conv`"):
self.assertIsNone(a.get_submodule("net_b.net_c.conv"))
# Test `get_attr` warnings
cat = [n for n in a.graph.nodes if n.target == torch.cat][-1]
with a.graph.inserting_before(cat):
with warnings.catch_warnings(record=True) as w:
param = a.graph.get_attr(qualified_name="net_b.net_c.param")
self.assertEqual(len(w), 0)
with self.assertWarnsRegex(UserWarning, "Attempted to "
"insert a get_attr Node with no "
"underlying reference in the "
"owning GraphModule"):
bad_param = a.graph.get_attr(qualified_name="net_b.param")
a.graph.erase_node(bad_param)
cat.args = (*cat.args, param)
a.recompile()
a.graph.lint()
# Test `get_parameter`
a.get_parameter("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "is not an "
"nn.Parameter"):
a.get_parameter("net_b.buf")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`param`"):
a.get_parameter("net_b.param")
# Test `get_buffer`
a.get_buffer("net_b.buf")
with self.assertRaisesRegex(AttributeError, "is not a "
"buffer"):
a.get_buffer("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`buf`"):
a.get_buffer("net_b.net_c.buf")
# Test non-nested attributes
a.get_submodule("")
a.get_parameter("param")
# Insert some unused submodules
a.add_submodule("net_b.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.rnn", torch.nn.RNN(10, 20, 2))
a.add_submodule("batch_norm_2d", torch.nn.BatchNorm2d(100))
# Garbage collection
a.delete_all_unused_submodules()
# Test that all the unused submodules are gone
self.assertFalse(module_exists(a, "net_b.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.rnn"))
self.assertFalse(module_exists(a, "batch_norm_2d"))
# Test that we didn't delete any unused Parameters or buffers
self.assertTrue(parameter_exists(a, "net_b.net_c.param"))
self.assertTrue(buffer_exists(a, "net_b.buf"))
a.graph.lint()
def test_delete_unused_submodules_leaf(self):
class SubModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(10, 10)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.linear(x)
x = self.relu(x)
return x
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.submod = SubModule()
def forward(self, x):
x = self.submod(x)
return x
model = Model()
class MyCustomTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return module_qualified_name == "submod"
inputs = torch.randn(1, 10)
traced_graph = MyCustomTracer().trace(model)
gm2 = torch.fx.GraphModule(model, traced_graph)
gm2.delete_all_unused_submodules()
torch.testing.assert_allclose(gm2(inputs), model(inputs))
def test_tracing_graphmodules_as_leaf_submodules(self):
class A(torch.nn.Module):
def forward(self, t):
return t + t
class B(torch.nn.Module):
def __init__(self):
super(type(self), self).__init__()
self.calling = False
self.called = False
def forward(self, t):
if self.calling:
return t - t
else:
return t + t
def __call__(self, *args):
self.called = True
self.calling = True
return super(type(self), self).__call__(*args)
self.calling = False
class M(torch.nn.Module):
def __init__(self, a, b):
super().__init__()
self.a = a
self.b = b
def forward(self, t):
x = self.a(t)
y = self.b(t)
return x + y
class LeafTracer(Tracer):
def is_leaf_module(self, module, name):
return True
class LeafTracerNotB(Tracer):
def is_leaf_module(self, module, name):
return False if "b" in name else True
# Recompile calls added "for fun", since they
# chain __call__ wrappers.
#
# Test: B as a regular, non-leaf module
#
a = symbolic_trace(A())
a.recompile()
m = M(a, B())
graph = LeafTracerNotB().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is not treated as leaf.
self.assertFalse(hasattr(gm, "b"))
# Test assert custom __call__ on submodule b was honored.
match = [
n
for n in gm.graph.nodes
if n.op == "call_function" and n.target == operator.sub
]
self.assertTrue(len(match) == 1)
#
# Test: B as a regular, leaf module
# symbolic_trace should only patch torch.nn.Module.__call__,
# which means B.__call__ should still execute
#
a = symbolic_trace(A())
a.recompile()
b = B()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is leaf:
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
# Test b.__call__ was run
self.assertTrue(b.called)
self.assertTrue(gm.get_submodule("b").called)
#
# Test: B as GraphModule leaf
# __call__ not honored since symbolic_trace directly invokes forward()
#
a = symbolic_trace(A())
a.recompile()
b = symbolic_trace(B())
b.recompile()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer("my_buff", torch.rand(3, 4))
self.register_parameter(
"my_param", torch.nn.Parameter(torch.rand(3, 4))
)
def forward(self, x):
return x + self.my_buff + self.my_param
mod = MyModule()
mod_traced = symbolic_trace(mod)
# Create new GraphModule based on original, either w/ dict or root module.
orig_buff = mod_traced.get_buffer("my_buff")
orig_param = mod_traced.get_parameter("my_param")
mod_traced_new = GraphModule(
{"my_buff": orig_buff, "my_param": orig_param} if use_dict_init else mod,
mod_traced.graph,
)
# Check that both my_buff and my_param are found and the same.
try:
new_buff = mod_traced_new.get_buffer("my_buff")
except Exception:
self.fail("Did not find my_buff")
self.assertEqual(orig_buff, new_buff)
try:
new_param = mod_traced_new.get_parameter("my_param")
except Exception:
self.fail("Did not find my_param")
self.assertEqual(orig_param, new_param)
x = torch.rand(3, 4)
orig_out = mod_traced(x)
submodules_out = mod_traced_new(x)
self.assertEqual(orig_out, submodules_out)
def test_graph_module_init_buffer_param_copied_dict_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=True)
def test_graph_module_init_buffer_param_copied_mod_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=False)
def test_annotations_with_no_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, a: A) -> torch.Tensor:
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List[torch.Tensor], a: A) -> torch.Tensor:
return a(x[0])
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor':
return a(x)[0]
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
@unittest.skipIf(sys.version_info < (3, 7), "`__future__` feature "
"`annotations` is not defined in Python <3.7")
def test_annotation_with_future(self):
try:
import fx.test_future # noqa: F401
finally:
del sys.modules["__future__"]
def test_annotations_empty_tuple(self):
class Foo(torch.nn.Module):
def forward(self, x: Tuple[()], y: Tuple[str, Tuple[()]]):
return "foo"
traced = torch.fx.symbolic_trace(Foo())
x = ()
y = ("bar", ())
traced(x, y)
FileCheck().check("_Tuple[()]") \
.check("typing_Tuple[str,typing_Tuple[()]]") \
.run(traced.code)
scripted = torch.jit.script(traced)
scripted(x, y)
FileCheck().check("Tuple[()]") \
.check("Tuple[str, Tuple[()]]") \
.run(scripted.code)
def test_pytree(self):
def f_sum(x):
return sum(x)
def f_sum_dict(x):
out = 0
for k, v in x.items():
out += v
return out
def f_dict_list_map(x):
new_dict = {}
for k, v in x.items():
new_dict[k] = [i + 1 for i in v]
return new_dict
def f_dict_add(x):
return x['a'] + sum(x['z'])
def f_namedtuple_add(x):
return x.x + x.y
pytree._register_pytree_node(
Foo,
lambda x: ([x.a, x.b], None),
lambda x, _: Foo(x[0], x[1]),
)
fx_pytree.register_pytree_flatten_spec(Foo, lambda x, _: [x.a, x.b])
def f_custom(x):
return x.a + x.b
def f_custom_dict(x):
return f_sum_dict(x.a) + x.b
def f_return_custom(x):
return Foo(x.b, x.a)
tests = [
(f_sum, [PH, PH, PH]),
(f_sum, []),
(f_sum_dict, {'a': PH, 'b': PH, 'c': PH}),
(f_dict_list_map, {'a': (PH, PH), 'b': [PH], 'c': []}),
(f_dict_list_map, {5: (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': []}),
(f_custom, Foo(PH, PH)),
(f_custom, Foo(PH, 3)),
(f_custom_dict, Foo({'a': PH, 'b': PH}, PH)),
# (f_return_custom, Foo(PH, PH)), # Don't currently support output pytrees
(f_namedtuple_add, Point(PH, PH)),
]
def verify_pytree(f, inp):
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
num_flat_args = len([i == PH for i in pytree.tree_flatten(inp)[0]])
orig_out = f(val)
nf = symbolic_trace(f, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
nf = symbolic_trace(nf)
self.assertEqual(nf(val), orig_out)
assert "tree_flatten_spec" not in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == 1)
nf = symbolic_trace(nf, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
pickled = pickle.dumps(nf)
nf = pickle.loads(pickled)
self.assertEqual(nf(val), orig_out)
for f, inp in tests:
verify_pytree(f, inp)
def test_pytree_concrete(self):
def f(b, a):
if b:
return a['a']
else:
return a['z']
inp = {'a': {'a': PH, 'z': PH}, 'b': True}
nf = symbolic_trace(f, concrete_args=inp)
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
self.assertEqual(nf(**val), f(**val))
nf = symbolic_trace(nf)
self.assertEqual(nf(**val), f(**val))
def run_getitem_target():
from torch.fx._symbolic_trace import _wrapped_methods_to_patch
_wrapped_methods_to_patch.append((torch.Tensor, "__getitem__"))
try:
TestFX().getitem_inner()
finally:
_wrapped_methods_to_patch.pop()
class TestOperatorSignatures(JitTestCase):
def setUp(self):
        # Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
@onlyCPU
@ops(op_db, allowed_dtypes=(torch.float,))
def test_get_torch_func_signature_exhaustive(self, device, dtype, op):
if not isinstance(op.op, types.BuiltinFunctionType):
raise unittest.SkipTest("This path doesn't work on Python functions")
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
schemas = get_signature_for_torch_op(op.op)
if not schemas:
raise RuntimeError('No Schemas Returned')
for sample_input in sample_inputs_itr:
# Iterate through overloads until we hit a match. If we exit this
# loop via `else`, we haven't found a match
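            # (This relies on Python's for/else: the else arm runs only when
            # the loop finishes without `break`, i.e. no overload matched.)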
for schema in schemas:
try:
bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs)
bound_args.apply_defaults()
op(*bound_args.args, **bound_args.kwargs)
break
except TypeError as e:
pass
else:
raise RuntimeError(f'Did not match any schemas for op {op.name}!')
class TestFXAPIBackwardCompatibility(JitTestCase):
def setUp(self):
self.maxDiff = None
        # Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def _fn_to_stable_annotation_str(self, obj):
"""
Unfortunately we have to serialize function signatures manually since
serialization for `inspect.Signature` objects is not stable across
python versions
"""
fn_name = torch.typename(obj)
signature = inspect.signature(obj)
sig_str = f'{fn_name}{signature}'
arg_strs = []
for k, v in signature.parameters.items():
maybe_type_annotation = f': {self._annotation_type_to_stable_str(v.annotation, sig_str)}'\
if v.annotation is not inspect.Signature.empty else ''
def default_val_str(val):
if isinstance(val, (tuple, list)):
str_pieces = ['(' if isinstance(val, tuple) else '[']
str_pieces.append(', '.join(default_val_str(v) for v in val))
if isinstance(val, tuple) and len(str_pieces) == 2:
str_pieces.append(',')
str_pieces.append(')' if isinstance(val, tuple) else ']')
return ''.join(str_pieces)
# Need to fix up some default value strings.
# First case: modules. Default module `repr` contains the FS path of the module.
# Don't leak that
if isinstance(val, types.ModuleType):
return f'<module {val.__name__}>'
# Second case: callables. Callables (such as lambdas) encode their address in
# their string repr. Don't do that
if callable(val):
return f'<function {val.__name__}>'
return str(val)
if v.default is not inspect.Signature.empty:
default_val_str = default_val_str(v.default) if not isinstance(v.default, str) else f"'{v.default}'"
maybe_default = f' = {default_val_str}'
else:
maybe_default = ''
maybe_stars = ''
if v.kind == inspect.Parameter.VAR_POSITIONAL:
maybe_stars = '*'
elif v.kind == inspect.Parameter.VAR_KEYWORD:
maybe_stars = '**'
arg_strs.append(f'{maybe_stars}{k}{maybe_type_annotation}{maybe_default}')
return_annot = f' -> {self._annotation_type_to_stable_str(signature.return_annotation, sig_str)}'\
if signature.return_annotation is not inspect.Signature.empty else ''
return f'{fn_name}({", ".join(arg_strs)}){return_annot}'
def _annotation_type_to_stable_str(self, t, sig_str):
if t is inspect.Signature.empty:
return ''
# Forward ref
if isinstance(t, str):
return f"'{t}'"
if hasattr(typing, 'ForwardRef') and isinstance(t, typing.ForwardRef):
return t.__forward_arg__
if hasattr(typing, '_ForwardRef') and isinstance(t, typing._ForwardRef):
return t.__forward_arg__
trivial_mappings = {
str : 'str',
int : 'int',
float: 'float',
bool: 'bool',
torch.dtype: 'torch.dtype',
torch.Tensor: 'torch.Tensor',
torch.device: 'torch.device',
torch.memory_format: 'torch.memory_format',
slice: 'slice',
torch.nn.Module: 'torch.nn.modules.module.Module',
torch.fx.Graph : 'torch.fx.graph.Graph',
torch.fx.Node : 'torch.fx.node.Node',
torch.fx.Proxy : 'torch.fx.proxy.Proxy',
torch.fx.node.Target : 'torch.fx.node.Target',
torch.fx.node.Argument : 'torch.fx.node.Argument',
torch.fx.graph.PythonCode : 'torch.fx.graph.PythonCode',
torch.fx.graph_module.GraphModule: 'torch.fx.graph_module.GraphModule',
torch.fx.subgraph_rewriter.Match: 'torch.fx.subgraph_rewriter.Match',
Ellipsis : '...',
typing.Any: 'Any',
type(None): 'NoneType',
None: 'None',
typing.Iterator: 'Iterator',
}
mapping = trivial_mappings.get(t, None)
if mapping:
return mapping
# Handle types with contained types
contained = getattr(t, '__args__', None) or []
# Callables contain a bare List for arguments
contained = t if isinstance(t, list) else contained
# Python 3.8 puts type vars into __args__ for unbound types such as Dict
if all(isinstance(ct, typing.TypeVar) for ct in contained):
contained = []
contained_type_annots = [self._annotation_type_to_stable_str(ct, sig_str) for ct in contained]
contained_type_str = f'[{", ".join(contained_type_annots)}]' if len(contained_type_annots) > 0 else ''
origin = getattr(t, '__origin__', None)
if origin is None:
# Unbound types don't have `__origin__` in some Python versions, so fix that up here.
origin = t if t in {typing.Tuple, typing.Union, typing.Dict, typing.List, typing.Type, typing.Callable} else origin
if origin in {tuple, typing.Tuple}:
return f'Tuple{contained_type_str}'
if origin in {typing.Union}:
# Annoying hack to detect Optional
if len(contained) == 2 and (contained[0] is type(None)) ^ (contained[1] is type(None)):
not_none_param = contained[0] if contained[0] is not type(None) else contained[1]
return f'Optional[{self._annotation_type_to_stable_str(not_none_param, sig_str)}]'
return f'Union{contained_type_str}'
if origin in {dict, typing.Dict}:
return f'Dict{contained_type_str}'
if origin in {list, typing.List}:
return f'List{contained_type_str}'
if origin in {type, typing.Type}:
return f'Type{contained_type_str}'
if isinstance(t, typing.Callable):
if len(contained) > 0 and contained[0] is not Ellipsis:
return f'Callable[[{", ".join(contained_type_annots[:-1])}], {contained_type_annots[-1]}]'
else:
return f'Callable{contained_type_str}'
raise RuntimeError(f'Unrecognized type {t} used in BC-compatible type signature {sig_str}.'
                           f' Please add support for this type and confirm with the '
f'FX team that your signature change is valid.')
def test_function_back_compat(self):
"""
Test backward compatibility for function signatures with
@compatibility(is_backward_compatible=True). Currently this checks for
exact signature matches, which may lead to false positives. If this
becomes too annoying, we can refine this check to actually parse out
the saved schema strings and check if the change is truly backward-
incompatible.
"""
signature_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if not isinstance(obj, type):
signature_strs.append(self._fn_to_stable_annotation_str(obj))
signature_strs.sort()
try:
self.assertExpected('\n'.join(signature_strs), 'fx_backcompat_function_signatures')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX function that has been marked " \
f"as backwards-compatible has experienced a signature change. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_class_member_back_compat(self):
"""
Test backward compatibility for members of classes with
@compatibility(is_backward_compatible=True). Currently this checks for
exact matches on the publicly visible members of the class.
"""
class_method_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if isinstance(obj, type):
public_members = [name for name in obj.__dict__ if not name.startswith('_')]
class_method_strs.append(f'{torch.typename(obj)} {sorted(public_members)}')
class_method_strs.sort()
try:
self.assertExpected('\n'.join(class_method_strs), 'fx_backcompat_class_members')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX class that has been marked " \
f"as backwards-compatible has experienced change in its public members. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_public_api_surface(self):
non_back_compat_objects = {}
def check_symbols_have_bc_designation(m, prefix):
if not m.__name__.startswith('torch.fx'):
return
if m.__name__.startswith('torch.fx.experimental'):
return
for k, v in m.__dict__.items():
if v is m:
continue
if k.startswith('_'):
continue
if isinstance(v, types.ModuleType):
check_symbols_have_bc_designation(v, prefix + [k])
elif isinstance(v, type) or isinstance(v, types.FunctionType):
if v not in _MARKED_WITH_COMATIBLITY:
non_back_compat_objects.setdefault(v)
check_symbols_have_bc_designation(torch.fx, ['torch', 'fx'])
check_symbols_have_bc_designation(torch.fx.passes, ['torch', 'fx', 'passes'])
non_back_compat_strs = [torch.typename(obj) for obj in non_back_compat_objects.keys()]
# Only want objects in torch.fx
non_back_compat_strs = [
s for s in non_back_compat_strs if s.startswith('torch.fx') and not s.startswith('torch.fx.experimental')]
# Only want objects in public namespaces
non_back_compat_strs = [
s for s in non_back_compat_strs if all(not atom.startswith('_') for atom in s.split('.'))]
non_back_compat_strs.sort()
if len(non_back_compat_strs) != 0:
raise AssertionError(f"Public FX API(s) {non_back_compat_strs} introduced but not given a "
f"backwards-compatibility classification! Please decorate these "
f"API(s) with `@torch.fx._compatibility.compatibility` to specify "
f"BC guarantees.")
class TestFunctionalTracing(JitTestCase):
def setUp(self):
        # Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
IGNORE_FUNCS = ("has_torch_function", "has_torch_function_unary",
"has_torch_function_variadic", "handle_torch_function",
"boolean_dispatch")
TO_PATCH = {"has_torch_function": None,
"has_torch_function_unary": None,
"has_torch_function_variadic": None}
BUILT_IN_FUNC = (AssertionError, "")
PROXY_ITERABLE = (TypeError, r"argument of type 'Proxy' is not iterable")
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
LEN_ERROR = (RuntimeError, r"'len' is not supported in symbolic tracing by default")
ARG_TYPE_MISMATCH = (TypeError, r", not Proxy$")
CONTROL_FLOW = (TraceError, r"symbolically traced variables cannot be used as inputs to control flow")
INTERPOLATE_ARGS_CONFLICT = (ValueError, r"only one of size or scale_factor should be defined")
MUTABLE = (RuntimeError, r"Tried to trace mutable operation")
UNTRACEABLE_FUNCTIONALS = {
"adaptive_avg_pool1d": BUILT_IN_FUNC,
"avg_pool1d": BUILT_IN_FUNC,
"avg_pool2d": BUILT_IN_FUNC,
"avg_pool3d": BUILT_IN_FUNC,
"celu_": BUILT_IN_FUNC,
"channel_shuffle": BUILT_IN_FUNC,
"conv1d": BUILT_IN_FUNC,
"conv2d": BUILT_IN_FUNC,
"conv3d": BUILT_IN_FUNC,
"conv_tbc": BUILT_IN_FUNC,
"conv_transpose1d": BUILT_IN_FUNC,
"conv_transpose2d": BUILT_IN_FUNC,
"conv_transpose3d": BUILT_IN_FUNC,
"cosine_similarity": BUILT_IN_FUNC,
"elu_": BUILT_IN_FUNC,
"hardtanh_": BUILT_IN_FUNC,
"leaky_relu_": BUILT_IN_FUNC,
"logsigmoid": BUILT_IN_FUNC,
"one_hot": BUILT_IN_FUNC,
"pdist": BUILT_IN_FUNC,
"pixel_shuffle": BUILT_IN_FUNC,
"pixel_unshuffle": BUILT_IN_FUNC,
"relu_": BUILT_IN_FUNC,
"rrelu_": BUILT_IN_FUNC,
"selu_": BUILT_IN_FUNC,
"softplus": BUILT_IN_FUNC,
"softshrink": BUILT_IN_FUNC,
"threshold_": BUILT_IN_FUNC,
"adaptive_avg_pool2d": LEN_ERROR,
"adaptive_avg_pool3d": LEN_ERROR,
"adaptive_max_pool2d_with_indices": LEN_ERROR,
"adaptive_max_pool3d_with_indices": LEN_ERROR,
"instance_norm": CONTROL_FLOW,
"pad": LEN_ERROR,
"adaptive_max_pool1d": PROXY_ITERABLE,
"adaptive_max_pool2d": PROXY_ITERABLE,
"adaptive_max_pool3d": PROXY_ITERABLE,
"fractional_max_pool2d": PROXY_ITERABLE,
"fractional_max_pool3d": PROXY_ITERABLE,
"max_pool1d": PROXY_ITERABLE,
"max_pool2d": PROXY_ITERABLE,
"max_pool3d": PROXY_ITERABLE,
"group_norm": PROXY_ITERATED,
"lp_pool2d": PROXY_ITERATED,
"max_unpool1d": PROXY_ITERATED,
"max_unpool2d": PROXY_ITERATED,
"max_unpool3d": PROXY_ITERATED,
"adaptive_max_pool1d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool2d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool3d_with_indices": ARG_TYPE_MISMATCH,
"hardshrink": ARG_TYPE_MISMATCH,
"layer_norm": ARG_TYPE_MISMATCH,
"lp_pool1d": ARG_TYPE_MISMATCH,
"pairwise_distance": ARG_TYPE_MISMATCH,
"affine_grid": CONTROL_FLOW,
"alpha_dropout": CONTROL_FLOW,
"batch_norm": CONTROL_FLOW,
"binary_cross_entropy": CONTROL_FLOW,
"binary_cross_entropy_with_logits": CONTROL_FLOW,
"celu": CONTROL_FLOW,
"cosine_embedding_loss": CONTROL_FLOW,
"cross_entropy": CONTROL_FLOW,
"ctc_loss": CONTROL_FLOW,
"dropout": CONTROL_FLOW,
"dropout2d": CONTROL_FLOW,
"dropout3d": CONTROL_FLOW,
"elu": CONTROL_FLOW,
"embedding": CONTROL_FLOW,
"embedding_bag": CONTROL_FLOW,
"feature_alpha_dropout": CONTROL_FLOW,
"fold": CONTROL_FLOW,
"gaussian_nll_loss": CONTROL_FLOW,
"glu": CONTROL_FLOW,
"grid_sample": CONTROL_FLOW,
"gumbel_softmax": CONTROL_FLOW,
"hardsigmoid": CONTROL_FLOW,
"hardswish": CONTROL_FLOW,
"hardtanh": CONTROL_FLOW,
"hinge_embedding_loss": CONTROL_FLOW,
"huber_loss": CONTROL_FLOW,
"interpolate": CONTROL_FLOW,
"kl_div": CONTROL_FLOW,
"l1_loss": CONTROL_FLOW,
"leaky_relu": CONTROL_FLOW,
"local_response_norm": CONTROL_FLOW,
"margin_ranking_loss": CONTROL_FLOW,
"max_pool1d_with_indices": CONTROL_FLOW,
"max_pool2d_with_indices": CONTROL_FLOW,
"max_pool3d_with_indices": CONTROL_FLOW,
"mse_loss": CONTROL_FLOW,
"multi_head_attention_forward": CONTROL_FLOW,
"multi_margin_loss": CONTROL_FLOW,
"multilabel_margin_loss": CONTROL_FLOW,
"multilabel_soft_margin_loss": CONTROL_FLOW,
"nll_loss": CONTROL_FLOW,
"poisson_nll_loss": CONTROL_FLOW,
"relu": CONTROL_FLOW,
"relu6": CONTROL_FLOW,
"rrelu": CONTROL_FLOW,
"selu": CONTROL_FLOW,
"silu": CONTROL_FLOW,
"mish": CONTROL_FLOW,
"smooth_l1_loss": CONTROL_FLOW,
"soft_margin_loss": CONTROL_FLOW,
"threshold": CONTROL_FLOW,
"triplet_margin_loss": CONTROL_FLOW,
"triplet_margin_with_distance_loss": CONTROL_FLOW,
"unfold": CONTROL_FLOW,
"upsample": CONTROL_FLOW,
"upsample_bilinear": INTERPOLATE_ARGS_CONFLICT,
"upsample_nearest": INTERPOLATE_ARGS_CONFLICT,
"normalize" : MUTABLE,
}
# List of nn.functionals with Tensor inputs but not with type annotation
FUNCTIONALS_WITHOUT_ANNOTATION = (
"adaptive_max_pool1d",
"adaptive_max_pool2d",
"adaptive_max_pool3d",
"fractional_max_pool2d",
"fractional_max_pool3d",
"max_pool1d",
"max_pool2d",
"max_pool3d",
"gaussian_nll_loss",
"upsample",
"upsample_bilinear",
"upsample_nearest",
)
# Inconsistent behavior between Python 3.8 and other Python versions:
# - Python 3.8+: Re-raise internal exception like `PROXY_ITERATED`
# - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same
# internal exception above
# Use the following map to override the expected exception for Python 3.8
UNTRACEABLE_FUNCTIONALS_PY38 = {
"adaptive_max_pool1d": PROXY_ITERATED,
"adaptive_max_pool2d": PROXY_ITERATED,
"adaptive_max_pool3d": PROXY_ITERATED,
"fractional_max_pool2d": PROXY_ITERATED,
"fractional_max_pool3d": PROXY_ITERATED,
"max_pool1d": PROXY_ITERATED,
"max_pool2d": PROXY_ITERATED,
"max_pool3d": PROXY_ITERATED,
"group_norm": LEN_ERROR
}
@classmethod
def _get_functional(cls):
functional_list = []
for f in dir(torch.nn.functional):
if not f.islower():
continue
# Ignore internal functions
if f.startswith('_'):
continue
# Ignore supporting functions
if f in cls.IGNORE_FUNCS:
continue
fn = getattr(torch.nn.functional, f)
# Ignore non-callable object like modules
if not isinstance(fn, Callable):
continue
if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION:
try:
sig = inspect.signature(fn)
has_tensor_arg = False
for arg, param in sig.parameters.items():
if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor):
has_tensor_arg = True
if not has_tensor_arg:
continue
# No signature or Object is not supported
except ValueError:
pass
functional_list.append((f, fn))
return functional_list
@classmethod
def generate_test_func(cls, func_name, fn):
def functional_test(self):
if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \
sys.version_info >= (3, 8) and sys.version_info < (3, 10):
exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
elif func_name in self.UNTRACEABLE_FUNCTIONALS:
exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
else:
symbolic_trace(fn)
return functional_test
@classmethod
def generate_tests(cls):
functional_list = cls._get_functional()
for func_name, fn in functional_list:
test_name = "test_nn_functional_" + func_name
functional_test = cls.generate_test_func(func_name, fn)
setattr(cls, test_name, functional_test)
@classmethod
def setUpClass(cls):
def no(*args, **kwargs):
return False
for name in cls.TO_PATCH.keys():
cls.TO_PATCH[name] = getattr(torch.nn.functional, name)
setattr(torch.nn.functional, name, no)
@classmethod
def tearDownClass(cls):
for name in cls.TO_PATCH.keys():
setattr(torch.nn.functional, name, cls.TO_PATCH[name])
TestFunctionalTracing.generate_tests()
instantiate_device_type_tests(TestOperatorSignatures, globals())
@skipIfNoTorchVision
class TestVisionTracing(JitTestCase):
def setUp(self):
        # Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
INCONSISTENT_TYPE = (
RuntimeError,
r"Return value was annotated as having type __torch__.torchvision.models[.\w]+ but is actually of type Tensor"
)
UNTRACEABLE_MODELS = {
"fasterrcnn_resnet50_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_320_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_fpn": PROXY_ITERATED,
"maskrcnn_resnet50_fpn": PROXY_ITERATED,
"keypointrcnn_resnet50_fpn": PROXY_ITERATED,
"retinanet_resnet50_fpn": PROXY_ITERATED,
}
UNSCRIPTABLE_MODELS = {
"googlenet": INCONSISTENT_TYPE,
"inception_v3": INCONSISTENT_TYPE,
}
output_transform = {
"fcn_resnet50": lambda x: x["out"],
"fcn_resnet101": lambda x: x["out"],
"deeplabv3_resnet50": lambda x: x["out"],
"deeplabv3_resnet101": lambda x: x["out"],
"deeplabv3_mobilenet_v3_large": lambda x: x["out"],
"lraspp_mobilenet_v3_large": lambda x: x["out"],
"fasterrcnn_resnet50_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_320_fpn": lambda x: x[1],
"maskrcnn_resnet50_fpn": lambda x: x[1],
"keypointrcnn_resnet50_fpn": lambda x: x[1],
"retinanet_resnet50_fpn": lambda x: x[1],
}
@classmethod
def generate_test_fn(cls, name, model_fn, x, kwargs):
def run_test(self):
model = model_fn(**kwargs)
model = model.eval()
if name in self.UNTRACEABLE_MODELS:
err, exc = self.UNTRACEABLE_MODELS[name]
with self.assertRaisesRegex(err, exc):
graph = symbolic_trace(model)
else:
out_transform = self.output_transform.get(name, lambda x: x)
graph : torch.fx.GraphModule = symbolic_trace(model)
a = out_transform(model(x))
b = out_transform(graph(x))
self.assertEqual(a, b)
if name in self.UNSCRIPTABLE_MODELS:
err, exc = self.UNSCRIPTABLE_MODELS[name]
with self.assertRaisesRegex(err, exc):
script = torch.jit.script(graph)
else:
script = torch.jit.script(graph)
c = out_transform(script(x))
self.assertEqual(a, c)
return run_test
@classmethod
def generate_classification_tests(cls):
for k, v in torchvision_models.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_' + k
x = torch.rand(1, 3, 299, 299) if k in ['inception_v3'] else torch.rand(1, 3, 224, 224)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_segmentation_tests(cls):
for k, v in torchvision_models.segmentation.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_segmentation_' + k
x = torch.rand(1, 3, 32, 32)
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_detection_tests(cls):
for k, v in torchvision_models.detection.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_detection_' + k
x = [torch.rand(3, 300, 300)]
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_video_tests(cls):
for k, v in torchvision_models.video.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_video_' + k
x = torch.rand(1, 3, 4, 112, 112)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_tests(cls):
cls.generate_classification_tests()
cls.generate_detection_tests()
cls.generate_segmentation_tests()
cls.generate_video_tests()
if HAS_TORCHVISION:
TestVisionTracing.generate_tests()
if __name__ == '__main__':
run_tests()
# ======================================================================
# File: /codingtest/findCityBJ18352.py
# Repo: jewerlykim/python_Algorithm (no license)
# ======================================================================
import sys
from collections import deque
# sys.stdin = open("/Users/jewerlykim/Desktop/python_Algorithm/codingtest/18352.txt", 'r')
cityNumber, roadNumber, distance, startCityNumber = map(int, sys.stdin.readline().split())
graph = [[] for _ in range(cityNumber+1)]
visited = [False for _ in range(cityNumber+1)]
distanceGraph = [0 for _ in range(cityNumber+1)]
for _ in range(roadNumber):
departure, arrive = map(int, sys.stdin.readline().split())
graph[departure].append(arrive)
def bfs(startCityNumber):
queue = deque()
queue.append(startCityNumber)
visited[startCityNumber] = True
while queue:
city = queue.popleft()
for i in graph[city]:
if visited[i] == False:
queue.append(i)
visited[i] = True
distanceGraph[i] = distanceGraph[city] + 1
bfs(startCityNumber)
exist = False
for i, value in enumerate(distanceGraph):
if value == distance:
print(i)
exist = True
if not exist:
print(-1)
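# Illustrative walk-through (added; sample values assumed, not taken from the
# submission): for input "4 4 2 1" with roads 1->2, 1->3, 2->3, 2->4, BFS from
# city 1 yields shortest distances 0, 1, 1, 2, so only city 4 is printed.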
# ======================================================================
# File: /setup.py
# Repo: imclab/python-ucto (no license)
# ======================================================================
#!/usr/bin/env python
from distutils.core import setup, Extension
from Cython.Distutils import build_ext
import glob
import os
import sys
from os.path import expanduser
HOMEDIR = expanduser("~")
VERSION = '0.3.0'
includedirs = [HOMEDIR + '/local/include/','/usr/include/', '/usr/include/libxml2','/usr/local/include/' ]
libdirs = [HOMEDIR + '/local/lib/','/usr/lib','/usr/local/lib']
if 'VIRTUAL_ENV' in os.environ:
includedirs.insert(0,os.environ['VIRTUAL_ENV'] + '/include')
libdirs.insert(0,os.environ['VIRTUAL_ENV'] + '/lib')
if sys.version < '3':
extensions = [ Extension("ucto",
[ "libfolia_classes.pxd", "ucto_classes.pxd", "ucto_wrapper2.pyx"],
language='c++',
include_dirs=includedirs,
library_dirs=libdirs,
libraries=['ucto','folia'],
extra_compile_args=['--std=c++0x'],
) ]
else:
extensions = [ Extension("ucto",
[ "libfolia_classes.pxd", "ucto_classes.pxd", "ucto_wrapper.pyx"],
language='c++',
include_dirs=includedirs,
library_dirs=libdirs,
libraries=['ucto','folia'],
extra_compile_args=['--std=c++0x'],
) ]
setup(
name = 'python-ucto',
version = VERSION,
author = 'Maarten van Gompel',
author_email = "[email protected]",
description = ("This is a Python binding to the tokenizer Ucto. Tokenisation is one of the first step in almost any Natural Language Processing task, yet it is not always as trivial a task as it appears to be. This binding makes the power of the ucto tokeniser available to Python. Ucto itself is a regular-expression based, extensible, and advanced tokeniser written in C++ (https://languagemachines.github.io/ucto)."),
license = "GPL",
keywords = "tokenizer tokenization tokeniser tokenisation nlp computational_linguistics ucto",
url = "https://github.com/proycon/python-ucto",
ext_modules = extensions,
cmdclass = {'build_ext': build_ext},
requires=['ucto (>=0.8.4)'],
install_requires=['Cython'],
classifiers=[
"Development Status :: 4 - Beta",
"Topic :: Text Processing :: Linguistic",
"Programming Language :: Cython",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Operating System :: POSIX",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
],
)
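# Usage note (added; standard distutils/Cython workflow, not from the repo):
#   python setup.py build_ext --inplace   # build the extension in place
#   python setup.py install               # or install it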
# ======================================================================
# File: /.history/get_20200805213236.py
# Repo: MaryanneNjeri/pythonModules (no license)
# ======================================================================
def produce(num1, num2):
totalValue = 0
for i in range(num1):
print(i)
        totalValue += num2
print(totalValue)
produce(2,3)
# 4513 = 4 *
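# Behavior note (added): produce(2, 3) prints 0 and 1 from the loop, then 6;
# the function computes num1 * num2 by repeated addition.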
# ======================================================================
# File: /examples/tf/trpo_gym_tf_cartpole.py
# Repo: neurips2020submission11699/metarl (MIT)
# ======================================================================
#!/usr/bin/env python3
"""An example to train a task with TRPO algorithm."""
import gym
from metarl import wrap_experiment
from metarl.envs import MetaRLEnv
from metarl.experiment import LocalTFRunner
from metarl.experiment.deterministic import set_seed
from metarl.np.baselines import LinearFeatureBaseline
from metarl.tf.algos import TRPO
from metarl.tf.policies import CategoricalMLPPolicy
@wrap_experiment
def trpo_gym_tf_cartpole(ctxt=None, seed=1):
"""Train TRPO with CartPole-v0 environment.
Args:
ctxt (metarl.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with LocalTFRunner(snapshot_config=ctxt) as runner:
env = MetaRLEnv(gym.make('CartPole-v0'))
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(
env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=200,
discount=0.99,
max_kl_step=0.01,
)
runner.setup(algo, env)
runner.train(n_epochs=120, batch_size=4000)
trpo_gym_tf_cartpole()
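# Usage note (added, hedged): functions decorated with @wrap_experiment accept
# their declared keyword arguments at call time, so a different seed could be
# tried with trpo_gym_tf_cartpole(seed=2) -- an assumption based on the
# signature above, not something exercised in this file.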
# ======================================================================
# File: /flight/migrations/0002_auto_20180921_1020.py
# Repo: Sundarmax/Ariline--Django-REST (no license)
# ======================================================================
# Generated by Django 2.1.1 on 2018-09-21 04:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('flight', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='flight',
name='no_seats',
field=models.IntegerField(),
),
]
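# Usage note (added): apply this migration with the standard Django command,
# e.g. `python manage.py migrate flight`.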
# ======================================================================
# File: /apps/post/api.py
# Repo: xiringlama/manutd.org.np (no license)
# ======================================================================
from rest_framework import viewsets, mixins
from apps.key.permissions import DistributedKeyAuthentication
from apps.post.models import Post
from apps.post.serializers import PostSerializer
class PostViewSet(mixins.ListModelMixin, mixins.RetrieveModelMixin, viewsets.GenericViewSet):
serializer_class = PostSerializer
queryset = Post.objects.filter(status='Published')
# permission_classes = (DistributedKeyAuthentication,)
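# Hypothetical wiring sketch (added; URL prefix and basename are assumptions,
# not taken from this repo):
#   from rest_framework.routers import DefaultRouter
#   router = DefaultRouter()
#   router.register(r'posts', PostViewSet, basename='post')
#   urlpatterns = router.urls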
# ======================================================================
# File: /Loan.py
# Repo: ic1396/LearnPython (no license)
# ======================================================================
#!/usr/bin/python3
# "Python Language Programming", Listing 7-8
# Example: class abstraction and encapsulation - a loan/interest class
class Loan:
def __init__(self, annualInterestRate = 2.5,
numberOfYears = 1, loanAmount = 1000, borrower = " "):
self.__annualInterestRate = annualInterestRate
self.__numberOfYears = numberOfYears
self.__loanAmount = loanAmount
self.__borrower = borrower
def getAnnualInterestRate(self):
return self.__annualInterestRate
def getNumberOfYears(self):
return self.__numberOfYears
def getLoanAmount(self):
return self.__loanAmount
def getBorrower(self):
return self.__borrower
def setAnnualInterestRate(self, annualInterestRate):
self.__annualInterestRate = annualInterestRate
def setNumberOfYears(self, numberOfYears):
self.__numberOfYears = numberOfYears
def setLoanAmount(self, loanAmount):
self.__loanAmount = loanAmount
def setBorrower(self, borrower):
self.__borrower = borrower
def getMonthlyPayment(self):
monthlyInterestRate = self.__annualInterestRate / 1200
monthlyPayment = self.__loanAmount * monthlyInterestRate / \
(1 - (1 / (1 + monthlyInterestRate) ** (self.__numberOfYears * 12)))
return monthlyPayment
def getTotalPayment(self):
totalPayment = self.getMonthlyPayment() * self.__numberOfYears * 12
        return totalPayment
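    # Hedged usage sketch (added; sample figures assumed, not from the
    # original listing): a 2.5% annual rate on a 1000 loan over 1 year gives
    # a monthly payment of about 84.47 and a total payment of about 1013.65.
    #   loan = Loan(2.5, 1, 1000, "somebody")
    #   print(loan.getMonthlyPayment(), loan.getTotalPayment())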
"[email protected]"
] | |
b405952335b6929afbb22de633864b74f1b0ad52 | b2135e3fc77666f043f0fbafd0d88ed9865d5b4f | /Python files/01 Chapter 1.1 - About Printing/01 Printing numbers/77110_02_code.step.py | f7e395198a69c7e49f8b6c88c7007d98253a1ca1 | [] | no_license | Felienne/spea | 164d05e9fbba82c7b7df8d00295f7157054f9248 | ecb06c66aaf6a2dced3f141ca415be9efb7dbff5 | refs/heads/master | 2020-03-17T17:35:27.302219 | 2018-05-17T10:14:49 | 2018-05-17T10:14:49 | 133,794,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | # This code prints the number 17. Change it so that it prints 19.
print(17)
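# One possible answer to the exercise above (added): print(19)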
"[email protected]"
] | |
6a7583878c83c37b2fc3a1416f0088ec77d2d1b2 | 0c70dcec22a090e70b1f20613ea6e0a64fd9a037 | /GPS卫星位置的计算/venv/Lib/site-packages/pandas/tests/frame/methods/test_pop.py | 8029640b10a0a2d2d7b0862092fba802273d2e96 | [
"MIT"
] | permissive | payiz-asj/Gis | 82c1096d830878f62c7a0d5dfb6630d4e4744764 | 3d315fed93e2ab850b836ddfd7a67f5618969d10 | refs/heads/main | 2023-06-27T15:25:17.301154 | 2021-08-03T10:02:58 | 2021-08-03T10:02:58 | 392,269,853 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | from pandas import DataFrame, Series
import pandas._testing as tm
class TestDataFramePop:
def test_pop(self, float_frame):
float_frame.columns.name = "baz"
float_frame.pop("A")
assert "A" not in float_frame
float_frame["foo"] = "bar"
float_frame.pop("foo")
assert "foo" not in float_frame
assert float_frame.columns.name == "baz"
# gh-10912: inplace ops cause caching issue
a = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"], index=["X", "Y"])
b = a.pop("B")
b += 1
# original frame
expected = DataFrame([[1, 3], [4, 6]], columns=["A", "C"], index=["X", "Y"])
tm.assert_frame_equal(a, expected)
# result
expected = Series([2, 5], index=["X", "Y"], name="B") + 1
tm.assert_series_equal(b, expected)
def test_pop_non_unique_cols(self):
df = DataFrame({0: [0, 1], 1: [0, 1], 2: [4, 5]})
df.columns = ["a", "b", "a"]
res = df.pop("a")
assert type(res) == DataFrame
assert len(res) == 2
assert len(df.columns) == 1
assert "b" in df.columns
assert "a" not in df.columns
assert len(df.index) == 2
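        # Note (added): with duplicate column labels, DataFrame.pop removes
        # every matching column and returns a DataFrame instead of a Series,
        # which is exactly what the assertions above verify.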
# ======================================================================
# File: /tensorrt-integrate-1.5-unet/unet-pytorch-main/unet.py
# Repo: jinmin527/learning-cuda-trt (MIT)
# ======================================================================
import colorsys
import copy
import time
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from torch import nn
from nets.unet import Unet as unet
from utils.utils import cvtColor, preprocess_input, resize_image
#--------------------------------------------#
#   To predict with your own trained model, two parameters must be
#   changed: both model_path and num_classes need to be modified!
#   If a shape mismatch occurs, make sure model_path and num_classes
#   match the values used during training.
#--------------------------------------------#
class Unet(object):
_defaults = {
        #-------------------------------------------------------------------#
        #   model_path points to a weight file under the logs folder.
        #   After training, the logs folder holds several weight files; pick
        #   one with a low validation loss. A low validation loss does not
        #   guarantee a high mIoU, only that the weights generalize well on
        #   the validation set.
        #-------------------------------------------------------------------#
"model_path" : 'model_data/unet_vgg_voc.pth',
        #--------------------------------#
        #   Number of classes to distinguish, plus 1
        #--------------------------------#
"num_classes" : 21,
        #--------------------------------#
        #   Backbone network to use: vgg or resnet50
        #--------------------------------#
"backbone" : "vgg",
        #--------------------------------#
        #   Input image size
        #--------------------------------#
"input_shape" : [512, 512],
        #-------------------------------------------------#
        #   mix_type controls how the detection result is visualized
        #
        #   mix_type = 0 blends the original image with the generated mask
        #   mix_type = 1 keeps only the generated mask
        #   mix_type = 2 removes the background, keeping only the targets
        #                from the original image
        #-------------------------------------------------#
"mix_type" : 0,
        #--------------------------------#
        #   Whether to use CUDA
        #   Set to False if no GPU is available
        #--------------------------------#
"cuda" : True,
}
#---------------------------------------------------#
    #   initialize UNET
#---------------------------------------------------#
def __init__(self, **kwargs):
self.__dict__.update(self._defaults)
for name, value in kwargs.items():
setattr(self, name, value)
#---------------------------------------------------#
        #   assign a different color to each class for drawing
#---------------------------------------------------#
if self.num_classes <= 21:
self.colors = [ (0, 0, 0), (128, 0, 0), (0, 128, 0), (128, 128, 0), (0, 0, 128), (128, 0, 128), (0, 128, 128),
(128, 128, 128), (64, 0, 0), (192, 0, 0), (64, 128, 0), (192, 128, 0), (64, 0, 128), (192, 0, 128),
(64, 128, 128), (192, 128, 128), (0, 64, 0), (128, 64, 0), (0, 192, 0), (128, 192, 0), (0, 64, 128),
(128, 64, 12)]
else:
hsv_tuples = [(x / self.num_classes, 1., 1.) for x in range(self.num_classes)]
self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
self.colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), self.colors))
#---------------------------------------------------#
        #   build the model
#---------------------------------------------------#
self.generate()
#---------------------------------------------------#
    #   build the network and load the trained weights
#---------------------------------------------------#
def generate(self):
self.net = unet(num_classes = self.num_classes, backbone=self.backbone)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.net.load_state_dict(torch.load(self.model_path, map_location=device))
self.net = self.net.eval()
print('{} model, and classes loaded.'.format(self.model_path))
if self.cuda:
self.net = nn.DataParallel(self.net)
self.net = self.net.cuda()
#---------------------------------------------------#
    #   run segmentation on an image
#---------------------------------------------------#
def detect_image(self, image):
#---------------------------------------------------------#
        #   Convert the image to RGB here to avoid errors when predicting on grayscale images.
        #   The code only supports prediction on RGB images; any other image type is converted to RGB.
#---------------------------------------------------------#
image = cvtColor(image)
#---------------------------------------------------#
        #   keep a copy of the input image for drawing on later
#---------------------------------------------------#
old_img = copy.deepcopy(image)
orininal_h = np.array(image).shape[0]
orininal_w = np.array(image).shape[1]
#---------------------------------------------------------#
        #   Pad the image with gray bars to achieve a distortion-free resize.
        #   A plain resize could also be used for recognition instead.
#---------------------------------------------------------#
image_data, nw, nh = resize_image(image, (self.input_shape[1],self.input_shape[0]))
#---------------------------------------------------------#
        #   add the batch_size dimension
#---------------------------------------------------------#
image_data = np.expand_dims(np.transpose(preprocess_input(np.array(image_data, np.float32)), (2, 0, 1)), 0)
with torch.no_grad():
images = torch.from_numpy(image_data)
if self.cuda:
images = images.cuda()
#---------------------------------------------------#
            #   feed the image through the network to get a prediction
#---------------------------------------------------#
pr = self.net(images)[0]
#---------------------------------------------------#
            #   take the predicted class of each pixel
#---------------------------------------------------#
pr = F.softmax(pr.permute(1,2,0),dim = -1).cpu().numpy()
#--------------------------------------#
            #   crop away the gray-bar padding
#--------------------------------------#
pr = pr[int((self.input_shape[0] - nh) // 2) : int((self.input_shape[0] - nh) // 2 + nh), \
int((self.input_shape[1] - nw) // 2) : int((self.input_shape[1] - nw) // 2 + nw)]
#---------------------------------------------------#
            #   resize the prediction back to the original image size
#---------------------------------------------------#
pr = cv2.resize(pr, (orininal_w, orininal_h), interpolation = cv2.INTER_LINEAR)
#---------------------------------------------------#
            #   take the predicted class of each pixel
#---------------------------------------------------#
pr = pr.argmax(axis=-1)
if self.mix_type == 0:
# seg_img = np.zeros((np.shape(pr)[0], np.shape(pr)[1], 3))
# for c in range(self.num_classes):
# seg_img[:, :, 0] += ((pr[:, :] == c ) * self.colors[c][0]).astype('uint8')
# seg_img[:, :, 1] += ((pr[:, :] == c ) * self.colors[c][1]).astype('uint8')
# seg_img[:, :, 2] += ((pr[:, :] == c ) * self.colors[c][2]).astype('uint8')
seg_img = np.reshape(np.array(self.colors, np.uint8)[np.reshape(pr, [-1])], [orininal_h, orininal_w, -1])
#------------------------------------------------#
            #   convert the new image into PIL Image form
#------------------------------------------------#
image = Image.fromarray(np.uint8(seg_img))
#------------------------------------------------#
            #   blend the new image with the original image
#------------------------------------------------#
image = Image.blend(old_img, image, 0.7)
elif self.mix_type == 1:
# seg_img = np.zeros((np.shape(pr)[0], np.shape(pr)[1], 3))
# for c in range(self.num_classes):
# seg_img[:, :, 0] += ((pr[:, :] == c ) * self.colors[c][0]).astype('uint8')
# seg_img[:, :, 1] += ((pr[:, :] == c ) * self.colors[c][1]).astype('uint8')
# seg_img[:, :, 2] += ((pr[:, :] == c ) * self.colors[c][2]).astype('uint8')
seg_img = np.reshape(np.array(self.colors, np.uint8)[np.reshape(pr, [-1])], [orininal_h, orininal_w, -1])
#------------------------------------------------#
            #   convert the new image into PIL Image form
#------------------------------------------------#
image = Image.fromarray(np.uint8(seg_img))
elif self.mix_type == 2:
seg_img = (np.expand_dims(pr != 0, -1) * np.array(old_img, np.float32)).astype('uint8')
#------------------------------------------------#
            #   convert the new image into PIL Image form
#------------------------------------------------#
image = Image.fromarray(np.uint8(seg_img))
return image
def get_FPS(self, image, test_interval):
#---------------------------------------------------------#
        #   Convert the image to RGB here to avoid errors when predicting on grayscale images.
        #   The code only supports prediction on RGB images; any other image type is converted to RGB.
#---------------------------------------------------------#
image = cvtColor(image)
#---------------------------------------------------------#
        #   Pad the image with gray bars to achieve a distortion-free resize.
        #   A plain resize could also be used for recognition instead.
#---------------------------------------------------------#
image_data, nw, nh = resize_image(image, (self.input_shape[1],self.input_shape[0]))
#---------------------------------------------------------#
        #   add the batch_size dimension
#---------------------------------------------------------#
image_data = np.expand_dims(np.transpose(preprocess_input(np.array(image_data, np.float32)), (2, 0, 1)), 0)
with torch.no_grad():
images = torch.from_numpy(image_data)
if self.cuda:
images = images.cuda()
#---------------------------------------------------#
            #   feed the image through the network to get a prediction
#---------------------------------------------------#
pr = self.net(images)[0]
#---------------------------------------------------#
            #   take the predicted class of each pixel
#---------------------------------------------------#
pr = F.softmax(pr.permute(1,2,0),dim = -1).cpu().numpy().argmax(axis=-1)
#--------------------------------------#
            #   crop away the gray-bar padding
#--------------------------------------#
pr = pr[int((self.input_shape[0] - nh) // 2) : int((self.input_shape[0] - nh) // 2 + nh), \
int((self.input_shape[1] - nw) // 2) : int((self.input_shape[1] - nw) // 2 + nw)]
t1 = time.time()
for _ in range(test_interval):
with torch.no_grad():
#---------------------------------------------------#
                #   feed the image through the network to get a prediction
#---------------------------------------------------#
pr = self.net(images)[0]
#---------------------------------------------------#
                #   take the predicted class of each pixel
#---------------------------------------------------#
pr = F.softmax(pr.permute(1,2,0),dim = -1).cpu().numpy().argmax(axis=-1)
#--------------------------------------#
                #   crop away the gray-bar padding
#--------------------------------------#
pr = pr[int((self.input_shape[0] - nh) // 2) : int((self.input_shape[0] - nh) // 2 + nh), \
int((self.input_shape[1] - nw) // 2) : int((self.input_shape[1] - nw) // 2 + nw)]
t2 = time.time()
tact_time = (t2 - t1) / test_interval
return tact_time
def get_miou_png(self, image):
#---------------------------------------------------------#
        #   Convert the image to RGB here to avoid errors when predicting on grayscale images.
        #   The code only supports prediction on RGB images; any other image type is converted to RGB.
#---------------------------------------------------------#
image = cvtColor(image)
orininal_h = np.array(image).shape[0]
orininal_w = np.array(image).shape[1]
#---------------------------------------------------------#
        #   Pad the image with gray bars to achieve a distortion-free resize.
        #   A plain resize could also be used for recognition instead.
#---------------------------------------------------------#
image_data, nw, nh = resize_image(image, (self.input_shape[1],self.input_shape[0]))
#---------------------------------------------------------#
        #   add the batch_size dimension
#---------------------------------------------------------#
image_data = np.expand_dims(np.transpose(preprocess_input(np.array(image_data, np.float32)), (2, 0, 1)), 0)
with torch.no_grad():
images = torch.from_numpy(image_data)
if self.cuda:
images = images.cuda()
#---------------------------------------------------#
            #   feed the image through the network to get a prediction
#---------------------------------------------------#
pr = self.net(images)[0]
#---------------------------------------------------#
            #   take the predicted class of each pixel
#---------------------------------------------------#
pr = F.softmax(pr.permute(1,2,0),dim = -1).cpu().numpy()
#--------------------------------------#
            #   crop away the gray-bar padding
#--------------------------------------#
pr = pr[int((self.input_shape[0] - nh) // 2) : int((self.input_shape[0] - nh) // 2 + nh), \
int((self.input_shape[1] - nw) // 2) : int((self.input_shape[1] - nw) // 2 + nw)]
#---------------------------------------------------#
            #   resize the prediction back to the original image size
#---------------------------------------------------#
pr = cv2.resize(pr, (orininal_w, orininal_h), interpolation = cv2.INTER_LINEAR)
#---------------------------------------------------#
            #   take the predicted class of each pixel
#---------------------------------------------------#
pr = pr.argmax(axis=-1)
image = Image.fromarray(np.uint8(pr))
return image
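
#---------------------------------------------------#
#   A minimal usage sketch, assuming the default weights referenced in
#   _defaults exist locally and that "street.jpg" is a hypothetical test
#   image on disk -- both of those details are assumptions.
#---------------------------------------------------#
if __name__ == "__main__":
    unet = Unet()
    image = Image.open("street.jpg")       # hypothetical input image
    result = unet.detect_image(image)      # returns a PIL.Image
    result.save("street_segmented.jpg")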
| [
"[email protected]"
] | |
aa9a0e1ec0fb65029c338c1783ad70bdc8b72522 | 39c99883c3e55c0a0a7684fc5fd89c767ea93cc8 | /model.py | 1ba435317b63996e67640f13e3a5cb9aac440e01 | [] | no_license | shawntan/billion-word-imputation | 872f478926966aa17e44a1738c58fdb90681a552 | ea581253537ad76e89ec5eaa0cf80d57d61121e4 | refs/heads/master | 2020-04-09T11:14:02.828445 | 2014-11-21T09:32:43 | 2014-11-21T09:32:43 | 22,462,900 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,425 | py | # coding=utf-8
import theano
import sys
import theano.tensor as T
import numpy as np
from theano_toolkit import utils as U
from theano_toolkit import updates
import cPickle as pickle
from numpy_hinton import print_arr
from theano.printing import Print
from vocab import read_file
def create_vocab_vectors(vocab2id,size):
V = U.create_shared(U.initial_weights(len(vocab2id) + 1,size))
V_b = U.create_shared(U.initial_weights(len(vocab2id) + 1))
return V,V_b
def recurrent_combine(X,V,V_b,W_input,b_input,W_state_p,b_state_p,b_state,W_input_hidden,W_state_p_hidden):
def step(curr_input,state_p):
# Build next layer
state = T.dot(curr_input,W_input) + T.dot(state_p,W_state_p) + b_state
state = T.tanh(state)
# RAE stuff
rep_word_vec = T.dot(state,W_input.T) + b_input
rep_curr_input = T.dot(rep_word_vec,V.T) + V_b
rep_state_p = T.dot(state,W_state_p.T) + b_state_p
# Contributions to predictive hidden layer
hidden_partial = T.dot(state_p,W_state_p_hidden) + T.dot(curr_input,W_input_hidden)
return state,rep_curr_input,rep_state_p,hidden_partial
[states,rep_inputs,rep_states,hidden_partials],_ = theano.scan(
step,
sequences = [X[1:]],
outputs_info = [X[0],None,None,None]
)
return states,T.nnet.softmax(rep_inputs),rep_states,hidden_partials
def missing_cost(scores,Y):
probs = T.nnet.softmax(scores)[0]
total_scores_diff = -T.log(probs[Y])
"""
label_score = scores[Y]
scores_diff = -(label_score - (scores + 1))
scores_diff = scores_diff * (scores_diff > 0)
total_scores_diff = (T.sum(scores_diff) - scores_diff[Y])/(scores.shape[0]-1)
"""
return total_scores_diff
def rae_cost(ids,X,states,rep_inputs,rep_states):
# Actual input - reconstructed input error
#input_rec_cost = T.mean(T.sum((X[1:]-rep_inputs)**2,axis=1))
input_rec_cost = -T.mean(T.log(rep_inputs[T.arange(rep_inputs.shape[0]),ids[1:]]))
# Actual prev state - reconstructed prev state error
state_rec_cost = (
# All states except last, all rec states except first
T.sum((states[:-1] - rep_states[1:])**2) +\
# First state (first input) and first rec state
T.sum((X[0] - rep_states[0])**2)
)/states.shape[0]
return input_rec_cost + state_rec_cost
def create_model(ids,Y,vocab2id,size):
word_vector_size = size
rae_state_size = size
predictive_hidden_size = size * 2
V,V_b = create_vocab_vectors(vocab2id,word_vector_size)
X = V[ids]
# RAE parameters
W_input = U.create_shared(U.initial_weights(word_vector_size,rae_state_size))
b_input = U.create_shared(U.initial_weights(rae_state_size))
W_state_p = U.create_shared(U.initial_weights(rae_state_size,rae_state_size))
b_state_p = U.create_shared(U.initial_weights(rae_state_size))
b_state = U.create_shared(U.initial_weights(rae_state_size))
W_input_hidden = U.create_shared(U.initial_weights(word_vector_size,predictive_hidden_size))
W_state_p_hidden = U.create_shared(U.initial_weights(rae_state_size,predictive_hidden_size))
W_full_context_hidden = U.create_shared(U.initial_weights(rae_state_size,predictive_hidden_size))
b_hidden = U.create_shared(U.initial_weights(predictive_hidden_size))
W_output = U.create_shared(U.initial_weights(predictive_hidden_size))
states,rep_inputs,rep_states,hidden_partials = recurrent_combine(
X,
V,V_b,
W_input,b_input,
W_state_p,b_state_p,b_state,
W_input_hidden,W_state_p_hidden,
)
context = states[-1]
hidden = T.dot(context,W_full_context_hidden) + hidden_partials + b_hidden
# hidden = T.tanh(hidden)
hidden = hidden * (hidden > 0)
scores = T.dot(hidden,W_output)
parameters = [
V,
V_b,
W_input,
b_input,
W_state_p,
b_state_p,
b_state,
W_input_hidden,
W_state_p_hidden,
W_full_context_hidden,
b_hidden,
W_output
]
cost = rae_cost(ids,X,states,rep_inputs,rep_states) + missing_cost(scores,Y) + 1e-5*sum(T.sum(w**2) for w in parameters)
return scores, cost, parameters
def training_model(vocab2id,size):
ids = T.ivector('ids')
Y = T.iscalar('Y')
scores, cost, parameters = create_model(ids,Y,vocab2id,size)
gradients = T.grad(cost,wrt=parameters)
print "Computed gradients"
train = theano.function(
inputs = [ids,Y],
updates = updates.adadelta(parameters,gradients,0.95,1e-6),
outputs = cost
)
test = theano.function(
inputs = [ids],
outputs = T.argmax(scores)
)
return test,train, parameters
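
# A minimal smoke-test sketch, assuming a toy five-word vocabulary; the
# sentence ids and the missing-word position below are illustrative only.
if __name__ == "__main__":
    vocab2id = {w: i for i, w in enumerate("the cat sat on mat".split())}
    test, train, params = training_model(vocab2id, size=8)
    ids = np.asarray([0, 1, 2, 3, 4], dtype=np.int32)
    print(train(ids, 2))   # one adadelta step; prints the training cost
    print(test(ids))       # predicted position of the missing word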
| [
"[email protected]"
] | |
d9b69a220873901f4475849a5acbf53bdab5a693 | 21b201ebf2ffbbc19fa8d74e5657e12ef597b02d | /research/neural_programmer/data_utils.py | d5bae2d30db51a295f9719d42498a4e5bfc775fa | [] | no_license | alhsnouf/model | fa619691ad9d0afc7ad849a9471e6bb0643a8d47 | 5fe429b115634e642a7469b3f1d4bc0c5cf98782 | refs/heads/master | 2021-04-12T11:16:02.150045 | 2018-03-27T15:19:18 | 2018-03-27T15:19:18 | 126,702,717 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:a53d41aacbe166afd6c9e4a1822c313dd0e1cef93707ab41e75a8afd1ffeb53b
size 27733
| [
"[email protected]"
] | |
b400fba31f2fdf357fddd49b9f1a2872913b8b9d | d4cdc6c9e2580b2011d63f6d62f70ab9e13cd317 | /sld-api-backend/api_v1/endpoints/auth.py | 5c25ca1f22d744b558c204861daeb55ec28aeaec | [
"MIT"
] | permissive | timezombi/Stack-Lifecycle-Deployment | 75cc92bc0267953039f0d66c7c219a8d444817c8 | d84241099fb44762476b4201a2fc195e76975e26 | refs/heads/master | 2023-07-13T11:11:35.001371 | 2021-08-20T13:35:14 | 2021-08-20T13:35:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | from typing import Any
from sqlalchemy.orm import Session
from fastapi import APIRouter, Depends
from fastapi.security import OAuth2PasswordRequestForm
from schemas import schemas
from schemas.schemas import Token
from security import deps
from security.tokens import validate_user
router = APIRouter()
@router.post("/access-token", response_model=Token)
def login_access_token(
user: OAuth2PasswordRequestForm = Depends(),
db: Session = Depends(deps.get_db)) -> Any:
"""
OAuth2 compatible token login, get an access token for future requests
"""
return validate_user(db, user.username, user.password)
@router.post("/access-token-json", response_model=Token)
def login_access_token_json(
user: schemas.UserAuthenticate,
db: Session = Depends(deps.get_db)) -> dict:
"""
OAuth2 compatible token login, get an access token for future requests
"""
return validate_user(db, user.username, user.password)
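
# A minimal client-side sketch, assuming the API is served locally at
# http://localhost:8000 with this router mounted under /api/v1/authenticate
# and that the credentials below exist -- all of these are assumptions.
if __name__ == "__main__":
    import requests

    r = requests.post(
        "http://localhost:8000/api/v1/authenticate/access-token",
        # form-encoded body, matching OAuth2PasswordRequestForm
        data={"username": "admin", "password": "secret"},
    )
    print(r.json())  # expected shape: {"access_token": "...", "token_type": "bearer"}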
| [
"{ID}+{username}@users.noreply.github.com"
] | {ID}+{username}@users.noreply.github.com |
0a08e11ff1d01c391f047776fe01e6807cafe721 | be0e978e39dd4ab192590e97b2e907b4072c461f | /conf.py | 4ea22b874cef6819e261d30a7a5e5c553f8d29bc | [] | no_license | kbarbary/dessn-analysis | 825007f1bcddf2e7fb92a99ee280cc644fd1ab70 | 2090b417b757447a28b766a6a5a38a7e7ea68c8e | refs/heads/master | 2016-09-05T13:16:07.219179 | 2014-03-04T23:56:41 | 2014-03-04T23:56:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | import os
pikdir = 'pik'
plotdir = 'plots'
# Make directories
for d in [pikdir, plotdir]:
if not os.path.exists(d): os.mkdir(d)
| [
"[email protected]"
] | |
7dd3b559cde230f1cd49d4201ecfa533315f92fe | d83118503614bb83ad8edb72dda7f449a1226f8b | /src/dprj/platinumegg/app/cabaret/views/application/evolution/do.py | 403717b769ec36230b647c36b6fe96b538c75f9a | [] | no_license | hitandaway100/caba | 686fe4390e182e158cd9714c90024a082deb8c69 | 492bf477ac00c380f2b2758c86b46aa7e58bbad9 | refs/heads/master | 2021-08-23T05:59:28.910129 | 2017-12-03T19:03:15 | 2017-12-03T19:03:15 | 112,512,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,683 | py | # -*- coding: utf-8 -*-
from platinumegg.app.cabaret.util.url_maker import UrlMaker
from platinumegg.app.cabaret.views.application.evolution.base import EvolutionHandler
from platinumegg.app.cabaret.models.Player import PlayerGold, PlayerDeck
from platinumegg.app.cabaret.util.api import BackendApi
from platinumegg.app.cabaret.util.cabareterror import CabaretError
from platinumegg.app.cabaret.util.db_util import ModelRequestMgr
from platinumegg.app.cabaret.util import db_util
import settings_sub
import urllib
from platinumegg.lib.opensocial.util import OSAUtil
from platinumegg.app.cabaret.models.Card import Card
class Handler(EvolutionHandler):
"""進化合成実行.
"""
@classmethod
def getViewerPlayerClassList(cls):
return [PlayerGold, PlayerDeck]
def procBench(self):
v_player = self.getViewerPlayer()
uid = v_player.id
self.__baseid = Card.makeID(uid, 11)
self.__materialid = Card.makeID(uid, 12)
def process(self):
args = self.getUrlArgs('/evolutiondo/')
try:
if settings_sub.IS_BENCH:
requestkey = OSAUtil.makeSessionID()
else:
self.__baseid = int(args.get(0))
self.__materialid = self.getMaterialId()
requestkey = urllib.unquote(args.get(1))
except:
raise CabaretError(u'引数が想定外です', CabaretError.Code.ILLEGAL_ARGS)
v_player = self.getViewerPlayer()
try:
model_mgr = db_util.run_in_transaction(Handler.tr_write, v_player, self.__baseid, self.__materialid, requestkey)
model_mgr.write_end()
except CabaretError,e:
if e.code == CabaretError.Code.ALREADY_RECEIVED:
pass
else:
if settings_sub.IS_LOCAL:
raise CabaretError(u'合成できませんでした.%s' % CabaretError.getCodeString(e.code))
url = UrlMaker.evolution()
self.appRedirect(self.makeAppLinkUrlRedirect(url))
return
url = UrlMaker.evolutionanim()
if settings_sub.IS_BENCH:
self.response.set_status(200)
self.response.send()
else:
self.appRedirect(self.makeAppLinkUrlRedirect(url))
@staticmethod
def tr_write(v_player, basecardid, materialcardid, key):
"""書き込み.
"""
model_mgr = ModelRequestMgr()
BackendApi.tr_evolution_do(model_mgr, v_player, basecardid, materialcardid, key)
model_mgr.write_all()
return model_mgr
def main(request):
return Handler.run(request)
| [
"[email protected]"
] | |
f0a2557bcbcb8ad398c5927172e5d6cba1dc2da0 | da199a7ff8bcc7a37efe2ac9036b785bf45c71c0 | /service_mds/lun_inactive.py | 0114f0829c74333e4b42bcfcf171b7d1b4f7836d | [] | no_license | saxisuer/smartmgr-v2 | f8ed495ce7ce940477f27c12980bfd159bc159c3 | 6e3895062d37b6815a0d6de031652048b8f22ad3 | refs/heads/master | 2021-01-15T21:24:56.622142 | 2017-07-24T14:35:17 | 2017-07-24T14:35:17 | 99,865,861 | 0 | 2 | null | 2017-08-10T01:03:19 | 2017-08-10T01:03:19 | null | UTF-8 | Python | false | false | 3,502 | py | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
from pdsframe import *
from service_mds import g
from service_mds import common
import message.pds_pb2 as msg_pds
import message.mds_pb2 as msg_mds
class LunInactiveMachine(BaseMachine):
__metaclass__ = MataMachine
MID = msg_mds.LUN_INACTIVE_REQUEST
def INIT(self, request):
self.response = MakeResponse(msg_mds.LUN_INACTIVE_RESPONSE, request)
self.request = request
self.request_body = request.body.Extensions[msg_mds.lun_inactive_request]
if g.is_ready == False:
self.response.rc.retcode = msg_mds.RC_MDS_SERVICE_IS_NOT_READY
self.response.rc.message = "MDS service is not ready"
self.SendResponse(self.response)
return MS_FINISH
items = self.request_body.lun_name.split("_")
if len(items) != 2 or items[0] != g.node_info.node_name:
self.response.rc.retcode = msg_mds.RC_MDS_LUN_NOT_EXIST
self.response.rc.message = "Lun '%s' is not exist" % self.request_body.lun_name
self.SendResponse(self.response)
return MS_FINISH
lun_name = items[1]
lun_info = common.GetLunInfoByName(lun_name)
if lun_info == None:
self.response.rc.retcode = msg_mds.RC_MDS_LUN_NOT_EXIST
self.response.rc.message = "Lun %s not exist" % (self.request_body.lun_name)
self.SendResponse(self.response)
return MS_FINISH
if lun_info.asm_status == "INACTIVE":
self.response.rc.retcode = msg_mds.RC_MDS_LUN_INACTIVE_NOT_ALLOWED
self.response.rc.message = "Lun %s already inactive state" % self.request_body.lun_name
self.SendResponse(self.response)
return MS_FINISH
if lun_info.asm_status != "ACTIVE":
self.response.rc.retcode = msg_mds.RC_MDS_LUN_INACTIVE_NOT_ALLOWED
self.response.rc.message = "Lun %s not active state, please active first!" % self.request_body.lun_name
self.SendResponse(self.response)
return MS_FINISH
self.database_node_list = [node_info for node_info in g.nsnode_list.nsnode_infos if node_info.sys_mode != "storage"]
self.mds_database_request = MakeRequest(msg_mds.ASMDISK_OFFLINE_REQUEST)
asmdisk_info = common.GetASMDiskInfoByLunName(self.request_body.lun_name)
self.mds_database_request.body.Extensions[msg_mds.asmdisk_offline_request].asmdisk_name = asmdisk_info.asmdisk_name
        # send the request to the first database node first
self.request_num = 1
return self.send_asm_request()
def send_asm_request(self):
node_info = self.database_node_list[self.request_num-1]
self.SendRequest(node_info.listen_ip, node_info.listen_port, self.mds_database_request, self.Entry_LunInactive)
return MS_CONTINUE
def Entry_LunInactive(self, response):
if response.rc.retcode != msg_pds.RC_SUCCESS:
            # retry against the other database nodes; only fail after all of them have failed
if self.request_num < len(self.database_node_list):
self.request_num += 1
return self.send_asm_request()
else:
self.response.rc.CopyFrom(response.rc)
self.SendResponse(self.response)
return MS_FINISH
self.response.rc.retcode = msg_pds.RC_SUCCESS
self.SendResponse(self.response)
return MS_FINISH
| [
"[email protected]"
] | |
8ffd0e1b3034be62335188db3ccdd16b0c58540c | 38ac429d63369922e12e19cdda042b08b8123027 | /swagger_client/models/json_sort_field_find_attribute_types_request.py | d563a4b2286f13ff3b7dce9832b2c61c8a9565ad | [] | no_license | aviv-julienjehannet/collibra_apiclient | 0dfebe5df2eb929645b87eba42fab4c06ff0a6be | 10a89e7acaf56ab8c7417698cd12616107706b6b | refs/heads/master | 2021-09-12T16:52:19.803624 | 2018-04-19T01:35:20 | 2018-04-19T01:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,616 | py | # coding: utf-8
"""
\"Data Governance Center: REST API v2\"
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class JsonSortFieldFindAttributeTypesRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
allowed enum values
"""
NAME = "NAME"
KIND = "KIND"
STATISTICS_ENABLED = "STATISTICS_ENABLED"
IS_INTEGER = "IS_INTEGER"
ALLOWED_VALUES = "ALLOWED_VALUES"
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""JsonSortFieldFindAttributeTypesRequest - a model defined in Swagger""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, JsonSortFieldFindAttributeTypesRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
678cc4f6459ab6e34107a402ad11f6583bb5612e | fe1d717382b175baeea1e0186fcdefad93e086f3 | /arg_greet.py | db7606cbb722b48a2b8c71cf76fca72b6184e4cc | [] | no_license | jiwon73/lecture_2 | 19669014c7398a0f692e9a20302cb798fc67b438 | 4ee4f8badb74da6b852a09b8ccfad6eefcc73d3c | refs/heads/master | 2022-11-19T16:56:53.074924 | 2020-07-15T08:52:08 | 2020-07-15T08:52:08 | 279,532,343 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | def greet(*names):
for name in names:
print('안녕하세요',name,'씨')
greet('홍길동','양민춘','이순신')
greet('James','Thomas')
| [
"[email protected]"
] | |
27307bfe340d36b6bb9ee49fcbd2dc75bc39a97f | bd9d75816e6bb174c2b9e443492096339e3f90e3 | /sympy/mpmath/tests/test_rootfinding.py | f6221ada8ef6e46460c9edab405b2b624bc71af7 | [
"BSD-3-Clause"
] | permissive | Rezaian-ma/sympy | ae800f0f1420f2cdbef1e4535e44f5cd47c9d8b0 | 7d8d096215c8f65ba1d4a9c09af78ec0c3844518 | refs/heads/master | 2021-12-03T01:17:38.048732 | 2010-02-14T05:53:55 | 2010-02-14T05:53:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,497 | py | from sympy.mpmath import *
from sympy.mpmath.optimization import *
def test_findroot():
# old tests, assuming secant
mp.dps = 15
assert findroot(lambda x: 4*x-3, mpf(5)).ae(0.75)
assert findroot(sin, mpf(3)).ae(pi)
assert findroot(sin, (mpf(3), mpf(3.14))).ae(pi)
assert findroot(lambda x: x*x+1, mpc(2+2j)).ae(1j)
# test all solvers with 1 starting point
f = lambda x: cos(x)
for solver in [Newton, Secant, MNewton, Muller, ANewton]:
x = findroot(f, 2., solver=solver)
assert abs(f(x)) < eps
# test all solvers with interval of 2 points
for solver in [Secant, Muller, Bisection, Illinois, Pegasus, Anderson,
Ridder]:
x = findroot(f, (1., 2.), solver=solver)
assert abs(f(x)) < eps
# test types
f = lambda x: (x - 2)**2
assert isinstance(findroot(f, 1, force_type=mpf, tol=1e-10), mpf)
assert isinstance(findroot(f, 1., force_type=None, tol=1e-10), float)
assert isinstance(findroot(f, 1, force_type=complex, tol=1e-10), complex)
def test_mnewton():
f = lambda x: polyval([1,3,3,1],x)
x = findroot(f, -0.9, solver='mnewton')
assert abs(f(x)) < eps
def test_anewton():
f = lambda x: (x - 2)**100
x = findroot(f, 1., solver=ANewton)
assert abs(f(x)) < eps
def test_muller():
f = lambda x: (2 + x)**3 + 2
x = findroot(f, 1., solver=Muller)
assert abs(f(x)) < eps
def test_multiplicity():
for i in xrange(1, 5):
assert multiplicity(lambda x: (x - 1)**i, 1) == i
assert multiplicity(lambda x: x**2, 1) == 0
def test_multidimensional():
def f(*x):
return [3*x[0]**2-2*x[1]**2-1, x[0]**2-2*x[0]+x[1]**2+2*x[1]-8]
assert mnorm(jacobian(f, (1,-2)) - matrix([[6,8],[0,-2]]),1) < 1.e-7
for x, error in MDNewton(f, (1,-2), verbose=0,
norm=lambda x: norm(x, inf)):
pass
assert norm(f(*x), 2) < 1e-14
# The Chinese mathematician Zhu Shijie was the very first to solve this
# nonlinear system 700 years ago
f1 = lambda x, y: -x + 2*y
f2 = lambda x, y: (x**2 + x*(y**2 - 2) - 4*y) / (x + 4)
f3 = lambda x, y: sqrt(x**2 + y**2)
def f(x, y):
f1x = f1(x, y)
return (f2(x, y) - f1x, f3(x, y) - f1x)
x = findroot(f, (10, 10))
assert [int(round(i)) for i in x] == [3, 4]
def test_trivial():
assert findroot(lambda x: 0, 1) == 1
assert findroot(lambda x: x, 0) == 0
#assert findroot(lambda x, y: x + y, (1, -1)) == (1, -1)
| [
"[email protected]"
] | |
c306066a382072689bc5aec0380668d5f0faeed0 | 3b802edba5b97a4e97290be657395cd7635f5d35 | /neoman/worker.py | 8b960c99356d6fd8f2bc7ce851f4f9e769264c14 | [
"BSD-2-Clause"
] | permissive | moreati/yubikey-neo-manager | a7678fafbf8f88b29482caa843092f7598b6725c | b0fa3cdf5331bf1504e2744790caddff52b551f6 | refs/heads/master | 2021-01-19T06:55:58.099823 | 2015-06-04T11:30:53 | 2015-06-04T11:30:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,363 | py | # Copyright (c) 2013 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from PySide import QtGui, QtCore, QtNetwork
from functools import partial
from neoman import messages as m
class _Event(QtCore.QEvent):
EVENT_TYPE = QtCore.QEvent.Type(QtCore.QEvent.registerEventType())
def __init__(self, callback):
super(_Event, self).__init__(_Event.EVENT_TYPE)
self.callback = callback
class QtWorker(QtCore.QObject):
_work_signal = QtCore.Signal(tuple)
_work_done = QtCore.Signal(object)
_work_done_0 = QtCore.Signal()
def __init__(self, window):
super(QtWorker, self).__init__()
self.window = window
self.busy = QtGui.QProgressDialog('', None, 0, 0, window)
self.busy.setWindowTitle(m.wait)
self.busy.setWindowModality(QtCore.Qt.WindowModal)
self.busy.setMinimumDuration(0)
self.busy.setWindowFlags(self.busy.windowFlags()
^ QtCore.Qt.WindowContextHelpButtonHint)
self.busy.setAutoClose(True)
self.work_thread = QtCore.QThread()
self.moveToThread(self.work_thread)
self.work_thread.start()
self._work_signal.connect(self.work)
self._work_done_0.connect(self.busy.reset)
self._manager = QtNetwork.QNetworkAccessManager()
self._manager.finished.connect(self._work_done_0)
self._manager.finished.connect(self._dl_done)
def post(self, title, fn, callback=None):
self.busy.setLabelText(title)
self.busy.show()
self.post_bg(fn, callback)
def post_bg(self, fn, callback=None):
self._work_signal.emit((fn, callback))
def download(self, url, callback=None):
self.busy.setLabelText(m.downloading_file)
self.busy.show()
self.download_bg(url, callback)
def download_bg(self, url, callback=None):
url = QtCore.QUrl(url)
request = QtNetwork.QNetworkRequest(url)
response = self._manager.get(request)
self._dl = (request, response, callback)
def _dl_error(self):
(req, resp, callback) = self._dl
del self._dl
if callback:
event = _Event(partial(callback, resp.error()))
QtGui.QApplication.postEvent(self.window, event)
def _dl_done(self):
(req, resp, callback) = self._dl
del self._dl
if callback:
result = resp.error()
if result is QtNetwork.QNetworkReply.NoError:
result = resp.readAll()
resp.close()
event = _Event(partial(callback, result))
QtGui.QApplication.postEvent(self.window, event)
@QtCore.Slot(tuple)
def work(self, job):
QtCore.QThread.msleep(10) # Needed to yield
(fn, callback) = job
try:
result = fn()
except Exception as e:
result = e
if callback:
event = _Event(partial(callback, result))
QtGui.QApplication.postEvent(self.window, event)
self._work_done_0.emit()
Worker = QtWorker
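
# A minimal usage sketch, assuming the host window dispatches the custom
# _Event by overriding QWidget.event() as below; the window class, the
# slow_task function and the label text are all illustrative placeholders.
if __name__ == '__main__':
    import sys

    class _DemoWindow(QtGui.QMainWindow):
        def event(self, event):
            if event.type() == _Event.EVENT_TYPE:
                event.callback()
                return True
            return super(_DemoWindow, self).event(event)

    app = QtGui.QApplication(sys.argv)
    window = _DemoWindow()
    worker = Worker(window)

    def slow_task():
        QtCore.QThread.msleep(500)  # stands in for real blocking work
        return 42

    def on_done(result):
        print(result)  # called on the GUI thread with 42 (or an Exception)
        app.quit()

    worker.post('Working...', slow_task, on_done)
    sys.exit(app.exec_())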
| [
"[email protected]"
] | |
771df2d2ac822d8885a18f74c6dc9f8bae1bf489 | ee721fac058d6c0472be24f95e3cc8df37f4198d | /Stack/reverse.py | 4028f27a5effe0e1e23e87a5d35cf04f3f7f0712 | [] | no_license | Horlawhumy-dev/Python_DataStructures | 51af03dcbed86a51009c13657b17584f09d0a40d | c5aad1fe6c6566414c76711a0871abf9529fe04f | refs/heads/master | 2023-06-04T09:32:34.776313 | 2021-07-02T21:43:09 | 2021-07-02T21:43:09 | 377,631,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | py |
# this program reverses what string inputs given by user
class Stack():
def __init__(self):
self.items = []
def is_empty(self):
return self.items == []
def push(self, item):
self.items.append(item)
def pop(self):
return self.items.pop()
def peek(self):
if not self.is_empty():
return self.items[-1]
return 'Stack is empty!'
def get_items(self):
return self.items
# input from user
word = input('Enter your word: ')
# Initializing stack object
stack = Stack()
# function reversing a given word
def reverse_str(word):
for i in range(len(word)):
stack.push(word[i])
rev = " "
arr = stack.get_items()
while len(arr) > 0:
rev += arr.pop()
return rev
print(reverse_str(word))
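
# A small follow-up sketch using the same Stack class to test for
# palindromes; 'level' and 'stack' are just illustrative inputs.
def is_palindrome(text):
    s = Stack()
    for ch in text:
        s.push(ch)
    reversed_text = ""
    items = s.get_items()
    while len(items) > 0:
        reversed_text += items.pop()
    return reversed_text == text

print(is_palindrome('level'))   # True
print(is_palindrome('stack'))   # False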
| [
"[email protected]"
] | |
ec213dd72742d92bdd997b79ef064fedbc9c7506 | e5b4ed93d6666e195e96a265d3e7cfe4243a7300 | /pbase/day13/code/mypack/games/supermario.py | b0979a018d00553c62cb30752ba30cf8fc1721cc | [] | no_license | Spider251/python | 934f5b8b923c2b61186a6df8445957290e5c4c74 | 8b1931f862e1d5c29fed9af624bcac94c1d25755 | refs/heads/master | 2020-04-05T11:58:04.558098 | 2018-11-09T12:06:06 | 2018-11-09T12:06:06 | 156,852,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | # file: mypack/games/supermario.py
def play():
print("正在玩 超级玛丽")
print("超级玛丽模块被加载") | [
"[email protected]"
] | |
ed3c30613036feb38c28bf2cee2a563c2faa8cc0 | 8f26514c451e2398d5e3688c184ea74d1dad21b2 | /month_01/day_05/exercise_02.py | 8d6e663126992c62ca4ef5f413334a35d88be2ec | [] | no_license | CircularWorld/Python_exercise | 25e7aebe45b4d2ee4e3e3afded082c56483117de | 96d4d9c5c626f418803f44584c5350b7ce514368 | refs/heads/master | 2022-11-21T07:29:39.054971 | 2020-07-20T10:12:24 | 2020-07-20T10:12:24 | 281,081,559 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,094 | py | '''
String: content = "我是京师监狱狱长金海。"
Print the first character, the last character, and the middle character
Print the first three characters and the last three characters
Proposition: "金海" is in the string content
Proposition: "京师监狱" is not in the string content
Use slicing to print "京师监狱狱长"
Use slicing to print "长狱狱监师京"
Use slicing to print "我师狱海"
Print the characters in reverse order
'''
content = "我是京师监狱狱长金海。"
# Print the first character, the last character, and the middle character
# Print the first three characters and the last three characters
print(content[0],content[-1],content[len(content)//2])
print(content[:3],content[-3:])
# Proposition: "金海" is in the string content
# Proposition: "京师监狱" is not in the string content
print('金海'in content)
print('京师监狱' not in content)
# Use slicing to print "京师监狱狱长"
# Use slicing to print "长狱狱监师京"
# Use slicing to print "我师狱海"
# String: content = "我是京师监狱狱长金海。"
print(content[2:-3])
print(content[-4:1:-1])
print(content[::3])
# print the characters in reverse order
print(content[-1::-1])
"[email protected]"
] | |
cc34be56b4526ad16bfdbf503a373b9a3f5a56a3 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03637/s505835119.py | 2e2c10f3045b463cc3d59b8aad36d02afaeae057 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | N=int(input())
a=list(map(int,input().split()))
# n4: multiples of 4; n2: even numbers that are not multiples of 4
n4,n2=0,0
for i in range(N):
if a[i] % 4 == 0:n4 += 1
elif a[i] % 2 == 0:n2 += 1
if n4 >= N//2:print('Yes')
elif n4*2 + n2 >= N:print('Yes')
else:print('No') | [
"[email protected]"
] | |
191126e94cbf396eb4cb7f58ebd051eaa21c55b3 | 082782cfbd0d8ac77c0ec3901a9de1c1e748405a | /sutorbank/settings.py | 7813c4759f3668de0041e98d18074ace7b3d9d84 | [] | no_license | daniel-kanchev/sutorbank | 1c1eb020f86ff58f5a3edc2d1c6971e8d66a390d | 0738ec698f5711a9ceeb59e0a683a853a3bf8979 | refs/heads/main | 2023-03-17T17:55:50.537998 | 2021-03-17T14:47:37 | 2021-03-17T14:47:37 | 348,746,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | BOT_NAME = 'sutorbank'
SPIDER_MODULES = ['sutorbank.spiders']
NEWSPIDER_MODULE = 'sutorbank.spiders'
USER_AGENT = 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:66.0) Gecko/20100101 Firefox/66.0'
ITEM_PIPELINES = {
'sutorbank.pipelines.DatabasePipeline': 300,
}
FEED_EXPORT_ENCODING = 'utf-8'
ROBOTSTXT_OBEY = True
LOG_LEVEL = 'WARNING'
# LOG_LEVEL = 'DEBUG'
| [
"[email protected]"
] | |
b734da395f91fb51745ae74515623e919ce896ee | 2f2667682bb78578445b9e3aac7cc62cfba83d5a | /googlenet/SavedModel_to_trt.py | 41b4f1216a144aa9b441b032c1fc82fe4ca0799b | [] | no_license | Yorwxue/trt_experence | 9c770c2a1cb7c48c9d7f21c46be0107de91f1c41 | 778a6cef019dd8afdae6b608b3cbacb56480c7b1 | refs/heads/master | 2022-12-21T12:38:13.108402 | 2019-08-01T08:11:10 | 2019-08-01T08:11:10 | 195,760,238 | 0 | 0 | null | 2022-12-08T05:57:26 | 2019-07-08T07:36:12 | Python | UTF-8 | Python | false | false | 4,722 | py | # from SavedModel to trt graph
import os
import time
import numpy as np
import tensorflow as tf
import tensorflow.contrib.tensorrt as trt
from tensorflow.examples.tutorials.mnist import input_data
from googlenet.checkpoint_to_SavedModel import image_web_saved_encode
def directory_create(directory):
if not os.path.exists(directory):
os.makedirs(directory)
SavedModel_dir = "./SavedModel/cnn_model/"
SavedModel_path = os.path.join(SavedModel_dir, str(len(os.listdir(SavedModel_dir))-2))
model_tag = "serve" # can be queried by saved_model_cli
summaries_dir = "./trt_model/cnn_model/tensorboard/"
directory_create(summaries_dir)
trt_export_model_dir = "./trt_model/cnn_model/"
trt_export_model_dir = os.path.join(trt_export_model_dir, str(len(os.listdir(trt_export_model_dir))-1))
batch_size = 1
max_GPU_mem_size_for_TRT = 2 << 20
# preparing dataset
# """
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
x_train = mnist.train.images
y_train = mnist.train.labels
x_test = mnist.test.images
y_test = mnist.test.labels
# reshape from 784 to 28*28
# x_train = np.reshape(x_train, [x_train.shape[0], 28, 28, 1])
x_test = np.reshape(x_test, [x_test.shape[0], 28, 28, 1])
# base64 encode
# x_train = [image_web_saved_encode(np.concatenate([image, image, image], axis=2)*255) for image in list(x_train)]
x_test = [image_web_saved_encode(np.concatenate([image, image, image], axis=2) * 255) for image in list(x_test)]
# """
# Inference with TF-TRT `SavedModel` workflow:
# """
graph = tf.Graph()
with graph.as_default():
tfconfig = tf.ConfigProto()
tfconfig.gpu_options.allow_growth = True # maybe necessary
tfconfig.allow_soft_placement = True # maybe necessary
with tf.Session(config=tfconfig) as sess:
# Create a TensorRT inference graph from a SavedModel:
trt_graph = trt.create_inference_graph(
input_graph_def=None,
outputs=None,
# is_dynamic_op=True,
input_saved_model_dir=SavedModel_path,
input_saved_model_tags=[model_tag],
max_batch_size=batch_size,
max_workspace_size_bytes=max_GPU_mem_size_for_TRT,
precision_mode="FP32",
# use_calibration=False, # set False when using INT8
# The following command will create a directory automatically,
            # note that "output_saved_model_dir" must be a path that does not already point to an existing directory
output_saved_model_dir=trt_export_model_dir
)
# Import the TensorRT graph into a new graph and run:
output_node = tf.import_graph_def(
trt_graph,
return_elements=["logits:0"]
)
trt_engine_ops = [n.name for n in trt_graph.node if str(n.op) == 'TRTEngineOp']
print("Number of trt op: %d" % len(trt_engine_ops))
print(trt_engine_ops)
# warm up
print("warm up")
for i in range(5):
prob = sess.run(output_node, {
"import/image_strings:0": [x_test[0]] * batch_size,
"import/image_shapes:0": [(28, 28, 3)] * batch_size
})
print("counter start")
START_TIME = time.time()
prob = sess.run(output_node, feed_dict={
"import/image_strings:0": [x_test[0]] * batch_size,
"import/image_shapes:0": [(28, 28, 3)] * batch_size
})
print("spent %f seconds" % (time.time() - START_TIME))
test_idx = 0
print("label: %d, prediction: %d" % (np.argmax(y_test[test_idx]), np.argmax(prob[0])))
# write graph
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(summaries_dir, trt_graph)
# """
# Inference with TF-TRT frozen graph workflow:
"""
graph = tf.Graph()
with graph.as_default():
with tf.Session() as sess:
# First deserialize your frozen graph:
frozen_model_path = os.path.join(frozen_model_dir, 'frozen_model.pb')
with tf.gfile.GFile(frozen_model_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
# Now you can create a TensorRT inference graph from your
# frozen graph:
trt_graph = trt.create_inference_graph(
input_graph_def=graph_def,
outputs=["probs:0"],
max_batch_size=batch_size,
max_workspace_size_bytes=max_GPU_mem_size_for_TRT,
precision_mode="FP32")
# Import the TensorRT graph into a new graph and run:
output_node = tf.import_graph_def(
trt_graph,
return_elements=["probs:0"])
sess.run(output_node, feed_dict={
"image_batch:0": img1
})
# """
| [
"[email protected]"
] | |
05d5bbe7b2195d31cb3a4e49a9314e81afe7450c | f8d9f893a7afa667a9b615742019cd5c52ee2c59 | /core/platform/taskqueue/dev_mode_taskqueue_services_test.py | 77e45c75f94fed4aa4120ec85940c3c7e56c064a | [
"Apache-2.0"
] | permissive | FareesHussain/oppia | 2ac6c48aaea6a70452b79d665995f6ba6560f70d | 2862b7da750ce332c975b64237791f96189d7aa8 | refs/heads/develop | 2023-08-17T19:25:05.551048 | 2021-10-01T10:36:36 | 2021-10-01T10:36:36 | 323,160,532 | 2 | 0 | Apache-2.0 | 2020-12-20T20:38:45 | 2020-12-20T20:38:44 | null | UTF-8 | Python | false | false | 4,935 | py | # coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for methods in the dev_mode_taskqueue_services."""
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
from core import feconf
from core.domain import taskqueue_services
from core.platform.taskqueue import dev_mode_taskqueue_services
from core.tests import test_utils
import requests
from typing import Any, Dict, Optional
class DevModeTaskqueueServicesUnitTests(test_utils.TestBase):
"""Tests for dev_mode_taskqueue_services."""
def test_creating_dev_mode_task_will_create_the_correct_post_request(
self
) -> None:
correct_queue_name = 'dummy_queue'
dummy_url = '/dummy_handler'
correct_payload = {
'fn_identifier': (
taskqueue_services.FUNCTION_ID_DELETE_EXPS_FROM_USER_MODELS),
'args': [['1', '2', '3']],
'kwargs': {}
}
correct_task_name = 'task1'
# In the type annotation below, payload is of type Dict[str, Any]
# because it mocks the behaviour of
# dev_mode_taskqueue_services.CLIENT.create_task.
def mock_create_task(
queue_name: str,
url: str,
payload: Dict[str, Any],
scheduled_for: Optional[datetime.datetime] = None, # pylint: disable=unused-argument
task_name: Optional[str] = None,
) -> None:
self.assertEqual(queue_name, correct_queue_name)
self.assertEqual(url, dummy_url)
self.assertEqual(payload, correct_payload)
self.assertEqual(task_name, correct_task_name)
swap_create_task = self.swap(
dev_mode_taskqueue_services.CLIENT, 'create_task', mock_create_task)
with swap_create_task:
dev_mode_taskqueue_services.create_http_task(
correct_queue_name, dummy_url, correct_payload,
task_name=correct_task_name)
def test_task_handler_will_create_the_correct_post_request(self) -> None:
queue_name = 'dummy_queue'
dummy_url = '/dummy_handler'
correct_port = dev_mode_taskqueue_services.GOOGLE_APP_ENGINE_PORT
correct_payload = {
'fn_identifier': (
taskqueue_services.FUNCTION_ID_DELETE_EXPS_FROM_USER_MODELS),
'args': [['1', '2', '3']],
'kwargs': {}
}
task_name = 'task1'
correct_headers = {
'X-Appengine-QueueName': queue_name,
'X-Appengine-TaskName': task_name,
'X-Appengine-TaskRetryCount': '0',
'X-Appengine-TaskExecutionCount': '0',
'X-Appengine-TaskETA': '0',
'X-AppEngine-Fake-Is-Admin': '1',
'method': 'POST'
}
# In the type annotation below, we have used Dict[str, Any] for JSON.
# This is because this function mocks requests.post function where the
# type of JSON has been defined Any, hence using Dict[str, Any] here.
# https://github.com/python/typeshed/blob/5e0fc4607323a4657b587bf70e3c26becf1c88d0/stubs/requests/requests/api.pyi#L78
def mock_post(
url: str,
json: Dict[str, Any],
headers: Dict[str, str],
timeout: int
) -> None:
self.assertEqual(
url, 'http://localhost:%s%s' % (
correct_port, dummy_url))
self.assertEqual(json, correct_payload)
self.assertEqual(headers, correct_headers)
self.assertEqual(timeout, feconf.DEFAULT_TASKQUEUE_TIMEOUT_SECONDS)
swap_post = self.swap(requests, 'post', mock_post)
with swap_post:
# I have to test _task_handler by calling it because I cannot
# surround this task handler in a context manager reliably. The
# task_handler is called by a queue thread that is instantiated by
# the Cloud Tasks Emulator which has a non-determistic execution
# time. Creating a task will execute correctly but the program will
# exit the context before actually calling _task_handler().
dev_mode_taskqueue_services._task_handler( # pylint: disable=protected-access
dummy_url, correct_payload, queue_name, task_name=task_name)
| [
"[email protected]"
] | |
d851c1b76ebb72393f7423de98c40690a78c7c5b | e1e3ee617a50c44c7027ebabc3c918797f8daef8 | /sorter.py | 9566fdab8378d1cc2d5fe387c44cc5a9bdb5fec2 | [] | no_license | Kain-Huang/pithy | 179490f6af0d1a77dde015c5570d9d8f75bd3e41 | 6ed323782cad80954f9ab4a6d81726370d7ff53c | refs/heads/master | 2022-01-05T12:12:27.395657 | 2019-05-19T18:25:08 | 2019-05-19T18:25:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | import sys
arg = sys.argv[1]
print arg
from commands import getoutput as go
# The argument encodes a path using "BREAKKKKK" as the separator; the last
# segment is the script name, the remaining segments are nested directories.
a = arg.split("BREAKKKKK")
dird = ""
fil = a[-1]+".py"
for i in a[:-1]: dird+=i+"/"
# recreate the directory tree under code/ before copying the file into it
go("mkdir -p code/"+dird)
print dird+fil
print go("cp code/%s.py code/%s/%s" % (arg,dird,fil))
| [
"[email protected]"
] | |
c9c89d48c222f86dee205223c3208cf1a0857b72 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-mrsp.0/mrsp_ut=3.5_rd=0.5_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=42/params.py | 2298340fefd6fd7ac91681022ad274284fb678f8 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | {'cpus': 4,
'duration': 30,
'final_util': '3.512524',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.5',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'RUN',
'trial': 42,
'utils': 'uni-medium-3'}
| [
"[email protected]"
] | |
6665b847961f9fbe18e23a6309b0424a0ede5776 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/securityinsights/v20190101preview/get_dynamics365_data_connector.py | 2076d0d3c76949d8502ecd1dc8596fbb938831a6 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 5,317 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetDynamics365DataConnectorResult',
'AwaitableGetDynamics365DataConnectorResult',
'get_dynamics365_data_connector',
]
@pulumi.output_type
class GetDynamics365DataConnectorResult:
"""
Represents Dynamics365 data connector.
"""
def __init__(__self__, data_types=None, etag=None, id=None, kind=None, name=None, tenant_id=None, type=None):
if data_types and not isinstance(data_types, dict):
raise TypeError("Expected argument 'data_types' to be a dict")
pulumi.set(__self__, "data_types", data_types)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if tenant_id and not isinstance(tenant_id, str):
raise TypeError("Expected argument 'tenant_id' to be a str")
pulumi.set(__self__, "tenant_id", tenant_id)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="dataTypes")
def data_types(self) -> 'outputs.Dynamics365DataConnectorDataTypesResponse':
"""
The available data types for the connector.
"""
return pulumi.get(self, "data_types")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
Etag of the azure resource
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
Azure resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> str:
"""
Expected value is 'Dynamics365'.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
Azure resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> str:
"""
The tenant id to connect to, and get the data from.
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter
def type(self) -> str:
"""
Azure resource type
"""
return pulumi.get(self, "type")
class AwaitableGetDynamics365DataConnectorResult(GetDynamics365DataConnectorResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDynamics365DataConnectorResult(
data_types=self.data_types,
etag=self.etag,
id=self.id,
kind=self.kind,
name=self.name,
tenant_id=self.tenant_id,
type=self.type)
def get_dynamics365_data_connector(data_connector_id: Optional[str] = None,
operational_insights_resource_provider: Optional[str] = None,
resource_group_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDynamics365DataConnectorResult:
"""
Represents Dynamics365 data connector.
:param str data_connector_id: Connector ID
:param str operational_insights_resource_provider: The namespace of workspaces resource provider- Microsoft.OperationalInsights.
:param str resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
:param str workspace_name: The name of the workspace.
"""
__args__ = dict()
__args__['dataConnectorId'] = data_connector_id
__args__['operationalInsightsResourceProvider'] = operational_insights_resource_provider
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceName'] = workspace_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:securityinsights/v20190101preview:getDynamics365DataConnector', __args__, opts=opts, typ=GetDynamics365DataConnectorResult).value
return AwaitableGetDynamics365DataConnectorResult(
data_types=__ret__.data_types,
etag=__ret__.etag,
id=__ret__.id,
kind=__ret__.kind,
name=__ret__.name,
tenant_id=__ret__.tenant_id,
type=__ret__.type)
| [
"[email protected]"
] | |
2c81ad50a51119c1b403adffc535dc8e0b3e962b | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/insights/data_collection_rule.py | 62121e6c6a62bf9f01862f7d7c4774bc76a24ffb | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 17,724 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['DataCollectionRuleArgs', 'DataCollectionRule']
@pulumi.input_type
class DataCollectionRuleArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
data_collection_rule_name: Optional[pulumi.Input[str]] = None,
data_flows: Optional[pulumi.Input[Sequence[pulumi.Input['DataFlowArgs']]]] = None,
data_sources: Optional[pulumi.Input['DataCollectionRuleDataSourcesArgs']] = None,
description: Optional[pulumi.Input[str]] = None,
destinations: Optional[pulumi.Input['DataCollectionRuleDestinationsArgs']] = None,
kind: Optional[pulumi.Input[Union[str, 'KnownDataCollectionRuleResourceKind']]] = None,
location: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a DataCollectionRule resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] data_collection_rule_name: The name of the data collection rule. The name is case insensitive.
:param pulumi.Input[Sequence[pulumi.Input['DataFlowArgs']]] data_flows: The specification of data flows.
:param pulumi.Input['DataCollectionRuleDataSourcesArgs'] data_sources: The specification of data sources.
This property is optional and can be omitted if the rule is meant to be used via direct calls to the provisioned endpoint.
:param pulumi.Input[str] description: Description of the data collection rule.
:param pulumi.Input['DataCollectionRuleDestinationsArgs'] destinations: The specification of destinations.
:param pulumi.Input[Union[str, 'KnownDataCollectionRuleResourceKind']] kind: The kind of the resource.
:param pulumi.Input[str] location: The geo-location where the resource lives.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if data_collection_rule_name is not None:
pulumi.set(__self__, "data_collection_rule_name", data_collection_rule_name)
if data_flows is not None:
pulumi.set(__self__, "data_flows", data_flows)
if data_sources is not None:
pulumi.set(__self__, "data_sources", data_sources)
if description is not None:
pulumi.set(__self__, "description", description)
if destinations is not None:
pulumi.set(__self__, "destinations", destinations)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if location is not None:
pulumi.set(__self__, "location", location)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="dataCollectionRuleName")
def data_collection_rule_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the data collection rule. The name is case insensitive.
"""
return pulumi.get(self, "data_collection_rule_name")
@data_collection_rule_name.setter
def data_collection_rule_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "data_collection_rule_name", value)
@property
@pulumi.getter(name="dataFlows")
def data_flows(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DataFlowArgs']]]]:
"""
The specification of data flows.
"""
return pulumi.get(self, "data_flows")
@data_flows.setter
def data_flows(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DataFlowArgs']]]]):
pulumi.set(self, "data_flows", value)
@property
@pulumi.getter(name="dataSources")
def data_sources(self) -> Optional[pulumi.Input['DataCollectionRuleDataSourcesArgs']]:
"""
The specification of data sources.
This property is optional and can be omitted if the rule is meant to be used via direct calls to the provisioned endpoint.
"""
return pulumi.get(self, "data_sources")
@data_sources.setter
def data_sources(self, value: Optional[pulumi.Input['DataCollectionRuleDataSourcesArgs']]):
pulumi.set(self, "data_sources", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description of the data collection rule.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def destinations(self) -> Optional[pulumi.Input['DataCollectionRuleDestinationsArgs']]:
"""
The specification of destinations.
"""
return pulumi.get(self, "destinations")
@destinations.setter
def destinations(self, value: Optional[pulumi.Input['DataCollectionRuleDestinationsArgs']]):
pulumi.set(self, "destinations", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[Union[str, 'KnownDataCollectionRuleResourceKind']]]:
"""
The kind of the resource.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[Union[str, 'KnownDataCollectionRuleResourceKind']]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The geo-location where the resource lives.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class DataCollectionRule(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
data_collection_rule_name: Optional[pulumi.Input[str]] = None,
data_flows: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DataFlowArgs']]]]] = None,
data_sources: Optional[pulumi.Input[pulumi.InputType['DataCollectionRuleDataSourcesArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
destinations: Optional[pulumi.Input[pulumi.InputType['DataCollectionRuleDestinationsArgs']]] = None,
kind: Optional[pulumi.Input[Union[str, 'KnownDataCollectionRuleResourceKind']]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Definition of ARM tracked top level resource.
API Version: 2019-11-01-preview.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] data_collection_rule_name: The name of the data collection rule. The name is case insensitive.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DataFlowArgs']]]] data_flows: The specification of data flows.
:param pulumi.Input[pulumi.InputType['DataCollectionRuleDataSourcesArgs']] data_sources: The specification of data sources.
This property is optional and can be omitted if the rule is meant to be used via direct calls to the provisioned endpoint.
:param pulumi.Input[str] description: Description of the data collection rule.
:param pulumi.Input[pulumi.InputType['DataCollectionRuleDestinationsArgs']] destinations: The specification of destinations.
:param pulumi.Input[Union[str, 'KnownDataCollectionRuleResourceKind']] kind: The kind of the resource.
:param pulumi.Input[str] location: The geo-location where the resource lives.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DataCollectionRuleArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Definition of ARM tracked top level resource.
API Version: 2019-11-01-preview.
:param str resource_name: The name of the resource.
:param DataCollectionRuleArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DataCollectionRuleArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
data_collection_rule_name: Optional[pulumi.Input[str]] = None,
data_flows: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DataFlowArgs']]]]] = None,
data_sources: Optional[pulumi.Input[pulumi.InputType['DataCollectionRuleDataSourcesArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
destinations: Optional[pulumi.Input[pulumi.InputType['DataCollectionRuleDestinationsArgs']]] = None,
kind: Optional[pulumi.Input[Union[str, 'KnownDataCollectionRuleResourceKind']]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DataCollectionRuleArgs.__new__(DataCollectionRuleArgs)
__props__.__dict__["data_collection_rule_name"] = data_collection_rule_name
__props__.__dict__["data_flows"] = data_flows
__props__.__dict__["data_sources"] = data_sources
__props__.__dict__["description"] = description
__props__.__dict__["destinations"] = destinations
__props__.__dict__["kind"] = kind
__props__.__dict__["location"] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
__props__.__dict__["etag"] = None
__props__.__dict__["immutable_id"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:insights:DataCollectionRule"), pulumi.Alias(type_="azure-native:insights/v20191101preview:DataCollectionRule"), pulumi.Alias(type_="azure-nextgen:insights/v20191101preview:DataCollectionRule"), pulumi.Alias(type_="azure-native:insights/v20210401:DataCollectionRule"), pulumi.Alias(type_="azure-nextgen:insights/v20210401:DataCollectionRule")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(DataCollectionRule, __self__).__init__(
'azure-native:insights:DataCollectionRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'DataCollectionRule':
"""
Get an existing DataCollectionRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = DataCollectionRuleArgs.__new__(DataCollectionRuleArgs)
__props__.__dict__["data_flows"] = None
__props__.__dict__["data_sources"] = None
__props__.__dict__["description"] = None
__props__.__dict__["destinations"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["immutable_id"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return DataCollectionRule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="dataFlows")
def data_flows(self) -> pulumi.Output[Optional[Sequence['outputs.DataFlowResponse']]]:
"""
The specification of data flows.
"""
return pulumi.get(self, "data_flows")
@property
@pulumi.getter(name="dataSources")
def data_sources(self) -> pulumi.Output[Optional['outputs.DataCollectionRuleResponseDataSources']]:
"""
The specification of data sources.
This property is optional and can be omitted if the rule is meant to be used via direct calls to the provisioned endpoint.
"""
return pulumi.get(self, "data_sources")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description of the data collection rule.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def destinations(self) -> pulumi.Output[Optional['outputs.DataCollectionRuleResponseDestinations']]:
"""
The specification of destinations.
"""
return pulumi.get(self, "destinations")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
Resource entity tag (ETag).
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="immutableId")
def immutable_id(self) -> pulumi.Output[str]:
"""
The immutable ID of this data collection rule. This property is READ-ONLY.
"""
return pulumi.get(self, "immutable_id")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
The kind of the resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The geo-location where the resource lives.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The resource provisioning state.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
| [
"[email protected]"
] | |
a6f320488fbfcac32b54d57d57061287558b662e | 89e40bf548403e440c230e06fa6301021ec8b0c7 | /sw_expert_academy/D2/p1946.py | 12221cf2e3b7cf1e668cdf6f634c66ecca732743 | [] | no_license | TaeJuneJoung/Algorithm | b9cf5724501918c7302099b8194d26bd19512bd0 | ecc2934a376c91ecec8bfd15af377d8a2973d71d | refs/heads/master | 2020-06-19T13:50:14.720987 | 2019-08-04T14:35:43 | 2019-08-04T14:35:43 | 196,732,653 | 0 | 0 | null | 2019-08-04T14:35:44 | 2019-07-13T14:46:42 | Python | UTF-8 | Python | false | false | 717 | py | """
[1946. Simple Decompression]
The key point of this problem is breaking to a new line once 10 characters
have been printed. The output must also move to a new line after each test case ends.
T : number of test cases
N : number of (value, count) pairs to read
sum_num : counter used to insert a line break every 10 characters
value : the string value to repeat
num : how many times value is printed
"""
T = int(input())
for t in range(1, T+1):
print("#{}".format(t))
N = int(input())
sum_num = 0
for i in range(N):
value, num = map(str, input().split())
num = int(num)
for j in range(num):
print(value, end="")
sum_num += 1
if sum_num == 10:
sum_num = 0
print()
print() | [
"[email protected]"
] | |
79ea21858064c500d5f2adf83982fe2f10cbeafd | 04dc3d8883c7b5510610ec3e86e4238606fc1e45 | /tasks/tasks_fetch_currency_exchange.py | 5103de89945a2192d07ba82a923e1b3ed841eb2b | [
"MIT"
] | permissive | xyla-io/almacen | 72294c6d7758d39ca12c22af174145d716769b82 | 7b7f235dc7939777f971f1b5eadd5621e980c15e | refs/heads/main | 2022-12-28T22:10:46.905278 | 2020-10-14T19:42:57 | 2020-10-16T19:50:55 | 304,113,749 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,398 | py | import models
from . import base
from config import CompanyConfiguration
from typing import List, Dict, Optional
from jones import FixerAPI
from datetime import datetime
class FetchBaseCurrencyExchangeReportTask(base.FetchReportTask):
api: Optional[FixerAPI]
min_currency_dates_to_fetch: Dict[str, datetime]
max_currency_dates_to_fetch: Dict[str, datetime]
base_currency: str
def __init__(self, base_currency: str, task_set: CompanyConfiguration.TaskSet, identifier_prefix: str):
super().__init__(task_set=task_set, identifier_prefix=identifier_prefix)
self.base_currency = base_currency
self.min_currency_dates_to_fetch = {}
self.max_currency_dates_to_fetch = {}
@property
def task_type(self) -> models.ReportTaskType:
return models.ReportTaskType.fetch_base_currency_exchage_rates
@property
def debug_description(self) -> str:
return '{}: ({} -> {}) — {}'.format(
self.company_display_name,
self.base_currency,
', '.join(self.currencies),
self.task_type.value
)
@property
def task_identifier_columns(self) -> Dict[str, any]:
return {
'base': self.base_currency,
'target': self.currencies,
}
@property
def currencies(self) -> List[str]:
return self.task_set.config['currency_exchange']['currencies']
@property
def report_table_model(self) -> models.ReportTableModel:
return models.CurrencyExchangeRatesTableModel(schema_name=self.report_table_schema)
@property
def api_credentials_key(self) -> str:
return self.task_set.config['currency_exchange']['credentials_key']
class FetchCurrencyExchangeReportTask(base.CombinedReportTask):
@property
def task_type(self) -> models.ReportTaskType:
return models.ReportTaskType.fetch_currency_exchange_rates
@property
def debug_description(self) -> str:
return '{}: ({}) — {}'.format(
self.company_display_name,
', '.join(self.currencies),
self.task_type.value
)
@property
def currencies(self) -> List[str]:
return self.task_set.config['currency_exchange']['currencies']
def generate_subtasks(self) -> List[base.ReportTask]:
return [
FetchBaseCurrencyExchangeReportTask(
base_currency=c,
task_set=self.task_set,
identifier_prefix='{}.{}-{}'.format(self.identifier, c, '_'.join(self.currencies))
)
for c in self.currencies
]
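# Illustrative sketch (the `identifier` property comes from the base task class;
# the values here are assumptions): with currencies ['USD', 'EUR'] and a parent
# identifier 'daily', generate_subtasks() yields one
# FetchBaseCurrencyExchangeReportTask per base currency, with identifier
# prefixes 'daily.USD-USD_EUR' and 'daily.EUR-USD_EUR'.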
| [
"[email protected]"
] | |
72afca3f30972d5d9f1f07b20ac5b50b0d4d7a58 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4165/codes/1575_994.py | 1ea93adeac7832332d12b5ce2126e1464ed3bbe4 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44 | py | print("Universidade Federal do Amazonas")
| [
"[email protected]"
] | |
cdad717e47a15a103068cedb950db7175e3f5c00 | 34d88082307281333ef4aeeec012a3ff5f8ec06e | /100 python/Q090.py | 57e5bf3e138599fd5fe71f041643aeb9d105c6eb | [] | no_license | JKChang2015/Python | a6f8b56fa3f9943682470ae57e5ad3266feb47a7 | adf3173263418aee5d32f96b9ea3bf416c43cc7b | refs/heads/master | 2022-12-12T12:24:48.682712 | 2021-07-30T22:27:41 | 2021-07-30T22:27:41 | 80,747,432 | 1 | 8 | null | 2022-12-08T04:32:06 | 2017-02-02T17:05:19 | HTML | UTF-8 | Python | false | false | 364 | py | # Q090
# Created by JKChang
# 09/05/2017, 15:37
# Tag: remove particular element
# Description: By using list comprehension, please write a program to print the list after removing the value 24 in
# [12,24,35,24,88,120,155].
li = [12, 24, 35, 24, 88, 120, 155]
l = [x for x in li if x != 24]
# l = [x for (i, x) in enumerate(li) if x != 24]
print(l)
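# Note: the comprehension removes every occurrence of 24, not just the first,
# so this prints [12, 35, 88, 120, 155].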
| [
"[email protected]"
] | |
1c754907b1ef0c71898c4d9a19507c4825d2d577 | c2ce7155a393e1056b5fdc4d3f9b9a89046e9285 | /aw_nas/btcs/layer2/controller.py | 27a221b2b58c7ce980ff31517931a6ac4fda1e97 | [
"MIT"
] | permissive | blyucs/aw_nas | 9c068dab1bd84a35e58a4c426f7c852a67b93882 | 8a32196ce342b8ad9e3885895735d1286e25beba | refs/heads/master | 2023-08-19T11:00:00.526229 | 2021-08-21T05:16:13 | 2021-08-21T05:16:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47,320 | py | """
2-layer controller.
"""
from aw_nas import utils, assert_rollout_type
from aw_nas.utils import DistributedDataParallel
from aw_nas.controller.base import BaseController
from aw_nas.btcs.layer2.search_space import (
Layer2Rollout,
Layer2DiffRollout,
DenseMicroRollout,
DenseMicroDiffRollout,
StagewiseMacroRollout,
StagewiseMacroDiffRollout,
SinkConnectMacroDiffRollout,
)
from collections import OrderedDict
import numpy as np
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
try:
# from torch.nn.SyncBatchNorm import convert_sync_batch_norm as convert_sync_bn
from torch.nn import SyncBatchNorm
convert_sync_bn = SyncBatchNorm.convert_sync_batchnorm
except ImportError:
convert_sync_bn = lambda m: m
class Layer2Optimizer(optim.Optimizer):
def __init__(self, params, **opt_cfg):
super(Layer2Optimizer, self).__init__([torch.tensor([])], defaults={})
macro_opt_type = opt_cfg["macro"].pop("type")
micro_opt_type = opt_cfg["micro"].pop("type")
# currently width alphas & macro-alpha share the same optimizer
self.macro_optimizer = getattr(optim, macro_opt_type)(
nn.ParameterList(params[0:2]), **opt_cfg["macro"]
        )  # params[0:2] = the macro cg-alphas plus the width alphas (added as the 2nd entry)
self.micro_optimizer = getattr(optim, micro_opt_type)(
nn.ParameterList(params[2:]), **opt_cfg["micro"]
)
def step(self):
self.macro_optimizer.step()
self.micro_optimizer.step()
torch.optim.layer2 = Layer2Optimizer  # monkey-patch torch.optim so that "layer2" resolves like a built-in optimizer type
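# Illustrative sketch (an assumption about the trainer side, not code from this
# module): anything that resolves optimizers via getattr(torch.optim, type) can
# now build the two-part optimizer from a config such as:
#
#   opt_cfg = {
#       "type": "layer2",
#       "macro": {"type": "Adam", "lr": 3e-4},
#       "micro": {"type": "Adam", "lr": 1e-3},
#   }
#   opt_cls = getattr(torch.optim, opt_cfg.pop("type"))  # -> Layer2Optimizer
#   optimizer = opt_cls(controller.parameters(), **opt_cfg)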
class Layer2DiffController(BaseController, nn.Module):
NAME = "layer2-differentiable"
def __init__(
self,
search_space,
rollout_type,
mode="eval",
device="cuda",
macro_controller_type="random_sample",
macro_controller_cfg={},
micro_controller_type="random_sample",
micro_controller_cfg={},
inspect_hessian_every=-1,
save_alphas_every=-1,
multiprocess=False,
schedule_cfg=None,
):
super(Layer2DiffController, self).__init__(
search_space, rollout_type, schedule_cfg=schedule_cfg
)
nn.Module.__init__(self)
self.search_space = search_space
self.rollout_type = rollout_type
self.device = device
self.to(self.device)
self.inspect_hessian_every = inspect_hessian_every
self.inspect_hessian = False
self.save_alphas_every = save_alphas_every
self.save_alphas = False
self.saved_dict = {
"macro": [],
"micro": [],
"width": [],
}
self.multiprocess = multiprocess
# the macro/micro controllers
if macro_controller_type == "macro-stagewise-diff":
self.macro_controller = MacroStagewiseDiffController(
self.search_space.macro_search_space,
macro_controller_type,
device=self.device,
multiprocess=self.multiprocess,
**macro_controller_cfg,
)
elif macro_controller_type == "macro-sink-connect-diff":
self.macro_controller = MacroSinkConnectDiffController(
self.search_space.macro_search_space,
macro_controller_type,
device=self.device,
multiprocess=self.multiprocess,
**macro_controller_cfg,
)
else:
raise NotImplementedError()
if micro_controller_type == "micro-dense-diff":
self.micro_controller = MicroDenseDiffController(
self.search_space.micro_search_space,
micro_controller_type,
device=self.device,
multiprocess=self.multiprocess,
**micro_controller_cfg,
)
else:
raise NotImplementedError()
object.__setattr__(self, "parallel_model", self)
self._parallelize()
def _parallelize(self):
if self.multiprocess:
net = convert_sync_bn(self).to(self.device)
object.__setattr__(
self,
"parallel_model",
DistributedDataParallel(
self, (self.device,), find_unused_parameters=True
),
)
def on_epoch_start(self, epoch):
super(Layer2DiffController, self).on_epoch_start(epoch)
if self.inspect_hessian_every >= 0 and epoch % self.inspect_hessian_every == 0:
self.inspect_hessian = True
if self.save_alphas_every >= 0 and epoch % self.save_alphas_every == 0:
self.save_alphas = True
# save alphas every epoch
if self.save_alphas:
self.saved_dict["macro"].append(
[alpha.data.cpu() for alpha in self.macro_controller.cg_alphas]
)
self.saved_dict["micro"].append(
[alpha.data.cpu() for alpha in self.micro_controller.cg_alphas]
)
self.saved_dict["width"].append(
[
width_alpha.cpu()
for width_alpha in self.macro_controller.width_alphas
]
)
self.macro_controller.on_epoch_start(epoch)
self.micro_controller.on_epoch_start(epoch)
def set_device(self, device):
self.device = device
self.to(device)
def set_mode(self, mode):
super(Layer2DiffController, self).set_mode(mode)
if mode == "train":
nn.Module.train(self)
elif mode == "eval":
nn.Module.eval(self)
else:
raise Exception("Unrecognized mode: {}".format(mode))
def parameters(self, recurse=False):
        # FIXME: the standard nn.Module.parameters() uses recurse=True to collect all params
param_list = nn.ParameterList([])
param_list.extend(self.macro_controller.parameters())
param_list.extend(self.micro_controller.parameters())
return param_list
def _entropy_loss(self):
return (
self.macro_controller._entropy_loss()
+ self.micro_controller._entropy_loss()
)
def sample(self, n=1, batch_size=1):
if self.multiprocess:
return self.parallel_model.forward(n=n, batch_size=batch_size)
else:
return self.forward(n=n, batch_size=batch_size)
def forward(self, n=1, batch_size=1):
rollouts = []
macro_rollouts = self.macro_controller.forward(n=n, batch_size=batch_size)
micro_rollouts = self.micro_controller.forward(n=n, batch_size=batch_size)
for i in range(n):
rollouts.append(
Layer2DiffRollout(
macro_rollouts[i], micro_rollouts[i], self.search_space
)
)
return rollouts
def gradient(self, loss, return_grads=True, zero_grads=True):
if zero_grads:
self.zero_grad()
if self.inspect_hessian:
for name, param in self.named_parameters():
max_eig = utils.torch_utils.max_eig_of_hessian(loss, param)
self.logger.info("Max eigenvalue of Hessian of %s: %f", name, max_eig)
_loss = loss + self._entropy_loss()
_loss.backward()
if return_grads:
return utils.get_numpy(_loss), [
(k, v.grad.clone()) for k, v in self.named_parameters()
]
return utils.get_numpy(_loss)
def step_current_gradient(self, optimizer):
self.macro_controller.step_current_gradient(optimizer.macro_optimizer)
self.micro_controller.step_current_gradient(optimizer.micro_optimizer)
def step_gradient(self, gradients, optimizer):
self.macro_controller.step_gradient(gradients[0], optimizer.macro_optimizer)
self.micro_controller.step_gradient(gradients[1], optimizer.micro_optimizer)
def step(self, rollouts, optimizer, perf_name):
macro_rollouts = [r.macro for r in rollouts]
micro_rollouts = [r.micro for r in rollouts]
macro_loss = self.macro_controller.step(
macro_rollouts, optimizer.macro_optimizer, perf_name
)
micro_loss = self.micro_controller.step(
micro_rollouts, optimizer.micro_optimizer, perf_name
)
return macro_loss, micro_loss
def summary(self, rollouts, log=False, log_prefix="", step=None):
macro_rollouts = [r.macro for r in rollouts]
micro_rollouts = [r.micro for r in rollouts]
self.macro_controller.summary(
macro_rollouts, log=log, log_prefix=log_prefix, step=None
)
self.micro_controller.summary(
micro_rollouts, log=log, log_prefix=log_prefix, step=None
)
def save(self, path):
"""Save the parameters to disk."""
torch.save({"epoch": self.epoch, "state_dict": self.state_dict()}, path)
self.logger.info("Saved controller network to %s", path)
"""save alphas"""
if self.save_alphas_every is not None:
# os.path.dirname means the parent path of the `PATH`
torch.save(
self.saved_dict,
os.path.join(os.path.dirname(os.path.dirname(path)), "alphas.pth"),
)
def load(self, path):
"""Load the parameters from disk."""
checkpoint = torch.load(path, map_location=torch.device("cpu"))
self.load_state_dict(checkpoint["state_dict"])
self.on_epoch_start(checkpoint["epoch"])
self.logger.info("Loaded controller network from %s", path)
    # Layer2DiffController.parameters() above returns a flat ParameterList collected
    # from the macro and micro controllers, so zero_grad() is overridden here rather
    # than relying on the default nn.Module.parameters() traversal.
    def zero_grad(self):
        for p in self.parameters():
            if p.grad is not None:
                p.grad.detach_()
                p.grad.zero_()
@classmethod
def supported_rollout_types(cls):
return ["layer2", "layer2-differentiable"]
class GetArchMacro(torch.autograd.Function):
@staticmethod
def forward(
ctx,
search_space,
op_weights,
device,
i_stage,
):
stage_conn = torch.zeros(
(
search_space.stage_node_nums[i_stage],
search_space.stage_node_nums[i_stage],
)
).to(device)
stage_conn[search_space.idxes[i_stage]] = op_weights
ctx.save_for_backward(
torch.as_tensor(op_weights), torch.as_tensor(search_space.idxes[i_stage])
)
return stage_conn
@staticmethod
def backward(ctx, grad_output):
op_weights, idxes = ctx.saved_tensors
op_weights_grad = grad_output[idxes[0], idxes[1]]
return None, op_weights_grad, None, None, None
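# Illustrative sketch (edge ordering is an assumption based on
# search_space.idxes): for a 3-node stage with flattened edge weights
# w = [w10, w20, w21], forward() scatters them into a dense connection matrix
#   [[0,   0,   0 ],
#    [w10, 0,   0 ],
#    [w20, w21, 0 ]]
# and backward() gathers the incoming gradient at the same indices, so
# gradients flow only through the real edges.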
class MacroStagewiseDiffController(BaseController, nn.Module):
NAME = "macro-stagewise-diff"
SCHEDULABLE_ATTRS = [
"gumbel_temperature",
"entropy_coeff",
"force_uniform",
"width_gumbel_temperature",
"width_entropy_coeff",
]
def __init__(
self,
search_space,
rollout_type,
mode="eval",
device="cuda",
use_prob=False,
gumbel_hard=False,
gumbel_temperature=1.0,
use_sigmoid=False,
use_edge_normalization=False,
entropy_coeff=0.01,
max_grad_norm=None,
force_uniform=False,
full_init=False, # use all-one initialization and big flops reg
progressive_pruning_th=None,
multiprocess=False,
per_stage_width=True, # default use per stage width
width_entropy_coeff=0.01,
width_gumbel_temperature=1.0,
schedule_cfg=None,
):
super(MacroStagewiseDiffController, self).__init__(
search_space, rollout_type, schedule_cfg=schedule_cfg
)
nn.Module.__init__(self)
self.device = device
# sampling
self.use_prob = use_prob
self.gumbel_hard = gumbel_hard
self.gumbel_temperature = gumbel_temperature
self.use_sigmoid = use_sigmoid
        # use_prob / use_sigmoid must not both be True at the same time;
        # if both are False, plain Gumbel-softmax sampling is used
assert not (use_prob and use_sigmoid)
# edge normalization
self.use_edge_normalization = use_edge_normalization
# training
self.entropy_coeff = entropy_coeff
self.max_grad_norm = max_grad_norm
self.force_uniform = force_uniform
self.progressive_pruning_th = progressive_pruning_th
self.width_choice = self.search_space.width_choice
self.multiprocess = multiprocess
self.per_stage_width = per_stage_width
self.width_gumbel_temperature = width_gumbel_temperature
self.width_entropy_coeff = width_entropy_coeff
# generate parameters
self.full_init = full_init
if not self.full_init:
init_value = 1.0e-3
else:
init_value = 1.0
self.cg_alphas = nn.ParameterList(
[
nn.Parameter(
init_value * torch.randn(sum(self.search_space.num_possible_edges))
)
]
)
# width choices [#cells , #width_choice]
if self.width_choice is not None:
if not self.per_stage_width:
self.width_alphas = nn.ParameterList(
[
nn.Parameter(
init_value
* torch.randn(
len(self.search_space.cell_layout),
len(self.width_choice),
)
)
]
)
else:
self.width_alphas = nn.ParameterList(
[
nn.Parameter(
init_value
* torch.randn(
len(self.search_space.stage_node_nums),
len(self.width_choice),
)
)
]
)
self.stage_num_alphas = (
self.search_space.num_possible_edges
        )  # kept for compatibility with the sink-connecting search space
if self.use_edge_normalization:
raise NotImplementedError("MacroDiffController does not support edge-norm")
else:
self.cg_betas = None
self.get_arch = GetArchMacro()
self.to(self.device)
def set_mode(self, mode):
super(MacroStagewiseDiffController, self).set_mode(mode)
if mode == "train":
nn.Module.train(self)
elif mode == "eval":
nn.Module.eval(self)
else:
raise Exception("Unrecognized mode: {}".format(mode))
def set_device(self, device):
self.device = device
self.to(device)
def progressive_pruning(self):
for alpha in self.cg_alphas:
            # in-place: zero out alphas not greater than the pruning threshold (no grad)
alpha.data = alpha * (alpha.gt(self.progressive_pruning_th).float())
def forward(self, n=1, batch_size=1):
return self.sample(n=n, batch_size=batch_size)
def sample(self, n=1, batch_size=1):
if self.progressive_pruning_th is not None:
self.progressive_pruning()
width_arch, width_logits = self.sample_width(n=n, batch_size=batch_size)
rollouts = []
for i_sample in range(n):
# op_weights.shape: [num_edges, [batch_size,] num_ops]
# edge_norms.shape: [num_edges] do not have batch_size.
op_weights_list = []
edge_norms_list = []
sampled_list = []
logits_list = []
for alphas in self.cg_alphas:
if (
self.progressive_pruning_th is not None
and self.progressive_pruning_th > 0
):
alphas = alphas.clamp(self.progressive_pruning_th, 1.0e4)
else:
pass
if self.force_uniform: # cg_alpha parameters will not be in the graph
# NOTE: `force_uniform` config does not affects edge_norms (betas),
# if one wants a force_uniform search, keep `use_edge_normalization=False`
alphas = torch.zeros_like(alphas)
if batch_size > 1:
expanded_alpha = (
alphas.reshape([alphas.shape[0], 1, alphas.shape[1]])
.repeat([1, batch_size, 1])
.reshape([-1, alphas.shape[-1]])
)
else:
expanded_alpha = alphas
if self.use_prob:
sampled = F.softmax(
expanded_alpha / self.gumbel_temperature, dim=-1
)
elif self.use_sigmoid:
sampled = utils.relaxed_bernoulli_sample(
expanded_alpha, self.gumbel_temperature
)
else:
# gumbel sampling
sampled, _ = utils.gumbel_softmax(
expanded_alpha, self.gumbel_temperature, hard=False
)
if self.gumbel_hard:
op_weights = utils.straight_through(sampled)
else:
op_weights = sampled
if batch_size > 1:
sampled = sampled.reshape([-1, batch_size, op_weights.shape[-1]])
op_weights = op_weights.reshape(
[-1, batch_size, op_weights.shape[-1]]
)
op_weights_list.append(op_weights)
sampled_list.append(utils.get_numpy(sampled))
# logits_list.append(utils.get_numpy(alphas))
logits_list.append(alphas)
stage_conns = []
split_op_weights = torch.split(op_weights, self.stage_num_alphas)
for i_stage in range(self.search_space.stage_num):
stage_conn = self.get_arch.apply(
self.search_space,
split_op_weights[i_stage],
self.device,
i_stage,
)
stage_conns.append(stage_conn)
rollouts.append(
StagewiseMacroDiffRollout(
arch=stage_conns,
sampled=sampled_list,
logits=logits_list,
width_arch=width_arch[i_sample],
width_logits=width_logits[i_sample],
search_space=self.search_space,
)
)
return rollouts
def sample_width(self, n=1, batch_size=1):
assert batch_size == 1, "sample_width should not have batch size > 1"
width_sampled_list = []
width_logits_list = []
width_op_weights_list = []
for _ in range(n):
# sample the width alphas
for width_alphas in self.width_alphas:
if self.force_uniform: # cg_alpha parameters will not be in the graph
# NOTE: `force_uniform` config does not affects edge_norms (betas),
# if one wants a force_uniform search, keep `use_edge_normalization=False`
width_alphas = torch.zeros_like(width_alphas)
if batch_size > 1:
expanded_width_alpha = (
width_alphas.reshape(
[width_alphas.shape[0], 1, width_alphas.shape[1]]
)
.repeat([1, batch_size, 1])
.reshape([-1, width_alphas.shape[-1]])
)
else:
expanded_width_alpha = width_alphas
if self.use_prob:
width_sampled = F.softmax(
expanded_width_alpha / self.width_gumbel_temperature, dim=-1
)
elif self.use_sigmoid:
width_sampled = utils.relaxed_bernoulli_sample(
expanded_width_alpha, self.width_gumbel_temperature
)
else:
# gumbel sampling
width_sampled, _ = utils.gumbel_softmax(
expanded_width_alpha, self.width_gumbel_temperature, hard=False
)
if self.gumbel_hard:
width_op_weights = utils.straight_through(width_sampled)
else:
width_op_weights = width_sampled
if batch_size > 1:
width_sampled = width_sampled.reshape(
[-1, batch_size, width_op_weights.shape[-1]]
)
width_op_weights = width_op_weights.reshape(
[-1, batch_size, width_op_weights.shape[-1]]
)
if not self.per_stage_width:
width_op_weights_full = width_op_weights
width_sampled_full = width_sampled
width_alphas_full = width_alphas
else:
# the last stage has one more node
node_list = self.search_space.stage_node_nums.copy()
# let the 1st stage num_node -1
# to let all reduction cell uses the width-alphas of next stage
node_list[0] = node_list[0] - 1
width_op_weights_full = torch.cat(
[
width_op_weights[idx_stage].repeat(num_nodes - 1, 1)
for idx_stage, num_nodes in enumerate(node_list)
]
)
width_sampled_full = torch.cat(
[
width_sampled[idx_stage].repeat(num_nodes - 1, 1)
for idx_stage, num_nodes in enumerate(node_list)
]
)
width_alphas_full = torch.cat(
[
width_alphas[idx_stage].repeat(num_nodes - 1, 1)
for idx_stage, num_nodes in enumerate(node_list)
]
)
width_op_weights_list.append(width_op_weights_full)
width_sampled_list.append(utils.get_numpy(width_sampled_full))
# logits_list.append(utils.get_numpy(alphas))
width_logits_list.append(width_alphas_full)
return width_op_weights_list, width_logits_list
def save(self, path):
"""Save the parameters to disk."""
torch.save({"epoch": self.epoch, "state_dict": self.state_dict()}, path)
self.logger.info("Saved controller network to %s", path)
def load(self, path):
"""Load the parameters from disk."""
checkpoint = torch.load(path, map_location=torch.device("cpu"))
self.load_state_dict(checkpoint["state_dict"])
self.on_epoch_start(checkpoint["epoch"])
self.logger.info("Loaded controller network from %s", path)
def _entropy_loss(self):
ent_loss = 0.0
if self.entropy_coeff > 0:
alphas = self.cg_alphas[0].split(
[i - 1 for i in self.search_space.stage_node_nums]
)
probs = [F.softmax(alpha, dim=-1) for alpha in self.cg_alphas]
ent_loss = (
self.entropy_coeff
* sum(-(torch.log(prob) * prob).sum() for prob in probs)
+ ent_loss
)
if self.width_entropy_coeff > 0:
width_alphas = self.width_alphas
probs = [F.softmax(alpha, dim=-1) for alpha in self.width_alphas]
ent_loss = (
self.width_entropy_coeff
* sum(-(torch.log(prob) * prob).sum() for prob in probs)
+ ent_loss
)
return ent_loss
def gradient(self, loss, return_grads=True, zero_grads=True):
raise NotImplementedError(
"the grad function is implemented in the layer2diffcontroller.gradient()"
)
def step_current_gradient(self, optimizer):
if self.max_grad_norm is not None:
torch.nn.utils.clip_grad_norm_(self.parameters(), self.max_grad_norm)
optimizer.step()
def step_gradient(self, gradients, optimizer):
self.zero_grad()
named_params = dict(self.named_parameters())
for k, grad in gradients:
named_params[k].grad = grad
# clip the gradients
if self.max_grad_norm is not None:
torch.nn.utils.clip_grad_norm_(self.parameters(), self.max_grad_norm)
# apply the gradients
optimizer.step()
    def step(self, rollouts, optimizer, perf_name):  # very memory inefficient
        self.zero_grad()
        losses = [r.get_perf(perf_name) for r in rollouts]
        for loss in losses:
            loss.backward()
        optimizer.step()
        return np.mean([l.detach().cpu().numpy() for l in losses])
def __getstate__(self):
state = super(MacroStagewiseDiffController, self).__getstate__().copy()
del state["get_arch"]
return state
def __setstate__(self, state):
super(MacroStagewiseDiffController, self).__setstate__(state)
self.get_arch = GetArchMacro()
def summary(self, rollouts, log=False, log_prefix="", step=None):
num = len(rollouts)
logits_list = [
[utils.get_numpy(logits) for logits in r.logits] for r in rollouts
]
_ss = self.search_space
if self.gumbel_hard:
cg_logprobs = [0.0 for _ in range(_ss.num_cell_groups)]
cg_entros = [0.0 for _ in range(_ss.num_cell_groups)]
for rollout, logits in zip(rollouts, logits_list):
for cg_idx, (vec, cg_logits) in enumerate(zip(rollout.arch, logits)):
prob = utils.softmax(cg_logits)
logprob = np.log(prob)
if self.gumbel_hard:
inds = np.argmax(utils.get_numpy(vec.op_weights), axis=-1)
cg_logprobs[cg_idx] += np.sum(logprob[range(len(inds)), inds])
cg_entros[cg_idx] += -(prob * logprob).sum()
# mean across rollouts
if self.gumbel_hard:
cg_logprobs = [s / num for s in cg_logprobs]
total_logprob = sum(cg_logprobs)
cg_logprobs_str = ",".join(["{:.2f}".format(n) for n in cg_logprobs])
cg_entros = [s / num for s in cg_entros]
total_entro = sum(cg_entros)
cg_entro_str = ",".join(["{:.2f}".format(n) for n in cg_entros])
if log:
# maybe log the summary
self.logger.info(
"%s%d rollouts: %s ENTROPY: %2f (%s)",
log_prefix,
num,
"-LOG_PROB: %.2f (%s) ;" % (-total_logprob, cg_logprobs_str)
if self.gumbel_hard
else "",
total_entro,
cg_entro_str,
)
if step is not None and not self.writer.is_none():
if self.gumbel_hard:
self.writer.add_scalar("log_prob", total_logprob, step)
self.writer.add_scalar("entropy", total_entro, step)
stats = [
(n + " ENTRO", entro) for n, entro in zip(_ss.cell_group_names, cg_entros)
]
if self.gumbel_hard:
stats += [
(n + " LOGPROB", logprob)
for n, logprob in zip(_ss.cell_group_names, cg_logprobs)
]
return OrderedDict(stats)
@classmethod
def supported_rollout_types(cls):
return ["macro-stagewise", "macro-stagewise-diff", "macro-sink-connect-diff"]
class GetArchMacroSinkConnect(torch.autograd.Function):
@staticmethod
def forward(
ctx,
search_space,
op_weights,
device,
i_stage,
):
stage_conn = torch.zeros(
(
search_space.stage_node_nums[i_stage],
search_space.stage_node_nums[i_stage],
)
).to(device)
stage_conn[np.arange(len(op_weights)) + 1, np.arange(len(op_weights))] = 1
stage_conn[-1, : len(op_weights)] = op_weights
ctx.save_for_backward(
torch.as_tensor(op_weights), torch.as_tensor(search_space.idxes[i_stage])
)
return stage_conn
@staticmethod
def backward(ctx, grad_output):
op_weights, idxes = ctx.saved_tensors
op_weights_grad = grad_output[-1, : len(op_weights)]
return None, op_weights_grad, None, None, None
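# Illustrative sketch (a 4-node stage is an assumption): with learned weights
# w = [w0, w1, w2], forward() hard-wires consecutive nodes and learns only the
# connections into the final "sink" node:
#   [[0,  0,  0,  0],
#    [1,  0,  0,  0],
#    [0,  1,  0,  0],
#    [w0, w1, w2, 0]]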
class MacroSinkConnectDiffController(MacroStagewiseDiffController):
NAME = "macro-sink-connect-diff"
    # The TF_NAS-like macro search space (sink-based connecting):
    # in each stage, before the reduction node, a `sink point` aggregates every
    # node's output with a softmax.
    # Note that a cg-alpha here denotes whether an edge is connected or not.
def __init__(self, *args, **kwargs):
super(MacroSinkConnectDiffController, self).__init__(*args, **kwargs)
if not self.full_init:
self.cg_alphas = nn.ParameterList(
[
nn.Parameter(
1e-3
* torch.randn(
sum([n - 1 for n in self.search_space.stage_node_nums])
)
)
]
)
else:
self.cg_alphas = nn.ParameterList(
[
nn.Parameter(
torch.ones(
sum([n - 1 for n in self.search_space.stage_node_nums])
)
)
]
)
        assert not self.use_sigmoid  # sink-connecting should introduce competition in edges
self.get_arch = GetArchMacroSinkConnect()
self.stage_num_alphas = [n - 1 for n in self.search_space.stage_node_nums]
self.to(self.device) # move the newly generated cg_alphas to cuda
    # The only difference from MacroStagewiseDiffController.sample() is that the arch is packed into a SinkConnectMacroDiffRollout
def sample(self, n=1, batch_size=1):
# if use progressive pruning
if self.progressive_pruning_th is not None:
self.progressive_pruning()
width_arch, width_logits = self.sample_width(n=n, batch_size=batch_size)
rollouts = []
for i_sample in range(n):
# op_weights.shape: [num_edges, [batch_size,] num_ops]
# edge_norms.shape: [num_edges] do not have batch_size.
op_weights_list = []
edge_norms_list = []
sampled_list = []
logits_list = []
for alphas in self.cg_alphas:
splits = [i - 1 for i in self.search_space.stage_node_nums]
op_weights = []
sampleds = []
for alpha in alphas.split(splits):
if (
self.force_uniform
): # cg_alpha parameters will not be in the graph
# NOTE: `force_uniform` config does not affects edge_norms (betas),
# if one wants a force_uniform search, keep `use_edge_normalization=False`
alpha = torch.zeros_like(alpha)
if batch_size > 1:
expanded_alpha = (
alpha.reshape([alpha.shape[0], 1, alpha.shape[1]])
.repeat([1, batch_size, 1])
.reshape([-1, alpha.shape[-1]])
)
else:
expanded_alpha = alpha
if self.use_prob:
sampled = F.softmax(
expanded_alpha / self.gumbel_temperature, dim=-1
)
elif self.use_sigmoid:
sampled = utils.relaxed_bernoulli_sample(
expanded_alpha, self.gumbel_temperature
)
else:
# gumbel sampling
sampled, _ = utils.gumbel_softmax(
expanded_alpha, self.gumbel_temperature, hard=False
)
if self.gumbel_hard:
op_weight = utils.straight_through(sampled)
else:
op_weight = sampled
if batch_size > 1:
sampled = sampled.reshape([-1, batch_size, op_weight.shape[-1]])
op_weight = op_weight.reshape(
[-1, batch_size, op_weight.shape[-1]]
)
op_weights.append(op_weight)
sampleds.append(sampled)
op_weights = torch.cat(op_weights)
sampleds = torch.cat(sampleds)
op_weights_list.append(op_weights)
sampled_list.append(utils.get_numpy(sampleds))
logits_list.append(alphas)
stage_conns = []
split_op_weights = torch.split(op_weights, self.stage_num_alphas)
for i_stage in range(self.search_space.stage_num):
stage_conn = self.get_arch.apply(
self.search_space,
split_op_weights[i_stage],
self.device,
i_stage,
)
stage_conns.append(stage_conn)
rollouts.append(
SinkConnectMacroDiffRollout(
arch=stage_conns,
sampled=sampled_list,
logits=logits_list,
width_arch=width_arch[i_sample],
width_logits=width_logits[i_sample],
search_space=self.search_space,
)
)
return rollouts
def __setstate__(self, state):
super(MacroSinkConnectDiffController, self).__setstate__(state)
self.get_arch = GetArchMacroSinkConnect()
class GetArchMicro(torch.autograd.Function):
@staticmethod
def forward(ctx, search_space, op_weights, device):
empty_arch = torch.zeros(
(
search_space._num_nodes,
search_space._num_nodes,
search_space.num_op_choices,
)
).to(device)
empty_arch[search_space.idx] = op_weights
ctx.save_for_backward(
torch.as_tensor(op_weights), torch.as_tensor(search_space.idx)
)
return empty_arch
@staticmethod
def backward(ctx, grad_output):
op_weights, idxes = ctx.saved_tensors
op_weights_grad = grad_output[idxes[0], idxes[1]]
return None, op_weights_grad, None
class MicroDenseDiffController(BaseController, nn.Module):
NAME = "micro-dense-diff"
SCHEDULABLE_ATTRS = ["gumbel_temperature", "entropy_coeff", "force_uniform"]
def __init__(
self,
search_space,
rollout_type,
mode="eval",
device="cuda",
use_prob=False,
gumbel_hard=False,
gumbel_temperature=1.0,
use_sigmoid=True,
use_edge_normalization=False,
entropy_coeff=0.01,
max_grad_norm=None,
force_uniform=False,
full_init=False,
progressive_pruning_th=None,
multiprocess=False,
schedule_cfg=None,
):
super(MicroDenseDiffController, self).__init__(
search_space, rollout_type, schedule_cfg=schedule_cfg
)
nn.Module.__init__(self)
self.device = device
# sampling
self.use_prob = use_prob
self.use_sigmoid = use_sigmoid
self.gumbel_hard = gumbel_hard
self.gumbel_temperature = gumbel_temperature
assert not (use_prob and use_sigmoid)
# edge normalization
self.use_edge_normalization = use_edge_normalization
# training
self.entropy_coeff = entropy_coeff
self.max_grad_norm = max_grad_norm
self.force_uniform = force_uniform
self.full_init = full_init
self.progressive_pruning_th = progressive_pruning_th
self.multiprocess = multiprocess
_num_init_nodes = self.search_space.num_init_nodes
_num_edges_list = [
sum(
_num_init_nodes + i
for i in range(self.search_space.get_num_steps(i_cg))
)
for i_cg in range(self.search_space.num_cell_groups)
]
if not self.full_init:
self.cg_alphas = nn.ParameterList(
[
nn.Parameter(
1e-3
* torch.randn(
_num_edges,
len(self.search_space.cell_shared_primitives[i_cg]),
)
) # shape: [num_edges, num_ops]
for i_cg, _num_edges in enumerate(_num_edges_list)
]
)
else:
self.cg_alphas = nn.ParameterList(
[
nn.Parameter(
1
* torch.ones(
_num_edges,
len(self.search_space.cell_shared_primitives[i_cg]),
)
) # shape: [num_edges, num_ops]
for i_cg, _num_edges in enumerate(_num_edges_list)
]
)
if self.use_edge_normalization:
raise NotImplementedError("MicroDenseController does not support edge-norm")
else:
self.cg_betas = None
self.get_arch = GetArchMicro()
self.to(self.device)
def set_mode(self, mode):
super(MicroDenseDiffController, self).set_mode(mode)
if mode == "train":
nn.Module.train(self)
elif mode == "eval":
nn.Module.eval(self)
else:
raise Exception("Unrecognized mode: {}".format(mode))
def set_device(self, device):
self.device = device
self.to(device)
def progressive_pruning(self):
for alpha in self.cg_alphas:
            # in-place: zero out alphas not greater than the pruning threshold (no grad)
alpha.data = alpha * (alpha.gt(self.progressive_pruning_th).float())
def forward(self, n=1, batch_size=1): # pylint: disable=arguments-differ
return self.sample(n=n, batch_size=batch_size)
def sample(self, n=1, batch_size=1):
if self.progressive_pruning_th is not None:
self.progressive_pruning()
rollouts = []
for _ in range(n):
# op_weights.shape: [num_edges, [batch_size,] num_ops]
# edge_norms.shape: [num_edges] do not have batch_size.
op_weights_list = []
edge_norms_list = []
sampled_list = []
logits_list = []
for alphas in self.cg_alphas:
if self.force_uniform: # cg_alpha parameters will not be in the graph
# NOTE: `force_uniform` config does not affects edge_norms (betas),
# if one wants a force_uniform search, keep `use_edge_normalization=False`
alphas = torch.zeros_like(alphas)
if batch_size > 1:
expanded_alpha = (
alphas.reshape([alphas.shape[0], 1, alphas.shape[1]])
.repeat([1, batch_size, 1])
.reshape([-1, alphas.shape[-1]])
)
else:
expanded_alpha = alphas
if self.use_prob:
# probability as sample
sampled = F.softmax(
expanded_alpha / self.gumbel_temperature, dim=-1
)
elif self.use_sigmoid:
sampled = utils.relaxed_bernoulli_sample(
expanded_alpha, self.gumbel_temperature
)
else:
# gumbel sampling
sampled, _ = utils.gumbel_softmax(
expanded_alpha, self.gumbel_temperature, hard=False
)
if self.gumbel_hard:
op_weights = utils.straight_through(sampled)
else:
op_weights = sampled
if batch_size > 1:
sampled = sampled.reshape([-1, batch_size, op_weights.shape[-1]])
op_weights = op_weights.reshape(
[-1, batch_size, op_weights.shape[-1]]
)
op_weights_list.append(op_weights)
sampled_list.append(utils.get_numpy(sampled))
# logits_list.append(utils.get_numpy(alphas))
logits_list.append((alphas))
if self.use_edge_normalization:
raise NotImplementedError
else:
arch_list = []
logits_arch_list = []
for op_weights in op_weights_list:
arch = self.get_arch.apply(
self.search_space, op_weights, self.device
)
arch_list.append(arch)
for logits in logits_list:
logits_arch = self.get_arch.apply(
self.search_space, logits, self.device
)
logits_arch_list.append(logits_arch)
rollouts.append(
DenseMicroDiffRollout(
arch_list,
sampled_list,
logits_list,
logits_arch_list,
search_space=self.search_space,
)
)
return rollouts
def save(self, path):
"""Save the parameters to disk."""
torch.save({"epoch": self.epoch, "state_dict": self.state_dict()}, path)
self.logger.info("Saved controller network to %s", path)
def load(self, path):
"""Load the parameters from disk."""
checkpoint = torch.load(path, map_location=torch.device("cpu"))
self.load_state_dict(checkpoint["state_dict"])
self.on_epoch_start(checkpoint["epoch"])
self.logger.info("Loaded controller network from %s", path)
def _entropy_loss(self):
if self.entropy_coeff > 0:
probs = [F.softmax(alpha, dim=-1) for alpha in self.cg_alphas]
return self.entropy_coeff * sum(
-(torch.log(prob) * prob).sum() for prob in probs
)
return 0.0
def gradient(self, loss, return_grads=True, zero_grads=True):
raise NotImplementedError(
"the grad function is implemented in the layer2diffcontroller.gradient()"
)
def step_current_gradient(self, optimizer):
if self.max_grad_norm is not None:
torch.nn.utils.clip_grad_norm_(self.parameters(), self.max_grad_norm)
optimizer.step()
def step_gradient(self, gradients, optimizer):
self.zero_grad()
named_params = dict(self.named_parameters())
for k, grad in gradients:
named_params[k].grad = grad
# clip the gradients
if self.max_grad_norm is not None:
torch.nn.utils.clip_grad_norm_(self.parameters(), self.max_grad_norm)
# apply the gradients
optimizer.step()
    def step(self, rollouts, optimizer, perf_name):  # very memory inefficient
        self.zero_grad()
        losses = [r.get_perf(perf_name) for r in rollouts]
        for loss in losses:
            loss.backward()
        optimizer.step()
        return np.mean([l.detach().cpu().numpy() for l in losses])
def __getstate__(self):
state = super(MicroDenseDiffController, self).__getstate__().copy()
del state["get_arch"]
return state
def __setstate__(self, state):
super(MicroDenseDiffController, self).__setstate__(state)
self.get_arch = GetArchMicro()
def summary(self, rollouts, log=False, log_prefix="", step=None):
num = len(rollouts)
logits_list = [
[utils.get_numpy(logits) for logits in r.logits] for r in rollouts
]
_ss = self.search_space
if self.gumbel_hard:
cg_logprobs = [0.0 for _ in range(_ss.num_cell_groups)]
cg_entros = [0.0 for _ in range(_ss.num_cell_groups)]
for rollout, logits in zip(rollouts, logits_list):
for cg_idx, (vec, cg_logits) in enumerate(zip(rollout.arch, logits)):
prob = utils.softmax(cg_logits)
logprob = np.log(prob)
if self.gumbel_hard:
inds = np.argmax(utils.get_numpy(vec), axis=-1)
cg_logprobs[cg_idx] += np.sum(logprob[range(len(inds)), inds])
cg_entros[cg_idx] += -(prob * logprob).sum()
# mean across rollouts
if self.gumbel_hard:
cg_logprobs = [s / num for s in cg_logprobs]
total_logprob = sum(cg_logprobs)
cg_logprobs_str = ",".join(["{:.2f}".format(n) for n in cg_logprobs])
cg_entros = [s / num for s in cg_entros]
total_entro = sum(cg_entros)
cg_entro_str = ",".join(["{:.2f}".format(n) for n in cg_entros])
if log:
# maybe log the summary
self.logger.info(
"%s%d rollouts: %s ENTROPY: %2f (%s)",
log_prefix,
num,
"-LOG_PROB: %.2f (%s) ;" % (-total_logprob, cg_logprobs_str)
if self.gumbel_hard
else "",
total_entro,
cg_entro_str,
)
if step is not None and not self.writer.is_none():
if self.gumbel_hard:
self.writer.add_scalar("log_prob", total_logprob, step)
self.writer.add_scalar("entropy", total_entro, step)
stats = [
(n + " ENTRO", entro) for n, entro in zip(_ss.cell_group_names, cg_entros)
]
if self.gumbel_hard:
stats += [
(n + " LOGPROB", logprob)
for n, logprob in zip(_ss.cell_group_names, cg_logprobs)
]
return OrderedDict(stats)
@classmethod
def supported_rollout_types(cls):
return ["micro-dense", "micro-dense-diff"]
| [
"[email protected]"
] | |
f26247827774f537f5498e3343140e8ee540b7e4 | 375e834e7a2ff7b085b88cc162fb8215e14cd132 | /Python/largest-triangle-area.py | 69ea932c4fb00f00dab5018641b8bbcd1559e8ed | [
"MIT"
] | permissive | tickpeach/LeetCode-Solutions | 0842086aa1781191fe68639c884986f843194262 | 16c96776781d04672d653cef48f4f7989685cbe9 | refs/heads/master | 2020-04-01T02:46:38.356672 | 2018-10-12T18:15:41 | 2018-10-12T18:15:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 930 | py | # Time: O(n^3)
# Space: O(1)
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
class Solution(object):
def largestTriangleArea(self, points):
"""
:type points: List[List[int]]
:rtype: float
"""
result = 0
for i in xrange(len(points)-2):
for j in xrange(i+1, len(points)-1):
for k in xrange(j+1, len(points)):
result = max(result,
0.5 * abs(points[i][0] * points[j][1] +
points[j][0] * points[k][1] +
points[k][0] * points[i][1] -
points[j][0] * points[i][1] -
points[k][0] * points[j][1] -
points[i][0] * points[k][1]))
return result
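# The inner expression is the shoelace formula: for vertices (x1, y1), (x2, y2),
# (x3, y3), area = 0.5 * |x1*y2 + x2*y3 + x3*y1 - x2*y1 - x3*y2 - x1*y3|, i.e.
# half the absolute cross product of two edge vectors of the triangle.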
| [
"[email protected]"
] | |
23cfc92ea7ec20a590b33c1593e2e718bd0af201 | f2658c4bd7f833ace25ac2b63e88317b05f4602d | /2017 July/2017-July-13/tf_rdf/st_rdf_test/model/WaysName.py | a9f3bd56d882d733a2a1cfe3b833a1a3e59fdefb | [] | no_license | xiaochao00/telanav_diary | e4c34ac0a14b65e4930e32012cc2202ff4ed91e2 | 3c583695e2880322483f526c98217c04286af9b2 | refs/heads/master | 2022-01-06T19:42:55.504845 | 2019-05-17T03:11:46 | 2019-05-17T03:11:46 | 108,958,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,704 | py | #-------------------------------------------------------------------------------
# Name: WaysName model
# Purpose: this model is used to map the
# columns: [ ]
#
# Author: Kuang
#
# Created: 10/12/2015
# Copyright: (c) rex 2015
# Licence: <your licence>
#-------------------------------------------------------------------------------
from model.record import Record
from model.record import CSV_SEP
from model.constants import *
from operator import itemgetter
from xml.dom import minidom
from collections import defaultdict
import os
import sys
import datetime
import json
import csv
ROOT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),"..")
PREFERRED_ROUTE_CONFIG_FILE = os.path.join(os.path.dirname(ROOT_DIR), "..", "config", "Route.xml")
GLOBAL_KEY_PREFIX = "ways_navlink_"
LF = '\n'
PREV_LINKID = -1
STATISTIC_GENERAL_NAME_FEATURE_KEYS = (
'route_type',
'attached_to_base',
'precedes_base',
'name_direction_prefix',
'street_type',
'name_direction_suffix',
'name_base',
'is_exonym',
'name_type',
'direction_on_sign',
'street_name'
)
STATISTIC_TRANS_NAME_FEATURE_KEYS = (
'attached_to_base',
'precedes_base',
'name_direction_prefix',
'street_type',
'name_direction_suffix',
'name_base',
'direction_on_sign',
'street_name'
)
class WaysName(Record):
def __init__(self, region):
Record.__init__(self)
self.name_dump_file = os.path.join(ROOT_DIR, 'temporary', self.__class__.__name__)
self.admin_hierarchy_dump_file = os.path.join(ROOT_DIR, 'temporary', 'admin_hierarchy')
self.stat = {}
self.region = region
self.link_name_dict = {}
self.admin_hierarchy_dict = {}
self.preferred_route_type = defaultdict()
self.names_dump = {}
def dump2file(self):
cmd = "select \
rnl.link_id, \
rrl.road_name_id, \
rrl.is_exit_name, \
rrl.is_name_on_roadsign, \
rrn.route_type, \
rrn.name_type, \
rrn.is_exonym, \
rrn.language_code, \
rrn.attached_to_base, \
rrn.precedes_base, \
rrn.prefix, \
rrn.street_type, \
rrn.suffix, \
rrn.base_name, \
rrn.direction_on_sign, \
rrn.street_name, \
rrnt.*, \
vrn.*, \
vpt.*, \
vgo.*, \
rl.left_admin_place_id, \
rl.right_admin_place_id, \
rrl.road_link_id, \
rnl.iso_country_code \
from \
rdf_nav_link rnl \
inner join rdf_road_link rrl on rnl.link_id = rrl.link_id \
inner join rdf_road_name rrn on rrl.road_name_id = rrn.road_name_id \
left join rdf_road_name_trans rrnt on rrn.road_name_id = rrnt.road_name_id \
left join vce_road_name vrn on rrn.road_name_id = vrn.road_name_id \
left join vce_phonetic_text vpt on vrn.phonetic_id = vpt.phonetic_id \
left join vce_geo_override vgo on vrn.phonetic_id = vgo.phonetic_id \
inner join rdf_link rl on rnl.link_id = rl.link_id \
where rnl.iso_country_code in (%s) \
order by rnl.link_id " % (REGION_COUNTRY_CODES(self.region, GLOBAL_KEY_PREFIX))
print cmd
self.cursor.copy_expert("COPY (%s) TO STDOUT DELIMITER '%s' CSV " % (cmd, CSV_SEP), open(self.name_dump_file, "w"))
def dumpadminhierarchyfile(self):
cmd = "select * from rdf_admin_hierarchy"
print cmd
self.cursor.copy_expert("COPY (%s) TO STDOUT DELIMITER '%s' CSV " % (cmd, CSV_SEP), open(self.admin_hierarchy_dump_file, "w"))
#load admin hierarchy file into memory
with open(self.admin_hierarchy_dump_file, "r") as admin_hierarchy_f:
admin_line_ps = csv.reader(admin_hierarchy_f, delimiter=CSV_SEP)
for admin_line in admin_line_ps:
admin_line_p = [x.strip() for x in admin_line]
# for admin_line in admin_hierarchy_f:
# admin_line = admin_line.strip()
# #admin_line_p = admin_line.split(CSV_SEP)
# admin_line_p = Record.split(admin_line)
if len(admin_line_p) < 1:
continue
self.admin_hierarchy_dict[admin_line_p[0]] = [admin_line_p[3], admin_line_p[4], admin_line_p[5], admin_line_p[6], admin_line_p[7]]
def get_statistic(self):
self._load_preferred_route_type()
self.dumpadminhierarchyfile()
try:
self.dump2file()
except:
            print 'Some table or schema doesn\'t exist! Please check the SQL above'
return {}
processcount = 0
with open(self.name_dump_file, "r",1024*1024*1024) as csv_f:
lines = csv.reader(csv_f, delimiter=CSV_SEP)
for line in lines:
line_p = [x.strip() for x in line]
# for line in csv_f:
# line = line.rstrip()
# # line_p = line.split(CSV_SEP)
# line_p = Record.split(line)
if len(line_p) < 1:
sys.stderr.write('Error: invalid line %s\n' % line)
continue
self._build(line_p)
processcount += 1
if processcount%5000 == 0:
print "\rProcess index [ "+str(processcount)+" ]",
print "\rProcess index [ "+str(processcount)+" ]",
self.__statistic()
self.__dump_name(None, None, None)
# write to file
with open(os.path.join(ROOT_DIR, "output", "stat", self.__class__.__name__), 'w') as stf:
stf.write(json.dumps(self.stat))
return self.stat
def _load_preferred_route_type(self):
doc = None
try:
            doc = minidom.parse(PREFERRED_ROUTE_CONFIG_FILE)
except:
print "failed to parse route type configure file"
return
if not doc:
return
root = doc.documentElement
if root:
routeelmts = root.getElementsByTagName('Route')
if routeelmts:
for recelmt in routeelmts:
iso_country_code = self._get_node_value(recelmt, "ISO_COUNTRY_CODE")
route_type = self._get_node_value(recelmt, "ROUTE_TYPE")
is_preferred_display = self._get_node_value(recelmt, "IS_PREFERRED_DISPLAY")
key = "%s_%s"%(iso_country_code, route_type)
if is_preferred_display and is_preferred_display == "Y":
self.preferred_route_type[key.lower()] = 0
elif is_preferred_display and is_preferred_display == "N":
self.preferred_route_type[key.lower()] = 2
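    # Expected Route.xml layout, a hypothetical sketch inferred from the parsing
    # above; only the element names are confirmed by the code:
    #   <Routes>
    #     <Route>
    #       <ISO_COUNTRY_CODE>USA</ISO_COUNTRY_CODE>
    #       <ROUTE_TYPE>1</ROUTE_TYPE>
    #       <IS_PREFERRED_DISPLAY>Y</IS_PREFERRED_DISPLAY>
    #     </Route>
    #   </Routes>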
def _get_node_value(self, xmlnode, key):
elmt = xmlnode.getElementsByTagName(key)
if not elmt:
return None
return elmt[0].childNodes[0].data
def _get_name_type(self, line_p):
road_type = 0 if line_p[3] == "" else int(line_p[3])
if line_p[1] == "Y":
return "exit_ref"
elif road_type >= 1 and road_type <=6:
return "ref"
elif line_p[2] == "N":
return "alt_name"
elif not (road_type >= 1 and road_type <=6) and (line_p[4] == "B" and line_p[5] == "N"):
return "name"
elif not (road_type >= 1 and road_type <=6) and not (line_p[4] == "B" and line_p[5] == "N"):
return "alt_name"
def _routetypecmp(self, list_a, list_b):
key_a = "%s_%s" % (list_a[18], list_a[3])
#is_prefer_a = self.preferred_route_type.has_key(key_a.lower())
prefer_a_priority = self.preferred_route_type.get(key_a.lower(), 1)
key_b = "%s_%s" % (list_b[18], list_b[3])
#is_prefer_b = self.preferred_route_type.has_key(key_b.lower())
prefer_b_priority = self.preferred_route_type.get(key_b.lower(), 1)
#v = 0 if (is_prefer_a and is_prefer_b) or (not is_prefer_a and not is_prefer_b) else -1 if is_prefer_a else 1;
v = prefer_a_priority - prefer_b_priority
if v != 0:
return v
route_type_a = int(list_a[3])
route_type_b = int(list_b[3])
v = route_type_a - route_type_b
if v != 0:
return v
if list_a[14] < list_b[14]:
return -1
else:
return 1
def __dump_name(self, lang, name_type, names):
"""It's used for debug only, comment the "return" for debug.
"""
return
if names:
key = (lang, name_type)
val = '%d;%s' % (self.link_id*1000+100, names[14])
self.names_dump.setdefault(key, []).append(val)
if len(self.names_dump) >= 30000 or not names:
self.__dump_name_imp()
self.names_dump.clear()
def __dump_name_imp(self):
for key, val in self.names_dump.iteritems():
lang, name_type = key
outfile = '%s_%s' % (name_type, lang)
with open(outfile, 'a') as ofs:
ofs.write('\n'.join(val))
ofs.write('\n')
def __statistic(self):
name_dict = {}
for name_id in self.link_name_dict.keys():
names = self.link_name_dict.get(name_id)
name_type = self._get_name_type(names)
language = names[6]
if not name_dict.has_key(language):
name_dict[language] = {}
name_type_dict = name_dict[language]
if name_type_dict.has_key(name_type):
name_type_list = name_type_dict[name_type]
name_type_list.append(names)
else:
name_type_dict[name_type] = [names]
for lang in name_dict.keys():
for nametype in name_dict[lang].keys():
if nametype != "ref":
nameslist = sorted(name_dict[lang][nametype], key=itemgetter(14, 17))
else:
nameslist = sorted(name_dict[lang][nametype], cmp=self._routetypecmp)
for key in STATISTIC_GENERAL_NAME_FEATURE_KEYS:
getattr(self,'_WaysName__get_'+key)(nametype, lang.lower(), key, nameslist[0])
if key == 'street_name':
self.__dump_name(lang, nametype, nameslist[0])
for transkey in STATISTIC_TRANS_NAME_FEATURE_KEYS:
trans_literation_type = nameslist[0][15][0][1]
if trans_literation_type == "":
continue
if transkey == STATISTIC_TRANS_NAME_FEATURE_KEYS[7]:
trans_full_key = "{0}{1}:{2}:trans:{3}".format(GLOBAL_KEY_PREFIX, nametype, lang.lower(), trans_literation_type)
else:
trans_full_key = "{0}{1}:{2}:trans:{3}:{4}".format(GLOBAL_KEY_PREFIX, nametype, lang.lower(), trans_literation_type, transkey)
getattr(self,'_WaysName__get_trans_'+transkey)(trans_full_key, nameslist[0][15][0])
for p_keys in self._get_phonetic_key(nameslist[0][16]):
self.__count("{0}{1}:{2}:{3}".format(GLOBAL_KEY_PREFIX, nametype, lang.lower(), p_keys))
#26(0):road_name_id, 27(1):phonetic_id, 28(2):preferred, 29(3):type
#30(4):phonetic_id, 31(5):phonetic_string, 32(6):phonetic_language_code, 33(7):transcription_method
#34(8):geo_override_id, 35(9):phonetic_id, 36(10):admin_place_id, 37(11):preferred
#38(12):left_admin_place_id, 39(13):right_admin_place_id
def _get_phonetic_key(self, listphonetics):
preferred_phonetics_dict = {}
for phonetics in listphonetics:
if phonetics[10] != "":
if self._is_geo_override(phonetics[12], phonetics[10]) or self._is_geo_override(phonetics[13], phonetics[10]):
preferred_phonetics_dict[phonetics[1]] = phonetics
if len(preferred_phonetics_dict) == 0:
for phonetics in listphonetics:
if phonetics[2] == "Y":
preferred_phonetics_dict[phonetics[1]] = phonetics
preferred_phonetic = {}
for key, vals in preferred_phonetics_dict.items():
phonetic_key = "phonetics:{0}:{1}:{2}".format(vals[6], vals[7], vals[3])
if not preferred_phonetic.has_key(phonetic_key):
preferred_phonetic[phonetic_key] = None
if len(preferred_phonetic) == 0:
return []
return preferred_phonetic.keys()
def _is_geo_override(self, admin_place_id, geo_admin_place_id):
if self.admin_hierarchy_dict.has_key(admin_place_id):
admin_hierarchy = self.admin_hierarchy_dict.get(admin_place_id)
if admin_hierarchy[4] == geo_admin_place_id:
return True
if admin_hierarchy[3] == geo_admin_place_id:
return True
if admin_hierarchy[2] == geo_admin_place_id:
return True
if admin_hierarchy[1] == geo_admin_place_id:
return True
if admin_hierarchy[0] == geo_admin_place_id:
return True
def __get_trans_attached_to_base(self, fullkey, p):
if p[8] == "Y":
self.__count(fullkey)
def __get_trans_precedes_base(self, fullkey, p):
if p[9] == "Y":
self.__count(fullkey)
def __get_trans_name_direction_prefix(self, fullkey, p):
if p[5] != "":
self.__count(fullkey)
def __get_trans_street_type(self, fullkey, p):
if p[4] != "":
self.__count(fullkey)
def __get_trans_name_direction_suffix(self, fullkey, p):
if p[6] != "":
self.__count(fullkey)
def __get_trans_name_base(self, fullkey, p):
if p[2] != "":
self.__count(fullkey)
def __get_trans_direction_on_sign(self, fullkey, p):
if p[7] != "":
self.__count(fullkey)
def __get_trans_street_name(self, fullkey, p):
if p[3] != "":
self.__count(fullkey)
def __get_route_type(self, nametype, language, key, p):
if p[3] != "":
self.__count("%s%s:%s:%s"%(GLOBAL_KEY_PREFIX, nametype, language, key))
def __get_attached_to_base(self, nametype, language, key, p):
if p[7] == "Y":
self.__count("%s%s:%s:%s"%(GLOBAL_KEY_PREFIX, nametype, language, key))
def __get_precedes_base(self, nametype, language, key, p):
if p[8] == "Y":
self.__count("%s%s:%s:%s"%(GLOBAL_KEY_PREFIX, nametype, language, key))
def __get_name_direction_prefix(self, nametype, language, key, p):
if p[9] != "":
self.__count("%s%s:%s:%s"%(GLOBAL_KEY_PREFIX, nametype, language, key))
def __get_street_type(self, nametype, language, key, p):
if p[10] != "":
self.__count("%s%s:%s:%s"%(GLOBAL_KEY_PREFIX, nametype, language, key))
def __get_name_direction_suffix(self, nametype, language, key, p):
if p[11] != "":
self.__count("%s%s:%s:%s"%(GLOBAL_KEY_PREFIX, nametype, language, key))
def __get_name_base(self, nametype, language, key, p):
if p[12] != "":
self.__count("%s%s:%s:%s"%(GLOBAL_KEY_PREFIX, nametype, language, key))
def __get_is_exonym(self, nametype, language, key, p):
if p[5] == "Y":
self.__count("%s%s:%s:%s"%(GLOBAL_KEY_PREFIX, nametype, language, key))
def __get_name_type(self, nametype, language, key, p):
if p[4] != "":
self.__count("%s%s:%s:%s"%(GLOBAL_KEY_PREFIX, nametype, language, key))
def __get_direction_on_sign(self, nametype, language, key, p):
if p[13] != "":
self.__count("%s%s:%s:%s"%(GLOBAL_KEY_PREFIX, nametype, language, key))
def __get_street_name(self, nametype, language, key, p):
if p[14] != "":
self.__count("%s%s:%s"%(GLOBAL_KEY_PREFIX, nametype, language))
#0:link_id, 1:road_name_id, 2:is_exit_name, 3:is_name_on_roadsign, 4:route_type, 5:name_type, 6:is_exonym, 7:language_code
#8:attached_to_base, 9:precedes_base, 10:prefix, 11:street_type, 12:suffix, 13:base_name, 14:direction_on_sign, 15:street_name
#16:road_name_id, 17:transliteration_type, 18:base_name, 19:street_name, 20:street_type, 21:prefix, 22:suffix, 23:direction_on_sign, 24:attached_to_base, 25:precedes_base
#26(0):road_name_id, 27(1):phonetic_id, 28(2):preferred, 29(3):type
#30(4):phonetic_id, 31(5):phonetic_string, 32(6):phonetic_language_code, 33(7):transcription_method
#34(8):geo_override_id, 35(9):phonetic_id, 36(10):admin_place_id, 37(11):preferred
#38(12):left_admin_place_id, 39(13):right_admin_place_id, 40:road link id, 41: iso country code
def _build(self, line_p):
global PREV_LINKID
if int(line_p[0]) != PREV_LINKID:
if len(self.link_name_dict) != 0:
self.__statistic()
self.link_name_dict.clear()
PREV_LINKID = int(line_p[0])
self.link_id = PREV_LINKID
road_name_id = (line_p[1], long(line_p[40]))
#road_name_id = line_p[1]
if not self.link_name_dict.has_key(road_name_id):
# 1:road_name_id, 2:is_exit_name, 3:is_name_on_roadsign, 4:route_type, 5:name_type, 6:is_exonym, 7:language_code
# 8:attached_to_base, 9:precedes_base, 10:prefix, 11:street_type, 12:suffix, 13:base_name, 14:direction_on_sign, 15:street_name
road_name_list = [line_p[1], line_p[2], line_p[3], line_p[4], line_p[5], line_p[6], line_p[7], line_p[8], line_p[9], line_p[10], line_p[11], line_p[12], line_p[13], line_p[14], line_p[15], [], [], long(line_p[40]), line_p[41]]
else:
road_name_list = self.link_name_dict[road_name_id]
trans = [line_p[16], line_p[17], line_p[18], line_p[19], line_p[20], line_p[21], line_p[22], line_p[23], line_p[24], line_p[25]]
road_name_list[15].append(trans)
phonetics = [line_p[26], line_p[27], line_p[28], line_p[29], line_p[30], line_p[31], line_p[32], line_p[33], line_p[34], line_p[35], line_p[36], line_p[37], line_p[38], line_p[39]]
road_name_list[16].append(phonetics)
self.link_name_dict[road_name_id] = road_name_list
def __add(self, line, dict):
subdict = {}
if not dict.has_key(line[0]):
dict[line[0]] = subdict
else:
subdict = dict.get(line[0])
subdict[line[2]] = (float(line[3])/100000, float(line[4])/100000)
def __count(self,key):
key = key.lower()
if self.stat.has_key(key):
self.stat[key] += 1
else:
self.stat[key] = 1
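    # For instance (illustrative key only), __count("ways_navlink_ref:eng:route_type")
    # increments self.stat["ways_navlink_ref:eng:route_type"], creating it on first use.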
if __name__ == "__main__":
    # used to test this module
bg = datetime.datetime.now()
stat = WaysName('na').get_statistic()
keys = stat.keys()
print "==>"
print "{%s}"%(",".join(map(lambda px: "\"%s\":%s"%(px,stat[px]) ,keys)))
print "<=="
ed = datetime.datetime.now()
print "Cost time:"+str(ed - bg)
| [
"[email protected]"
] | |
2ed90e62775bcf2abcceb6808eb7d46bfad27f24 | 5a9d8c64c6478f3816b63f59f1cdaca73c0848eb | /pythonNet/ex07_Thread/array.py | fbebd4f0134d54c89387bb558a5577494bae457e | [] | no_license | wangredfei/nt_py | f68134977e6d1e05cf17cec727644509f084c462 | fedf03c0d52565f588e9b342d1c51df0b6dc2681 | refs/heads/master | 2020-04-08T07:55:08.302589 | 2018-11-23T09:53:48 | 2018-11-23T09:53:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | from multiprocessing import Process,Array
import time
# # Create a shared-memory block and store an integer list in it
# shm = Array('i', [1,2,3,4,5])
# def fun():
# for s in shm:
# print(s)
# shm[0]=1000
# p = Process(target = fun)
# p.start()
# p.join()
# for i in shm:
# print(i)
shm = Array('c',b'hello')
def fun():
for i in shm:
print(i)
shm[0] = b'H'
p = Process(target = fun)
p.start()
p.join()
for i in shm:
print(i,end="")
print()
print(shm.value)  # print the string
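# Note: Array('c', b'hello') shares a raw byte buffer between processes; .value
# exposes it as a bytes string, which is b'Hello' here after the child sets shm[0].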
"[email protected]"
] | |
3e05f88b40601505afeed262deb49042d529da7a | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_277/ch118_2020_03_30_20_02_35_602471.py | 1f099ee2f4af19fc094bf59344f12a9a10646426 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | import math
def reflexao_total_interna(n1, n2, o2):
    # Snell's law: n1*sin(o1) = n2*sin(o2), so sin(o1) = n2*sin(o2)/n1.
    # Total internal reflection occurs when this required sine exceeds 1.
    sin_o1 = math.sin(o2 * math.pi / 180) * n2 / n1
    if sin_o1 > 1:
        return True
    else:
        return False
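# Sanity check with hypothetical values: light leaving glass (n2 = 1.5) toward
# air (n1 = 1.0) at o2 = 60 degrees gives sin_o1 = 1.5*sin(60)/1.0 ~= 1.30 > 1,
# so reflexao_total_interna(1.0, 1.5, 60) returns True.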
| [
"[email protected]"
] | |
ab320487cab51af6170a88923ce8087b084a8206 | 9c529778ea60e590e448589e35eb4dae941e832a | /evennia-engine/evenv/share/doc/networkx-2.4/examples/drawing/plot_spectral_grid.py | 3f2bc92202d5f7a92efbf193f918e48aa2443540 | [
"MIT",
"BSD-3-Clause"
] | permissive | rajammanabrolu/WorldGeneration | 3d0976ffba8588fcebda8b8593be694e1bc1501d | 5e97df013399e1a401d0a7ec184c4b9eb3100edd | refs/heads/master | 2022-11-25T20:10:52.682064 | 2021-09-08T11:50:23 | 2021-09-08T11:50:23 | 235,484,371 | 69 | 5 | MIT | 2022-11-22T08:50:22 | 2020-01-22T02:32:53 | Python | UTF-8 | Python | false | false | 1,603 | py | """
==================
Spectral Embedding
==================
The spectral layout positions the nodes of the graph based on the
eigenvectors of the graph Laplacian $L = D - A$, where $A$ is the
adjacency matrix and $D$ is the degree matrix of the graph.
By default, the spectral layout will embed the graph in two
dimensions (you can embed your graph in other dimensions using the
``dim`` argument to either :func:`~drawing.nx_pylab.draw_spectral` or
:func:`~drawing.layout.spectral_layout`).
When the edges of the graph represent similarity between the incident
nodes, the spectral embedding will place highly similar nodes closer
to one another than nodes which are less similar.
This is particularly striking when you spectrally embed a grid
graph. In the full grid graph, the nodes in the center of the
graph are pulled apart more than nodes on the periphery.
As you remove internal nodes, this effect increases.
"""
import matplotlib.pyplot as plt
import networkx as nx
options = {
'node_color': 'C0',
'node_size': 100,
}
G = nx.grid_2d_graph(6, 6)
plt.subplot(332)
nx.draw_spectral(G, **options)
G.remove_edge((2, 2), (2, 3))
plt.subplot(334)
nx.draw_spectral(G, **options)
G.remove_edge((3, 2), (3, 3))
plt.subplot(335)
nx.draw_spectral(G, **options)
G.remove_edge((2, 2), (3, 2))
plt.subplot(336)
nx.draw_spectral(G, **options)
G.remove_edge((2, 3), (3, 3))
plt.subplot(337)
nx.draw_spectral(G, **options)
G.remove_edge((1, 2), (1, 3))
plt.subplot(338)
nx.draw_spectral(G, **options)
G.remove_edge((4, 2), (4, 3))
plt.subplot(339)
nx.draw_spectral(G, **options)
plt.show()
| [
"[email protected]"
] | |
efbc42ba62610026e9e989063cfe821d499f6971 | 7c17d6047a8a31a54a42dc213a0a3c26ccb320fd | /djlistener/djlistener/asgi.py | c9bd80b307fc14acff4568b13981b3e2eb69841c | [] | no_license | morlandi/sinewave | 7d8cd55d4b0fb72b30c99144b09ce55da1722c2d | 39e2fe778ca84d045a877f0ef7938ba7a5ef05ce | refs/heads/master | 2023-04-16T11:29:11.748802 | 2021-06-28T14:46:43 | 2021-06-28T14:46:43 | 152,848,099 | 9 | 5 | null | 2023-03-31T14:55:40 | 2018-10-13T07:44:20 | Python | UTF-8 | Python | false | false | 792 | py | """
ASGI entrypoint. Configures Django and then runs the application
defined in the ASGI_APPLICATION setting.
"""
# import os
# import django
# from channels.routing import get_default_application
# os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djlistener.settings")
# django.setup()
# application = get_default_application()
import os
from channels.routing import ProtocolTypeRouter
from channels.routing import URLRouter
from django.core.asgi import get_asgi_application
from django.urls import path
from . import consumers
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djlistener.settings')
application = ProtocolTypeRouter({
"http": get_asgi_application(),
"websocket": URLRouter([
path("ws/sinewave/", consumers.SinewaveSyncConsumer.as_asgi()),
]),
})
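# To serve this application, point an ASGI server at it, for example (assuming
# Daphne is installed): daphne djlistener.asgi:application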
| [
"[email protected]"
] | |
8782e35c54ec3809fa7022d9699b4f8f0f1a0bb6 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r8/Gen/DecFiles/options/42300000.py | 25e564709eaf15a934a790a92af651aa2170f74d | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,133 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r8/Gen/DecFiles/options/42300000.py generated: Fri, 27 Mar 2015 15:48:05
#
# Event Type: 42300000
#
# ASCII decay Descriptor: pp -> [W+ -> tau+ nu_tau]cc ...
#
from Gaudi.Configuration import *
importOptions( "$DECFILESROOT/options/Wtaunu.py" )
from Configurables import Generation
Generation().EventType = 42300000
Generation().SampleGenerationTool = "Special"
from Configurables import Special
Generation().addTool( Special )
Generation().Special.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/W_taunutau.dec"
Generation().Special.CutTool = "PythiaHiggsType"
from Configurables import PythiaHiggsType
Generation().Special.addTool( PythiaHiggsType )
Generation().Special.PythiaHiggsType.NumberOfLepton = 1
from GaudiKernel import SystemOfUnits
Generation().Special.PythiaHiggsType.LeptonPtMin = 4*SystemOfUnits.GeV
Generation().Special.PythiaHiggsType.LeptonIsFromMother = True
Generation().Special.PythiaHiggsType.NumberOfbquarks = -1
| [
"[email protected]"
] | |
f445c13cd68294138922d2b7dfada304cc3fb281 | 43a96eafd9108dd48f91d0b7c70cf4cd99e7eae2 | /audio_zen/utils.py | 0415dcc5518ed493db8332c1c592d75d1b272e7c | [
"MIT"
] | permissive | yaoao2017/FullSubNet | ec5096f9ed958aa6aceacb5cefcd96a1c77be1c9 | 213df1b46d5bc3d61d774a75aebae5b731046bd2 | refs/heads/main | 2023-08-28T01:22:24.022365 | 2021-11-01T11:43:47 | 2021-11-01T11:43:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,430 | py | import importlib
import os
import time
from copy import deepcopy
from functools import reduce
import torch
def load_checkpoint(checkpoint_path, device):
_, ext = os.path.splitext(os.path.basename(checkpoint_path))
    assert ext in (".pth", ".tar"), "Only .pth and .tar checkpoint extensions are supported."
model_checkpoint = torch.load(os.path.abspath(os.path.expanduser(checkpoint_path)), map_location=device)
if ext == ".pth":
print(f"Loading {checkpoint_path}.")
return model_checkpoint
else: # load tar
print(f"Loading {checkpoint_path}, epoch = {model_checkpoint['epoch']}.")
return model_checkpoint["l1"]
def prepare_empty_dir(dirs, resume=False):
"""
    If resuming an experiment, assert that the dirs already exist; otherwise, create them.
    Args:
        dirs (list): list of directories
        resume (bool): whether to resume the experiment, default is False
"""
for dir_path in dirs:
if resume:
assert dir_path.exists(), "In resume mode, you must be have an old experiment dir."
else:
dir_path.mkdir(parents=True, exist_ok=True)
def check_nan(tensor, key=""):
if torch.sum(torch.isnan(tensor)) > 0:
print(f"Found NaN in {key}")
class ExecutionTime:
"""
Count execution time.
Examples:
timer = ExecutionTime()
...
print(f"Finished in {timer.duration()} seconds.")
"""
def __init__(self):
self.start_time = time.time()
def duration(self):
return int(time.time() - self.start_time)
def initialize_module(path: str, args: dict = None, initialize: bool = True):
"""
Load module or function dynamically with "args".
Args:
path: module path in this project.
args: parameters that will be passed to the Class or the Function in the module.
initialize: whether to initialize the Class or the Function with args.
Examples:
Config items are as follows:
[model]
path = "model.FullSubNetModel"
[model.args]
n_frames = 32
...
This function will:
            1. Load the "model" module.
            2. Call the "FullSubNetModel" Class (or Function) in the "model" module.
3. If initialize is True:
instantiate (or call) the Class (or the Function) and pass the parameters (in "[model.args]") to it.
"""
module_path = ".".join(path.split(".")[:-1])
class_or_function_name = path.split(".")[-1]
module = importlib.import_module(module_path)
class_or_function = getattr(module, class_or_function_name)
if initialize:
if args:
return class_or_function(**args)
else:
return class_or_function()
else:
return class_or_function
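# A usage sketch based on the docstring example above (names are illustrative):
#   model = initialize_module("model.FullSubNetModel", args={"n_frames": 32})
# which is equivalent to:
#   from model import FullSubNetModel
#   model = FullSubNetModel(n_frames=32)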
def print_tensor_info(tensor, flag="Tensor"):
def floor_tensor(float_tensor):
return int(float(float_tensor) * 1000) / 1000
print(
f"{flag}\n"
f"\t"
f"max: {floor_tensor(torch.max(tensor))}, min: {float(torch.min(tensor))}, "
f"mean: {floor_tensor(torch.mean(tensor))}, std: {floor_tensor(torch.std(tensor))}")
def set_requires_grad(nets, requires_grad=False):
"""
Args:
nets: list of networks
requires_grad
"""
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
def merge_config(*config_dicts):
"""
Deep merge configuration dicts.
Args:
*config_dicts: any number of configuration dicts.
Notes:
        1. Values in the later configuration dict(s) update the ones in the former dict(s).
        2. Every key in a later dict must already exist in the former dict, i.e. the first dict must contain all keys.
Examples:
        a = {
            "a": 1,
            "b": 2,
            "c": {
                "d": 1
            }
        }
        b = {
            "a": 2,
            "b": 2,
            "c": {
                "e": 1
            }
        }
        c = merge_config(a, b)
        c = {
            "a": 2,
            "b": 2,
            "c": {
                "d": 1,
                "e": 1
            }
        }
Returns:
New deep-copied configuration dict.
"""
def merge(older_dict, newer_dict):
for new_key in newer_dict:
if new_key not in older_dict:
# Checks items in custom config must be within common config
raise KeyError(f"Key {new_key} is not exist in the common config.")
if isinstance(older_dict[new_key], dict):
older_dict[new_key] = merge(older_dict[new_key], newer_dict[new_key])
else:
older_dict[new_key] = deepcopy(newer_dict[new_key])
return older_dict
return reduce(merge, config_dicts[1:], deepcopy(config_dicts[0]))
def prepare_device(n_gpu: int, keep_reproducibility=False):
"""
Choose to use CPU or GPU depend on the value of "n_gpu".
Args:
n_gpu(int): the number of GPUs used in the experiment. if n_gpu == 0, use CPU; if n_gpu >= 1, use GPU.
        keep_reproducibility (bool): if the repeatability of the experiment matters, set keep_reproducibility to True.
See Also
Reproducibility: https://pytorch.org/docs/stable/notes/randomness.html
"""
if n_gpu == 0:
print("Using CPU in the experiment.")
device = torch.device("cpu")
else:
# possibly at the cost of reduced performance
if keep_reproducibility:
print("Using CuDNN deterministic mode in the experiment.")
torch.backends.cudnn.benchmark = False # ensures that CUDA selects the same convolution algorithm each time
torch.set_deterministic(True) # configures PyTorch only to use deterministic implementation
else:
# causes cuDNN to benchmark multiple convolution algorithms and select the fastest
torch.backends.cudnn.benchmark = True
device = torch.device("cuda:0")
return device
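# A usage sketch (`model` is assumed to be an arbitrary torch.nn.Module):
#   device = prepare_device(n_gpu=1)
#   model = model.to(device)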
def expand_path(path):
return os.path.abspath(os.path.expanduser(path))
def basename(path):
filename, ext = os.path.splitext(os.path.basename(path))
return filename, ext | [
"[email protected]"
] | |
7de97cc4e386019c8c8287f8821f5d0eba631a12 | 594fd699d9f8070c867b83b11881ca1f624b417b | /EstruturaDeDecisao/mais_barato.py | e1caf21790f912b0b381f4166b4196d14a2831b6 | [] | no_license | felipmarqs/exerciciospythonbrasil | f140df2c59b933cc0460d5986afc8c6ddd493556 | 6d02e85ae5986d3b20cfd8781174998d871eeb90 | refs/heads/master | 2020-04-04T05:25:23.751175 | 2018-12-12T18:44:38 | 2018-12-12T18:44:38 | 155,745,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | #Faça um programa que pergunte o preço de três produtos e informe qual produto você deve comprar, sabendo que a decisão é sempre pelo mais barato.
p1 = float(input("Qual o preço do primeiro produto ? R$"))
p2 = float(input("Qual o preço do segundo produto ? R$"))
p3 = float(input("Qual o preço do terceiro produto ? R$"))
if p1 < p2 and p1 < p3:
print("Compre o primeiro!")
elif p2 < p1 and p2 < p3:
print(("Compre o segundo!"))
elif p3 < p1 and p3 < p2:
print("Compre o terceiro!")
| [
"[email protected]"
] | |
98cbaad2a1b572d0d4e77f410261e0ed4a72c31a | c41d00f9c9f716709ea3cbded76fbebfb9058fa7 | /polygon/problem2/views/base.py | fb74e4c275f13782212f9c76efdf95507f4b7003 | [
"MIT"
] | permissive | revectores/eoj3 | 56e5d8f5e0f513752a159086125902ce3086e868 | 3435280768b366cc82f74f1f08697f059e0f2141 | refs/heads/master | 2020-04-01T17:56:34.122990 | 2018-10-15T10:59:14 | 2018-10-15T10:59:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,798 | py | import re
import traceback
from itertools import chain
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.db import transaction
from django.db.models import Count
from django.db.models import Q
from django.http import Http404
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse
from django.views import View
from django.views.generic import ListView
from django.views.generic.base import ContextMixin, TemplateView
from account.models import User
from account.permissions import is_admin_or_root
from contest.models import Contest
from polygon.base_views import PolygonBaseMixin
from polygon.models import Revision, FavoriteProblem
from polygon.rejudge import rejudge_all_submission_on_problem
from problem.models import Problem
from problem.views import StatusList
from submission.models import Submission
from utils.permission import is_problem_manager
class ProblemList(PolygonBaseMixin, ListView):
template_name = 'polygon/problem2/list.jinja2'
context_object_name = 'problem_list'
paginate_by = 100
def get_queryset(self):
if 'exact' in self.request.GET:
self.search_text = q = self.request.GET['exact']
query = Q(pk__exact=q)
elif 'q' in self.request.GET:
self.search_text = q = self.request.GET['q']
query = Q(title__icontains=q) | Q(alias__icontains=q) | Q(source__icontains=q)
if q.isdigit():
query |= Q(pk__exact=q)
self.id_searching_recommendation = q
else:
self.search_text = ''
query = None
if is_admin_or_root(self.request.user):
qs = Problem.objects.all()
else:
qs = self.request.user.managing_problems.all()
if query:
qs = qs.filter(query)
qs = qs.only("update_time", "title", "id", "create_time", "source", "alias")\
.order_by("-update_time").prefetch_related("revisions").annotate(Count('revisions'))
favorite_problems = set(FavoriteProblem.objects.filter(user=self.request.user).values_list("problem_id", flat=True))
if favorite_problems:
for p in qs:
p.liked = p.id in favorite_problems
qs = sorted(list(qs), key=lambda p: not p.liked)
return qs
@staticmethod
def get_problem_latest_revision(problem):
problem.latest_revision, problem.my_latest_revision = None, None
for revision in sorted(problem.revisions.all(), key=lambda x: x.create_time, reverse=True):
return revision
def get_context_data(self, **kwargs):
data = super().get_context_data(**kwargs)
for problem in data['problem_list']:
problem.latest_revision = self.get_problem_latest_revision(problem)
data['search_text'] = self.search_text
if hasattr(self, "id_searching_recommendation"):
pid = self.id_searching_recommendation
if Problem.objects.filter(id=pid).exists() and \
is_admin_or_root(self.request.user) or self.request.user.managing_problems.filter(id=pid).exists():
data["suggest_problem"] = Problem.objects.get(id=pid)
data["suggest_problem"].latest_revision = self.get_problem_latest_revision(data["suggest_problem"])
return data
class ProblemCreate(PolygonBaseMixin, View):
@staticmethod
def get_unused_problem():
revised_probs = set(Revision.objects.values_list("problem_id", flat=True))
for problem in Problem.objects.all().order_by("id"):
if not problem.description and not problem.input and not problem.output and not problem.cases and \
problem.id not in revised_probs:
return problem
return None
def post(self, request, *args, **kwargs):
fallback_url = reverse('polygon:problem_list_2')
alias = request.POST.get('answer', '')
if not re.match(r'^[a-z0-9]{2,30}$', alias):
messages.error(request, "Alias must only contain lower-case letters and digits.")
return redirect(fallback_url)
if 'force' in request.GET:
problem = None
else:
problem = self.get_unused_problem()
if not problem:
problem = Problem.objects.create()
problem.title = 'Problem #%d' % problem.id
problem.alias = 'p%d' % problem.id
problem.save(update_fields=['title', 'alias'])
problem.memory_limit = 512 # UPD: default memory limit has been raised to 512 MB
problem.alias = alias
problem.save(update_fields=['memory_limit', 'alias'])
problem.managers.add(request.user)
revision = Revision.objects.create(problem=problem,
user=self.request.user,
revision=1,
time_limit=problem.time_limit,
memory_limit=problem.memory_limit)
return redirect(reverse('polygon:revision_update', kwargs={"pk": problem.pk, "rpk": revision.pk}))
class ProblemClone(PolygonBaseMixin, View):
def post(self, request, *args, **kwargs):
try:
n = request.POST['answer']
if '-' in n:
contest_id, identifier = n.split('-')
contest = Contest.objects.get(pk=contest_id)
if (contest.access_level >= 20 and contest.status > 0) or \
(contest.access_level >= 30 and contest.always_running):
problem = contest.contestproblem_set.get(identifier=identifier).problem
else:
raise PermissionError
else:
problem = Problem.objects.get(pk=n)
if not problem.visible and not is_problem_manager(request.user, problem):
raise PermissionError
if 'force' in request.GET:
new_prob = None
else:
new_prob = ProblemCreate.get_unused_problem()
if not new_prob:
new_prob = Problem.objects.create()
new_prob.managers.add(request.user)
saved_id = new_prob.id
problem.clone_parent = problem.id
problem.id = saved_id
problem.alias = 'p%d' % problem.id
problem.visible = False
problem.save()
except:
messages.error(request, "Problem does not exist or not available.")
return redirect(reverse('polygon:problem_list_2'))
return redirect(reverse('polygon:problem_list_2') + "?exact=%d" % saved_id)
class PolygonProblemMixin(ContextMixin, PolygonBaseMixin):
raise_exception = True
post_allowed_for_low_permission = False
def init_revision(self, *args, **kwargs):
pass
def dispatch(self, request, *args, **kwargs):
self.request = request
self.problem = get_object_or_404(Problem, pk=kwargs.get('pk'))
self.latest_revisions = self.problem.revisions.all().order_by("-revision")[:5]
if is_problem_manager(self.request.user, self.problem):
self.permission = 2
else:
self.permission = 0
self.init_revision(*args, **kwargs)
return super().dispatch(request, *args, **kwargs)
def test_func(self):
"""
Permission 2: read & write
Permission 1: read (will not accept post request)
"""
if not super().test_func():
return False
if self.request.method == "POST" and self.permission < 2 and not self.post_allowed_for_low_permission:
return False
elif self.permission < 1:
return False
return True
def get_context_data(self, **kwargs):
data = super().get_context_data(**kwargs)
data['problem'] = self.problem
data['latest_revisions'] = self.latest_revisions
data['admin_list'] = self.problem.managers.all()
data['level_select'] = self.problem._meta.get_field('level').choices
return data
class ProblemRevisionMixin(PolygonProblemMixin):
model_class = None
def verify_belong_to_revision(self, id):
def expand_queryset(qs):
return set(chain(*qs.values_list("id", "parent_id"))) - {0}
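        # The loop below walks the parent chain to a fixpoint: starting from the
        # objects in this revision, it repeatedly collects ids and parent ids
        # until the set stops growing, returning True as soon as `id` shows up.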
if self.model_class is None:
raise NotImplementedError("\"model_class\" should not be None when checking revision")
if isinstance(id, str) and id.isdigit():
id = int(id)
qs = self.model_class.objects.filter(revision=self.revision.id)
ret = set()
while True:
ret_nxt = expand_queryset(qs)
if ret_nxt == ret: break
ret = ret_nxt
if id in ret: return True
qs = self.model_class.objects.filter(id__in=ret)
return False
def group_well_ordered(self):
if not self.revision.enable_group:
return True
expect_group_number = 1
for case in self.revision.cases.all().order_by("case_number"):
if case.group != expect_group_number:
if case.group == expect_group_number + 1:
expect_group_number += 1
else:
return False
if expect_group_number != self.revision.group_count:
return False
return True
def case_number_well_ordered(self):
for idx, case in enumerate(self.revision.cases.all().order_by("case_number"), start=1):
if case.case_number != idx:
return False
return True
def revision_health_check(self):
self.errors = []
self.warnings = []
if not self.group_well_ordered():
self.errors.append("Group numbers are NOT well ordered.")
if not self.case_number_well_ordered():
self.warnings.append("Case numbers are not perfectly ordered.")
if not self.revision.active_statement:
self.errors.append("Must have an active statement.")
if not (256 <= self.revision.memory_limit <= 4096):
self.errors.append("Memory limit should be between 256MB and 4GB.")
if not (500 <= self.revision.time_limit <= 30000):
self.errors.append("Time limit should be between 500ms and 30 seconds.")
if not self.revision.time_limit * self.revision.cases.count() <= 900000:
self.warnings.append("Time limit of all cases exceeds 900 seconds. This brings potential problems that "
"judge requests will timeout.")
if not self.revision.well_form_policy:
self.warnings.append("Well form policy is not enabled. This brings potential problems in end-of-line and "
"unexpected spaces. Make sure you want to do this.")
if not self.revision.cases.filter(activated=True).exists():
self.errors.append("There is no activated tests for this problem.")
# check samples always first
case_sample_test = list(self.revision.cases.filter(activated=True).order_by("case_number").
values_list("in_samples", flat=True))
try:
first_not_sample = case_sample_test.index(False)
if any(case_sample_test[first_not_sample:]):
self.warnings.append("Samples are not first in the tests.")
except ValueError:
pass
if not all(map(lambda case: case.activated, self.revision.cases.filter(in_samples=True))):
self.warnings.append("Not all the samples are in final tests.")
def init_revision(self, *args, **kwargs):
self.revision = self.problem.revisions.select_related("active_statement", "active_checker", "active_validator",
"active_interactor", "user").filter(pk=kwargs['rpk'])
if len(self.revision) == 0:
raise Http404("Revision matches not found.")
else: self.revision = self.revision[0]
if self.revision.user != self.request.user or self.revision.status != 0:
self.permission = 1
self.revision_health_check()
def get_context_data(self, **kwargs):
data = super().get_context_data(**kwargs)
data['revision'] = self.revision
data['revision_errors'] = self.errors
data['revision_warnings'] = self.warnings
data['revision_readonly'] = self.revision.status != 0
return data
class ProblemStatus(PolygonProblemMixin, StatusList):
template_name = 'polygon/problem2/status.jinja2'
privileged = True
def get_selected_from(self):
return Submission.objects.filter(problem_id=self.problem.id)
class ProblemRejudge(PolygonProblemMixin, View):
def post(self, request, *args, **kwargs):
rejudge_all_submission_on_problem(self.problem)
return redirect(reverse('polygon:problem_status', kwargs={'pk': self.problem.id}))
class ProblemBasicInfoManage(PolygonProblemMixin, TemplateView):
"""
This includes admin and alias
"""
template_name = 'polygon/problem2/basic_info.jinja2'
def get_context_data(self, **kwargs):
data = super().get_context_data(**kwargs)
data['admin_list'] = self.problem.managers.all()
data['level_select'] = self.problem._meta.get_field('level').choices
return data
@transaction.atomic()
def post(self, request, pk):
self.problem.alias = request.POST['alias']
if not re.match(r'^[a-z0-9]{2,30}$', self.problem.alias):
messages.error(request, "Alias must contain only lower-case letters and digits.")
return redirect(reverse('polygon:problem_basic_info', kwargs=self.kwargs))
self.problem.source = request.POST['source']
self.problem.level = request.POST['level']
my_set = set(map(int, filter(lambda x: x, request.POST['admin'].split(','))))
self.problem.managers.clear()
for key in my_set:
self.problem.managers.add(User.objects.get(pk=key))
self.problem.save()
return redirect(request.POST.get('next', self.request.path))
class ProblemFavoriteToggle(PolygonProblemMixin, View):
def post(self, request, *args, **kwargs):
with transaction.atomic():
if FavoriteProblem.objects.filter(user=request.user, problem=self.problem).exists():
FavoriteProblem.objects.filter(user=request.user, problem=self.problem).delete()
else:
FavoriteProblem.objects.get_or_create(user=request.user, problem=self.problem)
return HttpResponse()
| [
"[email protected]"
] | |
ed599d93d1e0fe2eb4569c2b7658cd44121bcf99 | ee7cf88c40c848d75b20136b55f7d273ce21d2f2 | /util/ec3po/interpreter.py | 23e896c640f86f5a3e69d92966555287bb296fe5 | [
"BSD-3-Clause"
] | permissive | akappy7/ChromeOS_EC_LED_Diagnostics | 4e0dfbaf3103bbf8c34ab2c9a4c9c87a39cde3b8 | 73b8a148756a1aa723c8277e8475f0e43d703fbf | refs/heads/master | 2020-12-07T15:16:10.831477 | 2016-05-05T06:29:52 | 2016-05-05T06:29:52 | 57,103,470 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,519 | py | # Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""EC-3PO EC Interpreter
interpreter provides the interpretation layer between the EC UART and the user.
It receives commands through its command pipe, formats the commands for the EC,
and sends the command to the EC. It also presents data from the EC to either be
displayed via the interactive console interface, or some other consumer. It
additionally supports automatic command retrying if the EC drops a character in
a command.
"""
from __future__ import print_function
import binascii
# pylint: disable=cros-logging-import
import logging
import os
import Queue
import select
import sys
COMMAND_RETRIES = 3 # Number of attempts to retry a command.
EC_MAX_READ = 1024 # Max bytes to read at a time from the EC.
EC_SYN = '\xec' # Byte indicating EC interrogation.
EC_ACK = '\xc0' # Byte representing correct EC response to interrogation.
class LoggerAdapter(logging.LoggerAdapter):
"""Class which provides a small adapter for the logger."""
def process(self, msg, kwargs):
"""Prepends the served PTY to the beginning of the log message."""
return '%s - %s' % (self.extra['pty'], msg), kwargs
class Interpreter(object):
"""Class which provides the interpretation layer between the EC and user.
This class essentially performs all of the intepretation for the EC and the
user. It handles all of the automatic command retrying as well as the
formation of commands for EC images which support that.
Attributes:
logger: A logger for this module.
ec_uart_pty: An opened file object to the raw EC UART PTY.
ec_uart_pty_name: A string containing the name of the raw EC UART PTY.
cmd_pipe: A multiprocessing.Connection object which represents the
Interpreter side of the command pipe. This must be a bidirectional pipe.
Commands and responses will utilize this pipe.
dbg_pipe: A multiprocessing.Connection object which represents the
Interpreter side of the debug pipe. This must be a unidirectional pipe
with write capabilities. EC debug output will utilize this pipe.
cmd_retries: An integer representing the number of attempts the console
should retry commands if it receives an error.
log_level: An integer representing the numeric value of the log level.
    inputs: A list of objects that the interpreter selects for reading.
Initially, these are the EC UART and the command pipe.
outputs: A list of objects that the interpreter selects for writing.
ec_cmd_queue: A FIFO queue used for sending commands down to the EC UART.
last_cmd: A string that represents the last command sent to the EC. If an
error is encountered, the interpreter will attempt to retry this command
up to COMMAND_RETRIES.
enhanced_ec: A boolean indicating if the EC image that we are currently
communicating with is enhanced or not. Enhanced EC images will support
packed commands and host commands over the UART. This defaults to False
and is changed depending on the result of an interrogation.
interrogating: A boolean indicating if we are in the middle of interrogating
the EC.
connected: A boolean indicating if the interpreter is actually connected to
the UART and listening.
"""
def __init__(self, ec_uart_pty, cmd_pipe, dbg_pipe, log_level=logging.INFO):
"""Intializes an Interpreter object with the provided args.
Args:
ec_uart_pty: A string representing the EC UART to connect to.
cmd_pipe: A multiprocessing.Connection object which represents the
Interpreter side of the command pipe. This must be a bidirectional
pipe. Commands and responses will utilize this pipe.
dbg_pipe: A multiprocessing.Connection object which represents the
Interpreter side of the debug pipe. This must be a unidirectional pipe
with write capabilities. EC debug output will utilize this pipe.
log_level: An optional integer representing the numeric value of the log
level. By default, the log level will be logging.INFO (20).
"""
logger = logging.getLogger('EC3PO.Interpreter')
self.logger = LoggerAdapter(logger, {'pty': ec_uart_pty})
self.ec_uart_pty = open(ec_uart_pty, 'a+')
self.ec_uart_pty_name = ec_uart_pty
self.cmd_pipe = cmd_pipe
self.dbg_pipe = dbg_pipe
self.cmd_retries = COMMAND_RETRIES
self.log_level = log_level
self.inputs = [self.ec_uart_pty, self.cmd_pipe]
self.outputs = []
self.ec_cmd_queue = Queue.Queue()
self.last_cmd = ''
self.enhanced_ec = False
self.interrogating = False
self.connected = True
def __str__(self):
"""Show internal state of the Interpreter object.
Returns:
A string that shows the values of the attributes.
"""
string = []
string.append('%r' % self)
string.append('ec_uart_pty: %s' % self.ec_uart_pty)
string.append('cmd_pipe: %r' % self.cmd_pipe)
string.append('dbg_pipe: %r' % self.dbg_pipe)
string.append('cmd_retries: %d' % self.cmd_retries)
string.append('log_level: %d' % self.log_level)
string.append('inputs: %r' % self.inputs)
string.append('outputs: %r' % self.outputs)
string.append('ec_cmd_queue: %r' % self.ec_cmd_queue)
string.append('last_cmd: \'%s\'' % self.last_cmd)
string.append('enhanced_ec: %r' % self.enhanced_ec)
string.append('interrogating: %r' % self.interrogating)
return '\n'.join(string)
def EnqueueCmd(self, command):
"""Enqueue a command to be sent to the EC UART.
Args:
command: A string which contains the command to be sent.
"""
self.ec_cmd_queue.put(command)
self.logger.debug('Commands now in queue: %d', self.ec_cmd_queue.qsize())
# Add the EC UART as an output to be serviced.
if self.connected and self.ec_uart_pty not in self.outputs:
self.outputs.append(self.ec_uart_pty)
def PackCommand(self, raw_cmd):
r"""Packs a command for use with error checking.
For error checking, we pack console commands in a particular format. The
format is as follows:
&&[x][x][x][x]&{cmd}\n\n
^ ^ ^^ ^^ ^ ^-- 2 newlines.
| | || || |-- the raw console command.
| | || ||-- 1 ampersand.
| | ||____|--- 2 hex digits representing the CRC8 of cmd.
      | |____|-- 2 hex digits representing the length of cmd.
|-- 2 ampersands
Args:
      raw_cmd: A string containing the raw, not-yet-packed command.
Returns:
A string which contains the packed command.
"""
# Don't pack a single carriage return.
if raw_cmd != '\r':
# The command format is as follows.
# &&[x][x][x][x]&{cmd}\n\n
packed_cmd = []
packed_cmd.append('&&')
# The first pair of hex digits are the length of the command.
packed_cmd.append('%02x' % len(raw_cmd))
# Then the CRC8 of cmd.
packed_cmd.append('%02x' % Crc8(raw_cmd))
packed_cmd.append('&')
# Now, the raw command followed by 2 newlines.
packed_cmd.append(raw_cmd)
packed_cmd.append('\n\n')
return ''.join(packed_cmd)
else:
return raw_cmd
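  # For example (CRC shown symbolically since it depends on the command bytes):
  # PackCommand('version') yields '&&07<crc8-of-version>&version\n\n',
  # where '07' is len('version') in hex.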
def ProcessCommand(self, command):
"""Captures the input determines what actions to take.
Args:
command: A string representing the command sent by the user.
"""
if command == "disconnect":
if self.connected:
self.logger.debug('UART disconnect request.')
# Drop all pending commands if any.
while not self.ec_cmd_queue.empty():
c = self.ec_cmd_queue.get()
self.logger.debug('dropped: \'%s\'', c)
if self.enhanced_ec:
# Reset retry state.
self.cmd_retries = COMMAND_RETRIES
self.last_cmd = ''
# Get the UART that the interpreter is attached to.
fd = self.ec_uart_pty
self.logger.debug('fd: %r', fd)
# Remove the descriptor from the inputs and outputs.
self.inputs.remove(fd)
if fd in self.outputs:
self.outputs.remove(fd)
self.logger.debug('Removed fd. Remaining inputs: %r', self.inputs)
# Close the file.
fd.close()
# Mark the interpreter as disconnected now.
self.connected = False
self.logger.debug('Disconnected from %s.', self.ec_uart_pty_name)
return
elif command == "reconnect":
if not self.connected:
self.logger.debug('UART reconnect request.')
# Reopen the PTY.
fd = open(self.ec_uart_pty_name, 'a+')
self.logger.debug('fd: %r', fd)
self.ec_uart_pty = fd
# Add the descriptor to the inputs.
self.inputs.append(fd)
self.logger.debug('fd added. curr inputs: %r', self.inputs)
# Mark the interpreter as connected now.
self.connected = True
self.logger.debug('Connected to %s.', self.ec_uart_pty_name)
return
# Ignore any other commands while in the disconnected state.
self.logger.debug('command: \'%s\'', command)
if not self.connected:
self.logger.debug('Ignoring command because currently disconnected.')
return
# Remove leading and trailing spaces only if this is an enhanced EC image.
# For non-enhanced EC images, commands will be single characters at a time
# and can be spaces.
if self.enhanced_ec:
command = command.strip(' ')
# There's nothing to do if the command is empty.
if len(command) == 0:
return
# Check for interrogation command.
if command == EC_SYN:
# User is requesting interrogation. Send SYN as is.
self.logger.debug('User requesting interrogation.')
self.interrogating = True
# Assume the EC isn't enhanced until we get a response.
self.enhanced_ec = False
elif self.enhanced_ec:
# Enhanced EC images require the plaintext commands to be packed.
command = self.PackCommand(command)
# TODO(aaboagye): Make a dict of commands and keys and eventually,
# handle partial matching based on unique prefixes.
self.EnqueueCmd(command)
def HandleCmdRetries(self):
"""Attempts to retry commands if possible."""
if self.cmd_retries > 0:
# The EC encountered an error. We'll have to retry again.
self.logger.warning('Retrying command...')
self.cmd_retries -= 1
self.logger.warning('Retries remaining: %d', self.cmd_retries)
# Retry the command and add the EC UART to the writers again.
self.EnqueueCmd(self.last_cmd)
self.outputs.append(self.ec_uart_pty)
else:
# We're out of retries, so just give up.
self.logger.error('Command failed. No retries left.')
# Clear the command in progress.
self.last_cmd = ''
# Reset the retry count.
self.cmd_retries = COMMAND_RETRIES
def SendCmdToEC(self):
"""Sends a command to the EC."""
# If we're retrying a command, just try to send it again.
if self.cmd_retries < COMMAND_RETRIES:
cmd = self.last_cmd
else:
# If we're not retrying, we should not be writing to the EC if we have no
# items in our command queue.
assert not self.ec_cmd_queue.empty()
# Get the command to send.
cmd = self.ec_cmd_queue.get()
# Send the command.
self.ec_uart_pty.write(cmd)
self.ec_uart_pty.flush()
self.logger.debug('Sent command to EC.')
if self.enhanced_ec and cmd != EC_SYN:
# Now, that we've sent the command, store the current command as the last
# command sent. If we encounter an error string, we will attempt to retry
# this command.
if cmd != self.last_cmd:
self.last_cmd = cmd
# Reset the retry count.
self.cmd_retries = COMMAND_RETRIES
# If no command is pending to be sent, then we can remove the EC UART from
# writers. Might need better checking for command retry logic in here.
if self.ec_cmd_queue.empty():
# Remove the EC UART from the writers while we wait for a response.
self.logger.debug('Removing EC UART from writers.')
self.outputs.remove(self.ec_uart_pty)
def HandleECData(self):
"""Handle any debug prints from the EC."""
self.logger.debug('EC has data')
# Read what the EC sent us.
data = os.read(self.ec_uart_pty.fileno(), EC_MAX_READ)
self.logger.debug('got: \'%s\'', binascii.hexlify(data))
if '&E' in data and self.enhanced_ec:
# We received an error, so we should retry it if possible.
self.logger.warning('Error string found in data.')
self.HandleCmdRetries()
return
# If we were interrogating, check the response and update our knowledge
# of the current EC image.
if self.interrogating:
self.enhanced_ec = data == EC_ACK
if self.enhanced_ec:
self.logger.debug('The current EC image seems enhanced.')
else:
self.logger.debug('The current EC image does NOT seem enhanced.')
# Done interrogating.
self.interrogating = False
# For now, just forward everything the EC sends us.
self.logger.debug('Forwarding to user...')
self.dbg_pipe.send(data)
def HandleUserData(self):
"""Handle any incoming commands from the user."""
self.logger.debug('Command data available. Begin processing.')
data = self.cmd_pipe.recv()
# Process the command.
self.ProcessCommand(data)
def Crc8(data):
"""Calculates the CRC8 of data.
The generator polynomial used is: x^8 + x^2 + x + 1.
This is the same implementation that is used in the EC.
Args:
data: A string of data that we wish to calculate the CRC8 on.
Returns:
crc >> 8: An integer representing the CRC8 value.
"""
crc = 0
for byte in data:
crc ^= (ord(byte) << 8)
for _ in range(8):
if crc & 0x8000:
crc ^= (0x1070 << 3)
crc <<= 1
return crc >> 8
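# Illustration: Crc8 always returns an 8-bit value, so for any string s,
# 0 <= Crc8(s) <= 0xff; the EC computes the same polynomial on its side so the
# two checksums can be compared byte-for-byte.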
def StartLoop(interp):
"""Starts an infinite loop of servicing the user and the EC.
StartLoop checks to see if there are any commands to process, processing them
if any, and forwards EC output to the user.
When sending a command to the EC, we send the command once and check the
response to see if the EC encountered an error when receiving the command. An
error condition is reported to the interpreter by a string with at least one
'&' and 'E'. The full string is actually '&&EE', however it's possible that
the leading ampersand or trailing 'E' could be dropped. If an error is
encountered, the interpreter will retry up to the amount configured.
Args:
interp: An Interpreter object that has been properly initialised.
"""
try:
while True:
readable, writeable, _ = select.select(interp.inputs, interp.outputs, [])
for obj in readable:
# Handle any debug prints from the EC.
if obj is interp.ec_uart_pty:
interp.HandleECData()
# Handle any commands from the user.
elif obj is interp.cmd_pipe:
interp.HandleUserData()
for obj in writeable:
# Send a command to the EC.
if obj is interp.ec_uart_pty:
interp.SendCmdToEC()
finally:
# Close pipes.
interp.cmd_pipe.close()
interp.dbg_pipe.close()
# Close file descriptor.
interp.ec_uart_pty.close()
# Exit.
sys.exit(0)
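# A minimal wiring sketch (assumed usage; the actual pipe setup lives in the
# caller, not in this module):
#   import multiprocessing
#   cmd_parent, cmd_child = multiprocessing.Pipe()
#   dbg_recv, dbg_send = multiprocessing.Pipe(duplex=False)
#   itpr = Interpreter('/dev/pts/5', cmd_child, dbg_send)
#   multiprocessing.Process(target=StartLoop, args=(itpr,)).start()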
| [
"[email protected]"
] |