Convert a single-page LaTeX document into an image.
To display the returned image, use `img.show()`.
Required external dependencies: `pdflatex` (with the `qcircuit` package)
and `poppler` (for `pdftocairo`).
Args:
    latex: A LaTeX document as a string.
Returns:
    A PIL Image
Raises:
    OSError: If an external dependency is not installed.
|
def render_latex(latex: str) -> PIL.Image: # pragma: no cover
"""
    Convert a single-page LaTeX document into an image.
    To display the returned image, use `img.show()`.
    Required external dependencies: `pdflatex` (with the `qcircuit` package)
    and `poppler` (for `pdftocairo`).
    Args:
        latex: A LaTeX document as a string.
    Returns:
        A PIL Image
    Raises:
        OSError: If an external dependency is not installed.
"""
tmpfilename = 'circ'
with tempfile.TemporaryDirectory() as tmpdirname:
tmppath = os.path.join(tmpdirname, tmpfilename)
with open(tmppath + '.tex', 'w') as latex_file:
latex_file.write(latex)
subprocess.run(["pdflatex",
"-halt-on-error",
"-output-directory={}".format(tmpdirname),
"{}".format(tmpfilename+'.tex')],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
check=True)
        subprocess.run(['pdftocairo',
                        '-singlefile',
                        '-png',
                        '-q',
                        tmppath + '.pdf',
                        tmppath],
                       check=True)
img = PIL.Image.open(tmppath + '.png')
return img
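
A minimal usage sketch (assuming pdflatex and poppler are installed; the
standalone document class is just one convenient choice):

    doc = r"""
    \documentclass{standalone}
    \begin{document}
    $E = mc^2$
    \end{document}
    """
    img = render_latex(doc)  # returns a PIL Image
    img.show()               # display in the default image viewer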
|
Create an image of a quantum circuit.
A convenience function that calls circuit_to_latex() and render_latex().
Args:
circ: A quantum Circuit
qubits: Optional qubit list to specify qubit order
Returns:
A PIL Image (use img.show() to display)
Raises:
NotImplementedError: For unsupported gates.
OSError: If an external dependency is not installed.
|
def circuit_to_image(circ: Circuit,
qubits: Qubits = None) -> PIL.Image: # pragma: no cover
"""Create an image of a quantum circuit.
A convenience function that calls circuit_to_latex() and render_latex().
Args:
circ: A quantum Circuit
qubits: Optional qubit list to specify qubit order
Returns:
        A PIL Image (use img.show() to display)
Raises:
NotImplementedError: For unsupported gates.
OSError: If an external dependency is not installed.
"""
latex = circuit_to_latex(circ, qubits)
img = render_latex(latex)
return img
|
Format an object as a latex string.
|
def _latex_format(obj: Any) -> str:
"""Format an object as a latex string."""
if isinstance(obj, float):
try:
return sympy.latex(symbolize(obj))
except ValueError:
return "{0:.4g}".format(obj)
return str(obj)
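
Illustrative behaviour (a sketch; exactly which floats symbolize() accepts,
and the LaTeX sympy emits, depend on the installed versions):

    _latex_format(0.5)        # via sympy, e.g. '\\frac{1}{2}'
    _latex_format(1.2345678)  # if symbolize() rejects it: '1.235' via '{0:.4g}'
    _latex_format('alpha')    # non-floats pass through str()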
|
TensorFlow eager mode example. Given an arbitrary one-qubit gate, use
gradient descent to find the corresponding parameters of a universal ZYZ
gate.
|
def fit_zyz(target_gate):
"""
    TensorFlow eager mode example. Given an arbitrary one-qubit gate, use
    gradient descent to find the corresponding parameters of a universal ZYZ
    gate.
"""
assert bk.BACKEND == 'eager'
tf = bk.TL
tfe = bk.tfe
steps = 4000
dev = '/gpu:0' if bk.DEVICE == 'gpu' else '/cpu:0'
with tf.device(dev):
t = tfe.Variable(np.random.normal(size=[3]), name='t')
def loss_fn():
"""Loss"""
gate = qf.ZYZ(t[0], t[1], t[2])
ang = qf.fubini_study_angle(target_gate.vec, gate.vec)
return ang
loss_and_grads = tfe.implicit_value_and_gradients(loss_fn)
# opt = tf.train.GradientDescentOptimizer(learning_rate=0.005)
opt = tf.train.AdamOptimizer(learning_rate=0.001)
# train = opt.minimize(ang, var_list=[t])
for step in range(steps):
loss, grads_and_vars = loss_and_grads()
sys.stdout.write('\r')
sys.stdout.write("step: {:3d} loss: {:10.9f}".format(step,
loss.numpy()))
if loss < 0.0001:
break
opt.apply_gradients(grads_and_vars)
print()
return bk.evaluate(t)
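
A hypothetical invocation (assumes QuantumFlow is importable as qf with the
'eager' backend selected, as asserted above; random_gate and gates_close are
defined elsewhere in this codebase):

    target = qf.random_gate(1)       # an arbitrary 1-qubit unitary
    t0, t1, t2 = fit_zyz(target)
    print(qf.gates_close(target, qf.ZYZ(t0, t1, t2)))  # True, up to phase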
|
Print version strings of currently installed dependencies
``> python -m quantumflow.meta``
Args:
file: Output stream. Defaults to stdout.
|
def print_versions(file: typing.TextIO = None) -> None:
"""
Print version strings of currently installed dependencies
``> python -m quantumflow.meta``
Args:
file: Output stream. Defaults to stdout.
"""
    print('** QuantumFlow dependencies (> python -m quantumflow.meta) **', file=file)
print('quantumflow \t', qf.__version__, file=file)
print('python \t', sys.version[0:5], file=file)
print('numpy \t', np.__version__, file=file)
print('networkx \t', nx.__version__, file=file)
print('cvxpy \t', cvx.__version__, file=file)
print('pyquil \t', pyquil.__version__, file=file)
print(bk.name, ' \t', bk.version, '(BACKEND)', file=file)
|
TensorFlow example. Given an arbitrary one-qubit gate, use gradient
descent to find the corresponding parameters of a universal ZYZ gate.
|
def fit_zyz(target_gate):
"""
    TensorFlow example. Given an arbitrary one-qubit gate, use gradient
    descent to find the corresponding parameters of a universal ZYZ gate.
"""
assert bk.BACKEND == 'tensorflow'
tf = bk.TL
steps = 4000
t = tf.get_variable('t', [3])
gate = qf.ZYZ(t[0], t[1], t[2])
ang = qf.fubini_study_angle(target_gate.vec, gate.vec)
opt = tf.train.AdamOptimizer(learning_rate=0.001)
train = opt.minimize(ang, var_list=[t])
with tf.Session() as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
for step in range(steps):
sess.run(train)
loss = sess.run(ang)
sys.stdout.write('\r')
sys.stdout.write("step: {} gate_angle: {}".format(step, loss))
if loss < 0.0001:
break
print()
return sess.run(t)
|
Prepare a 4-qubit W state using sqrt(iswaps) and local gates
|
def prepare_w4():
"""
Prepare a 4-qubit W state using sqrt(iswaps) and local gates
"""
circ = qf.Circuit()
circ += qf.X(1)
circ += qf.ISWAP(1, 2) ** 0.5
circ += qf.S(2)
circ += qf.Z(2)
circ += qf.ISWAP(2, 3) ** 0.5
circ += qf.S(3)
circ += qf.Z(3)
circ += qf.ISWAP(0, 1) ** 0.5
circ += qf.S(0)
circ += qf.Z(0)
ket = circ.run()
return ket
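
A quick sanity check one might run (a sketch; State.probabilities() is
assumed from this package's State API):

    ket = prepare_w4()
    probs = ket.probabilities()
    # the four one-hot basis states |0001>, |0010>, |0100>, |1000>
    # should each carry probability 1/4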
|
Converts a 1-qubit gate into an RN gate: a 1-qubit rotation of angle theta
about the axis (nx, ny, nz) of the Bloch sphere.
Returns:
A Circuit containing a single RN gate
|
def bloch_decomposition(gate: Gate) -> Circuit:
"""
    Converts a 1-qubit gate into an RN gate: a 1-qubit rotation of angle theta
    about the axis (nx, ny, nz) of the Bloch sphere.
Returns:
A Circuit containing a single RN gate
"""
if gate.qubit_nb != 1:
raise ValueError('Expected 1-qubit gate')
U = asarray(gate.asoperator())
U /= np.linalg.det(U) ** (1/2)
nx = - U[0, 1].imag
ny = - U[0, 1].real
nz = - U[0, 0].imag
N = np.sqrt(nx**2 + ny**2 + nz**2)
if N == 0: # Identity
nx, ny, nz = 1, 1, 1
else:
nx /= N
ny /= N
nz /= N
sin_halftheta = N
cos_halftheta = U[0, 0].real
theta = 2 * np.arctan2(sin_halftheta, cos_halftheta)
# We return a Circuit (rather than just a gate) to keep the
# interface of decomposition routines uniform.
return Circuit([RN(theta, nx, ny, nz, *gate.qubits)])
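
A round-trip sketch (random_gate and gates_close are defined elsewhere in
this codebase); the rotation angle and axis can be read back from the RN
gate's parameters:

    gate = random_gate(1)
    circ = bloch_decomposition(gate)
    assert gates_close(gate, circ.asgate())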
|
Returns the Euler Z-Y-Z decomposition of a local 1-qubit gate.
|
def zyz_decomposition(gate: Gate) -> Circuit:
"""
Returns the Euler Z-Y-Z decomposition of a local 1-qubit gate.
"""
if gate.qubit_nb != 1:
raise ValueError('Expected 1-qubit gate')
q, = gate.qubits
U = asarray(gate.asoperator())
U /= np.linalg.det(U) ** (1/2) # SU(2)
if abs(U[0, 0]) > abs(U[1, 0]):
theta1 = 2 * np.arccos(min(abs(U[0, 0]), 1))
else:
theta1 = 2 * np.arcsin(min(abs(U[1, 0]), 1))
cos_halftheta1 = np.cos(theta1/2)
if not np.isclose(cos_halftheta1, 0.0):
phase = U[1, 1] / cos_halftheta1
theta0_plus_theta2 = 2 * np.arctan2(np.imag(phase), np.real(phase))
else:
theta0_plus_theta2 = 0.0
sin_halftheta1 = np.sin(theta1/2)
if not np.isclose(sin_halftheta1, 0.0):
phase = U[1, 0] / sin_halftheta1
theta0_sub_theta2 = 2 * np.arctan2(np.imag(phase), np.real(phase))
else:
theta0_sub_theta2 = 0.0
theta0 = (theta0_plus_theta2 + theta0_sub_theta2) / 2
theta2 = (theta0_plus_theta2 - theta0_sub_theta2) / 2
t0 = theta0/np.pi
t1 = theta1/np.pi
t2 = theta2/np.pi
circ1 = Circuit()
circ1 += TZ(t2, q)
circ1 += TY(t1, q)
circ1 += TZ(t0, q)
return circ1
|
Decompose a 2-qubit unitary composed of two 1-qubit local gates.
Uses the "Nearest Kronecker Product" algorithm. Will give erratic
results if the gate is not the direct product of two 1-qubit gates.
|
def kronecker_decomposition(gate: Gate) -> Circuit:
"""
Decompose a 2-qubit unitary composed of two 1-qubit local gates.
Uses the "Nearest Kronecker Product" algorithm. Will give erratic
results if the gate is not the direct product of two 1-qubit gates.
"""
# An alternative approach would be to take partial traces, but
# this approach appears to be more robust.
if gate.qubit_nb != 2:
raise ValueError('Expected 2-qubit gate')
U = asarray(gate.asoperator())
rank = 2**gate.qubit_nb
U /= np.linalg.det(U) ** (1/rank)
R = np.stack([U[0:2, 0:2].reshape(4),
U[0:2, 2:4].reshape(4),
U[2:4, 0:2].reshape(4),
U[2:4, 2:4].reshape(4)])
u, s, vh = np.linalg.svd(R)
v = vh.transpose()
A = (np.sqrt(s[0]) * u[:, 0]).reshape(2, 2)
B = (np.sqrt(s[0]) * v[:, 0]).reshape(2, 2)
q0, q1 = gate.qubits
g0 = Gate(A, qubits=[q0])
g1 = Gate(B, qubits=[q1])
if not gates_close(gate, Circuit([g0, g1]).asgate()):
raise ValueError("Gate cannot be decomposed into two 1-qubit gates")
circ = Circuit()
circ += zyz_decomposition(g0)
circ += zyz_decomposition(g1)
assert gates_close(gate, circ.asgate()) # Sanity check
return circ
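
A sketch of the intended use: build a gate that really is a direct product,
then pull the factors back out (join_gates and random_gate are defined
elsewhere in this codebase):

    g = join_gates(random_gate([0]), random_gate([1]))
    circ = kronecker_decomposition(g)
    assert gates_close(g, circ.asgate())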
|
Returns the canonical coordinates of a 2-qubit gate
|
def canonical_coords(gate: Gate) -> Sequence[float]:
"""Returns the canonical coordinates of a 2-qubit gate"""
circ = canonical_decomposition(gate)
gate = circ.elements[6] # type: ignore
params = [gate.params[key] for key in ('tx', 'ty', 'tz')]
return params
|
Decompose a 2-qubit gate by removing local 1-qubit gates to leave
the non-local canonical two-qubit gate. [1]_ [2]_ [3]_ [4]_
Returns: A Circuit of three layers: initial local 1-qubit gates (each
side ZYZ-decomposed); a CANONICAL gate, with coordinates in the Weyl
chamber; and final local 1-qubit gates.
The canonical coordinates can be found in circ.elements[6].params
More or less follows the algorithm outlined in [2]_.
.. [1] A geometric theory of non-local two-qubit operations, J. Zhang,
J. Vala, K. B. Whaley, S. Sastry, quant-ph/0209120
.. [2] An analytical decomposition protocol for optimal implementation of
two-qubit entangling gates. M. Blaauboer, R.L. de Visser,
cond-mat/0609750
.. [3] Metric structure of two-qubit gates, perfect entanglers and quantum
control, P. Watts, M. O'Connor, J. Vala, Entropy (2013)
.. [4] Constructive Quantum Shannon Decomposition from Cartan Involutions,
B. Drury, P. Love, arXiv:0806.4015
|
def canonical_decomposition(gate: Gate) -> Circuit:
"""Decompose a 2-qubit gate by removing local 1-qubit gates to leave
the non-local canonical two-qubit gate. [1]_ [2]_ [3]_ [4]_
    Returns: A Circuit of three layers: initial local 1-qubit gates (each
    side ZYZ-decomposed); a CANONICAL gate, with coordinates in the Weyl
    chamber; and final local 1-qubit gates.
    The canonical coordinates can be found in circ.elements[6].params
    More or less follows the algorithm outlined in [2]_.
    .. [1] A geometric theory of non-local two-qubit operations, J. Zhang,
       J. Vala, K. B. Whaley, S. Sastry, quant-ph/0209120
    .. [2] An analytical decomposition protocol for optimal implementation of
       two-qubit entangling gates. M. Blaauboer, R.L. de Visser,
       cond-mat/0609750
    .. [3] Metric structure of two-qubit gates, perfect entanglers and quantum
       control, P. Watts, M. O'Connor, J. Vala, Entropy (2013)
    .. [4] Constructive Quantum Shannon Decomposition from Cartan Involutions,
       B. Drury, P. Love, arXiv:0806.4015
"""
# Implementation note: The canonical decomposition is easy. Constraining
# canonical coordinates to the Weyl chamber is easy. But doing the
# canonical decomposition with the canonical gate in the Weyl chamber
# proved to be surprisingly tricky.
# Unitary transform to Magic Basis of Bell states
Q = np.asarray([[1, 0, 0, 1j],
[0, 1j, 1, 0],
[0, 1j, -1, 0],
[1, 0, 0, -1j]]) / np.sqrt(2)
Q_H = Q.conj().T
if gate.qubit_nb != 2:
raise ValueError('Expected 2-qubit gate')
U = asarray(gate.asoperator())
rank = 2**gate.qubit_nb
U /= np.linalg.det(U) ** (1/rank) # U is in SU(4) so det U = 1
U_mb = Q_H @ U @ Q # Transform gate to Magic Basis [1, (eq. 17, 18)]
M = U_mb.transpose() @ U_mb # Construct M matrix [1, (eq. 22)]
# Diagonalize symmetric complex matrix
eigvals, eigvecs = _eig_complex_symmetric(M)
lambdas = np.sqrt(eigvals) # Eigenvalues of F
# Lambdas only fixed up to a sign. So make sure det F = 1 as it should
det_F = np.prod(lambdas)
if det_F.real < 0:
lambdas[0] *= -1
coords, signs, perm = _constrain_to_weyl(lambdas)
# Construct local and canonical gates in magic basis
lambdas = (lambdas*signs)[perm]
O2 = (np.diag(signs) @ eigvecs.transpose())[perm]
F = np.diag(lambdas)
O1 = U_mb @ O2.transpose() @ F.conj()
# Sanity check: Make sure O1 and O2 are orthogonal
assert np.allclose(np.eye(4), O2.transpose() @ O2) # Sanity check
assert np.allclose(np.eye(4), O1.transpose() @ O1) # Sanity check
# Sometimes O1 & O2 end up with det = -1, instead of +1 as they should.
# We can commute a diagonal matrix through F to fix this up.
neg = np.diag([-1, 1, 1, 1])
if np.linalg.det(O2).real < 0:
O2 = neg @ O2
O1 = O1 @ neg
# Transform gates back from magic basis
K1 = Q @ O1 @ Q_H
A = Q @ F @ Q_H
K2 = Q @ O2 @ Q_H
assert gates_close(Gate(U), Gate(K1 @ A @ K2)) # Sanity check
canon = CANONICAL(coords[0], coords[1], coords[2], 0, 1)
# Sanity check
assert gates_close(Gate(A, qubits=gate.qubits), canon, tolerance=1e-4)
# Decompose local gates into the two component 1-qubit gates
gateK1 = Gate(K1, qubits=gate.qubits)
circK1 = kronecker_decomposition(gateK1)
assert gates_close(gateK1, circK1.asgate()) # Sanity check
gateK2 = Gate(K2, qubits=gate.qubits)
circK2 = kronecker_decomposition(gateK2)
assert gates_close(gateK2, circK2.asgate()) # Sanity check
# Build and return circuit
circ = Circuit()
circ += circK2
circ += canon
circ += circK1
return circ
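
For example, CNOT should reduce to canonical coordinates of roughly
(0.5, 0, 0) in this parameterization (a sketch; CNOT is assumed from this
package's standard gate set):

    circ = canonical_decomposition(CNOT(0, 1))
    canon = circ.elements[6]
    # canon.params['tx'], canon.params['ty'], canon.params['tz'] ~ 0.5, 0, 0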
|
Diagonalize a complex symmetric matrix. The eigenvalues are
complex, and the eigenvectors form an orthogonal matrix.
Returns:
eigenvalues, eigenvectors
|
def _eig_complex_symmetric(M: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Diagonalize a complex symmetric matrix. The eigenvalues are
complex, and the eigenvectors form an orthogonal matrix.
Returns:
eigenvalues, eigenvectors
"""
if not np.allclose(M, M.transpose()):
raise np.linalg.LinAlgError('Not a symmetric matrix')
# The matrix of eigenvectors should be orthogonal.
# But the standard 'eig' method will fail to return an orthogonal
# eigenvector matrix when the eigenvalues are degenerate. However,
# both the real and
# imaginary part of M must be symmetric with the same orthogonal
# matrix of eigenvectors. But either the real or imaginary part could
# vanish. So we use a randomized algorithm where we diagonalize a
# random linear combination of real and imaginary parts to find the
# eigenvectors, taking advantage of the 'eigh' subroutine for
# diagonalizing symmetric matrices.
# This can fail if we're very unlucky with our random coefficient, so we
# give the algorithm a few chances to succeed.
# Empirically, never seems to fail on randomly sampled complex
# symmetric 4x4 matrices.
# If failure rate is less than 1 in a million, then 16 rounds
# will have overall failure rate less than 1 in a googol.
# However, cannot (yet) guarantee that there aren't special cases
# which have much higher failure rates.
# GEC 2018
max_attempts = 16
for _ in range(max_attempts):
c = np.random.uniform(0, 1)
matrix = c * M.real + (1-c) * M.imag
_, eigvecs = np.linalg.eigh(matrix)
eigvecs = np.array(eigvecs, dtype=complex)
eigvals = np.diag(eigvecs.transpose() @ M @ eigvecs)
# Finish if we got a correct answer.
reconstructed = eigvecs @ np.diag(eigvals) @ eigvecs.transpose()
if np.allclose(M, reconstructed):
return eigvals, eigvecs
# Should never happen. Hopefully.
raise np.linalg.LinAlgError(
'Cannot diagonalize complex symmetric matrix.')
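
A quick check of the randomized diagonalization (a sketch, mirroring the
reconstruction test used inside the loop above):

    import numpy as np
    M = np.random.randn(4, 4) + 1j * np.random.randn(4, 4)
    M = M + M.transpose()    # make it complex symmetric
    eigvals, eigvecs = _eig_complex_symmetric(M)
    assert np.allclose(M, eigvecs @ np.diag(eigvals) @ eigvecs.transpose())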
|
QAOA Maxcut using tensorflow
|
def maxcut_qaoa(
graph,
steps=DEFAULT_STEPS,
learning_rate=LEARNING_RATE,
verbose=False):
"""QAOA Maxcut using tensorflow"""
if not isinstance(graph, nx.Graph):
graph = nx.from_edgelist(graph)
init_scale = 0.01
init_bias = 0.5
init_beta = normal(loc=init_bias, scale=init_scale, size=[steps])
init_gamma = normal(loc=init_bias, scale=init_scale, size=[steps])
beta = tf.get_variable('beta', initializer=init_beta)
gamma = tf.get_variable('gamma', initializer=init_gamma)
circ = qubo_circuit(graph, steps, beta, gamma)
cuts = graph_cuts(graph)
maxcut = cuts.max()
expect = circ.run().expectation(cuts)
loss = - expect
# === Optimization ===
opt = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
train = opt.minimize(loss, var_list=[beta, gamma])
with tf.Session() as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
block = 10
min_difference = 0.0001
last_ratio = -1
for step in range(0, MAX_OPT_STEPS, block):
for _ in range(block):
sess.run(train)
ratio = sess.run(expect) / maxcut
if ratio - last_ratio < min_difference:
break
last_ratio = ratio
if verbose:
print("# step: {} ratio: {:.4f}%".format(step, ratio))
opt_beta = sess.run(beta)
opt_gamma = sess.run(gamma)
return ratio, opt_beta, opt_gamma
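
A hypothetical run on a 4-node ring graph (assumes the TensorFlow
graph-mode backend implied by the tf.Session usage above):

    ring = [(0, 1), (1, 2), (2, 3), (3, 0)]
    ratio, beta, gamma = maxcut_qaoa(ring, steps=2, verbose=True)
    # ratio approaches 1.0 as the ansatz converges on the max cut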
|
Returns the K-qubit identity gate
|
def identity_gate(qubits: Union[int, Qubits]) -> Gate:
"""Returns the K-qubit identity gate"""
_, qubits = qubits_count_tuple(qubits)
return I(*qubits)
|
Direct product of gates. The qubit count of the result is the sum of the
qubit counts of the given gates.
|
def join_gates(*gates: Gate) -> Gate:
"""Direct product of two gates. Qubit count is the sum of each gate's
bit count."""
vectors = [gate.vec for gate in gates]
vec = reduce(outer_product, vectors)
return Gate(vec.tensor, vec.qubits)
|
Return a controlled unitary gate. Given a gate acting on K qubits,
return a new gate on K+1 qubits prepended with a control bit.
|
def control_gate(control: Qubit, gate: Gate) -> Gate:
"""Return a controlled unitary gate. Given a gate acting on K qubits,
return a new gate on K+1 qubits prepended with a control bit. """
if control in gate.qubits:
raise ValueError('Gate and control qubits overlap')
qubits = [control, *gate.qubits]
gate_tensor = join_gates(P0(control), identity_gate(gate.qubits)).tensor \
+ join_gates(P1(control), gate).tensor
controlled_gate = Gate(qubits=qubits, tensor=gate_tensor)
return controlled_gate
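
Sketch: controlling an X gate should reproduce CNOT (X, CNOT, and
gates_close are assumed from this package's standard gate set and
utilities):

    cnot = control_gate(0, X(1))
    assert gates_close(cnot, CNOT(0, 1))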
|
Return a conditional unitary gate: apply gate0 to the target qubits if the
control qubit is zero, else apply gate1.
|
def conditional_gate(control: Qubit, gate0: Gate, gate1: Gate) -> Gate:
"""Return a conditional unitary gate. Do gate0 on bit 1 if bit 0 is zero,
else do gate1 on 1"""
assert gate0.qubits == gate1.qubits # FIXME
tensor = join_gates(P0(control), gate0).tensor
tensor += join_gates(P1(control), gate1).tensor
gate = Gate(tensor=tensor, qubits=[control, *gate0.qubits])
return gate
|
Return true if gate tensor is (almost) unitary
|
def almost_unitary(gate: Gate) -> bool:
"""Return true if gate tensor is (almost) unitary"""
res = (gate @ gate.H).asoperator()
N = gate.qubit_nb
return np.allclose(asarray(res), np.eye(2**N), atol=TOLERANCE)
|
Return true if gate tensor is (almost) the identity
|
def almost_identity(gate: Gate) -> bool:
"""Return true if gate tensor is (almost) the identity"""
N = gate.qubit_nb
return np.allclose(asarray(gate.asoperator()), np.eye(2**N))
|
Return true if gate tensor is (almost) Hermitian
|
def almost_hermitian(gate: Gate) -> bool:
"""Return true if gate tensor is (almost) Hermitian"""
return np.allclose(asarray(gate.asoperator()),
asarray(gate.H.asoperator()))
|
Pretty print a gate tensor
Args:
    gate: The gate to print
    ndigits: Number of digits of precision; amplitudes whose probability
        rounds to zero at this precision are omitted
    file: Stream to which to write. Defaults to stdout
|
def print_gate(gate: Gate, ndigits: int = 2,
file: TextIO = None) -> None:
"""Pretty print a gate tensor
Args:
gate:
ndigits:
file: Stream to which to write. Defaults to stdout
"""
N = gate.qubit_nb
gate_tensor = gate.vec.asarray()
lines = []
for index, amplitude in np.ndenumerate(gate_tensor):
ket = "".join([str(n) for n in index[0:N]])
bra = "".join([str(index[n]) for n in range(N, 2*N)])
if round(abs(amplitude)**2, ndigits) > 0.0:
lines.append('{} -> {} : {}'.format(bra, ket, amplitude))
lines.sort(key=lambda x: int(x[0:N]))
print('\n'.join(lines), file=file)
|
r"""Returns a random unitary gate on K qubits.
Ref:
"How to generate random matrices from the classical compact groups"
Francesco Mezzadri, math-ph/0609050
|
def random_gate(qubits: Union[int, Qubits]) -> Gate:
r"""Returns a random unitary gate on K qubits.
Ref:
"How to generate random matrices from the classical compact groups"
Francesco Mezzadri, math-ph/0609050
"""
N, qubits = qubits_count_tuple(qubits)
unitary = scipy.stats.unitary_group.rvs(2**N)
return Gate(unitary, qubits=qubits, name='RAND{}'.format(N))
|
Prepare a 16-qubit W state using sqrt(iswaps) and local gates,
respecting linear topology
|
def prepare_w16():
"""
Prepare a 16-qubit W state using sqrt(iswaps) and local gates,
respecting linear topology
"""
ket = qf.zero_state(16)
circ = w16_circuit()
ket = circ.run(ket)
return ket
|
Return a circuit that prepares the 16-qubit W state using
sqrt(iswaps) and local gates, respecting linear topology
|
def w16_circuit() -> qf.Circuit:
"""
    Return a circuit that prepares the 16-qubit W state using
    sqrt(iswaps) and local gates, respecting linear topology
"""
gates = [
qf.X(7),
qf.ISWAP(7, 8) ** 0.5,
qf.S(8),
qf.Z(8),
qf.SWAP(7, 6),
qf.SWAP(6, 5),
qf.SWAP(5, 4),
qf.SWAP(8, 9),
qf.SWAP(9, 10),
qf.SWAP(10, 11),
qf.ISWAP(4, 3) ** 0.5,
qf.S(3),
qf.Z(3),
qf.ISWAP(11, 12) ** 0.5,
qf.S(12),
qf.Z(12),
qf.SWAP(3, 2),
qf.SWAP(4, 5),
qf.SWAP(11, 10),
qf.SWAP(12, 13),
qf.ISWAP(2, 1) ** 0.5,
qf.S(1),
qf.Z(1),
qf.ISWAP(5, 6) ** 0.5,
qf.S(6),
qf.Z(6),
qf.ISWAP(10, 9) ** 0.5,
qf.S(9),
qf.Z(9),
qf.ISWAP(13, 14) ** 0.5,
qf.S(14),
qf.Z(14),
qf.ISWAP(1, 0) ** 0.5,
qf.S(0),
qf.Z(0),
qf.ISWAP(2, 3) ** 0.5,
qf.S(3),
qf.Z(3),
qf.ISWAP(5, 4) ** 0.5,
qf.S(4),
qf.Z(4),
qf.ISWAP(6, 7) ** 0.5,
qf.S(7),
qf.Z(7),
qf.ISWAP(9, 8) ** 0.5,
qf.S(8),
qf.Z(8),
qf.ISWAP(10, 11) ** 0.5,
qf.S(11),
qf.Z(11),
qf.ISWAP(13, 12) ** 0.5,
qf.S(12),
qf.Z(12),
qf.ISWAP(14, 15) ** 0.5,
qf.S(15),
qf.Z(15),
]
circ = qf.Circuit(gates)
return circ
|
A context manager to redirect stdout and/or stderr to /dev/null.
Examples:
with muted(sys.stdout):
...
with muted(sys.stderr):
...
with muted(sys.stdout, sys.stderr):
...
|
# muted() yields, so it must be wrapped as a context manager
# (assumes 'import contextlib' at the top of this module).
@contextlib.contextmanager
def muted(*streams):
"""A context manager to redirect stdout and/or stderr to /dev/null.
Examples:
with muted(sys.stdout):
...
with muted(sys.stderr):
...
with muted(sys.stdout, sys.stderr):
...
"""
devnull = open(os.devnull, 'w')
try:
old_streams = [os.dup(s.fileno()) for s in streams]
for s in streams:
os.dup2(devnull.fileno(), s.fileno())
yield
finally:
        for o, n in zip(old_streams, streams):
os.dup2(o, n.fileno())
devnull.close()
|
Checks if a given function exists on the current platform.
|
def has_function(function_name, libraries=None):
"""Checks if a given functions exists in the current platform."""
compiler = distutils.ccompiler.new_compiler()
with muted(sys.stdout, sys.stderr):
result = compiler.has_function(
function_name, libraries=libraries)
if os.path.exists('a.out'):
os.remove('a.out')
return result
|
Execute the build command.
|
def run(self):
"""Execute the build command."""
module = self.distribution.ext_modules[0]
base_dir = os.path.dirname(__file__)
if base_dir:
os.chdir(base_dir)
exclusions = []
for define in self.define or []:
module.define_macros.append(define)
for library in self.libraries or []:
module.libraries.append(library)
building_for_windows = self.plat_name in ('win32','win-amd64')
building_for_osx = 'macosx' in self.plat_name
building_for_linux = 'linux' in self.plat_name
building_for_freebsd = 'freebsd' in self.plat_name
    building_for_openbsd = 'openbsd' in self.plat_name  # needs testing
if building_for_linux:
module.define_macros.append(('USE_LINUX_PROC', '1'))
elif building_for_windows:
module.define_macros.append(('USE_WINDOWS_PROC', '1'))
module.define_macros.append(('_CRT_SECURE_NO_WARNINGS', '1'))
module.libraries.append('kernel32')
module.libraries.append('advapi32')
module.libraries.append('user32')
module.libraries.append('crypt32')
module.libraries.append('ws2_32')
elif building_for_osx:
module.define_macros.append(('USE_MACH_PROC', '1'))
module.include_dirs.append('/usr/local/opt/openssl/include')
module.include_dirs.append('/opt/local/include')
module.library_dirs.append('/opt/local/lib')
module.include_dirs.append('/usr/local/include')
module.library_dirs.append('/usr/local/lib')
elif building_for_freebsd:
module.define_macros.append(('USE_FREEBSD_PROC', '1'))
module.include_dirs.append('/opt/local/include')
module.library_dirs.append('/opt/local/lib')
module.include_dirs.append('/usr/local/include')
module.library_dirs.append('/usr/local/lib')
elif building_for_openbsd:
module.define_macros.append(('USE_OPENBSD_PROC', '1'))
module.include_dirs.append('/opt/local/include')
module.library_dirs.append('/opt/local/lib')
module.include_dirs.append('/usr/local/include')
module.library_dirs.append('/usr/local/lib')
else:
module.define_macros.append(('USE_NO_PROC', '1'))
if has_function('memmem'):
module.define_macros.append(('HAVE_MEMMEM', '1'))
if has_function('strlcpy'):
module.define_macros.append(('HAVE_STRLCPY', '1'))
if has_function('strlcat'):
module.define_macros.append(('HAVE_STRLCAT', '1'))
if self.enable_profiling:
module.define_macros.append(('PROFILING_ENABLED', '1'))
if self.dynamic_linking:
module.libraries.append('yara')
else:
        if not self.define or ('HASH_MODULE', '1') not in self.define:
if (has_function('MD5_Init', libraries=['crypto']) and
has_function('SHA256_Init', libraries=['crypto'])):
module.define_macros.append(('HASH_MODULE', '1'))
module.define_macros.append(('HAVE_LIBCRYPTO', '1'))
module.libraries.append('crypto')
else:
exclusions.append('yara/libyara/modules/hash.c')
if self.enable_magic:
module.define_macros.append(('MAGIC_MODULE', '1'))
module.libraries.append('magic')
else:
exclusions.append('yara/libyara/modules/magic.c')
if self.enable_cuckoo:
module.define_macros.append(('CUCKOO_MODULE', '1'))
module.libraries.append('jansson')
else:
exclusions.append('yara/libyara/modules/cuckoo.c')
if self.enable_dotnet:
module.define_macros.append(('DOTNET_MODULE', '1'))
else:
exclusions.append('yara/libyara/modules/dotnet.c')
if self.enable_dex:
module.define_macros.append(('DEX_MODULE', '1'))
else:
exclusions.append('yara/libyara/modules/dex.c')
if self.enable_macho:
module.define_macros.append(('MACHO_MODULE', '1'))
else:
exclusions.append('yara/libyara/modules/macho.c')
exclusions = [os.path.normpath(x) for x in exclusions]
for directory, _, files in os.walk('yara/libyara/'):
for x in files:
x = os.path.normpath(os.path.join(directory, x))
if x.endswith('.c') and x not in exclusions:
module.sources.append(x)
build_ext.run(self)
|
Dispatch messages received from agents to the right handlers
|
async def handle_agent_message(self, agent_addr, message):
"""Dispatch messages received from agents to the right handlers"""
message_handlers = {
AgentHello: self.handle_agent_hello,
AgentJobStarted: self.handle_agent_job_started,
AgentJobDone: self.handle_agent_job_done,
AgentJobSSHDebug: self.handle_agent_job_ssh_debug,
Pong: self._handle_pong
}
    try:
        func = message_handlers[message.__class__]
    except KeyError:
        raise TypeError("Unknown message type %s" % message.__class__)
self._create_safe_task(func(agent_addr, message))
|
Dispatch messages received from clients to the right handlers
|
async def handle_client_message(self, client_addr, message):
"""Dispatch messages received from clients to the right handlers"""
# Verify that the client is registered
if message.__class__ != ClientHello and client_addr not in self._registered_clients:
await ZMQUtils.send_with_addr(self._client_socket, client_addr, Unknown())
return
message_handlers = {
ClientHello: self.handle_client_hello,
ClientNewJob: self.handle_client_new_job,
ClientKillJob: self.handle_client_kill_job,
ClientGetQueue: self.handle_client_get_queue,
Ping: self.handle_client_ping
}
    try:
        func = message_handlers[message.__class__]
    except KeyError:
        raise TypeError("Unknown message type %s" % message.__class__)
self._create_safe_task(func(client_addr, message))
|
:param client_addrs: list of clients to which we should send the update
|
async def send_container_update_to_client(self, client_addrs):
""" :param client_addrs: list of clients to which we should send the update """
self._logger.debug("Sending containers updates...")
available_containers = tuple(self._containers.keys())
msg = BackendUpdateContainers(available_containers)
for client in client_addrs:
await ZMQUtils.send_with_addr(self._client_socket, client, msg)
|
Handle a ClientHello message. Send available containers to the client
|
async def handle_client_hello(self, client_addr, _: ClientHello):
""" Handle an ClientHello message. Send available containers to the client """
self._logger.info("New client connected %s", client_addr)
self._registered_clients.add(client_addr)
await self.send_container_update_to_client([client_addr])
|
Handle a Ping message. Pong the client
|
async def handle_client_ping(self, client_addr, _: Ping):
""" Handle an Ping message. Pong the client """
await ZMQUtils.send_with_addr(self._client_socket, client_addr, Pong())
|
Handle a ClientNewJob message. Add a job to the queue and trigger an update
|
async def handle_client_new_job(self, client_addr, message: ClientNewJob):
""" Handle an ClientNewJob message. Add a job to the queue and triggers an update """
self._logger.info("Adding a new job %s %s to the queue", client_addr, message.job_id)
self._waiting_jobs[(client_addr, message.job_id)] = message
await self.update_queue()
|
Handle a ClientKillJob message. Remove a job from the waiting list or send the kill message to the right agent.
|
async def handle_client_kill_job(self, client_addr, message: ClientKillJob):
""" Handle an ClientKillJob message. Remove a job from the waiting list or send the kill message to the right agent. """
# Check if the job is not in the queue
if (client_addr, message.job_id) in self._waiting_jobs:
del self._waiting_jobs[(client_addr, message.job_id)]
# Do not forget to send a JobDone
await ZMQUtils.send_with_addr(self._client_socket, client_addr, BackendJobDone(message.job_id, ("killed", "You killed the job"),
0.0, {}, {}, {}, "", None, "", ""))
# If the job is running, transmit the info to the agent
elif (client_addr, message.job_id) in self._job_running:
agent_addr = self._job_running[(client_addr, message.job_id)][0]
await ZMQUtils.send_with_addr(self._agent_socket, agent_addr, BackendKillJob((client_addr, message.job_id)))
else:
self._logger.warning("Client %s attempted to kill unknown job %s", str(client_addr), str(message.job_id))
|
Handles a ClientGetQueue message. Send back info about the job queue
|
async def handle_client_get_queue(self, client_addr, _: ClientGetQueue):
""" Handles a ClientGetQueue message. Send back info about the job queue"""
    # jobs_running: a list of tuples in the form
    # (job_id, is_current_client_job, agent_name, info, launcher, started_at, max_end)
jobs_running = list()
for backend_job_id, content in self._job_running.items():
jobs_running.append((content[1].job_id, backend_job_id[0] == client_addr, self._registered_agents[content[0]],
content[1].course_id+"/"+content[1].task_id,
content[1].launcher, int(content[2]), int(content[2])+content[1].time_limit))
    # jobs_waiting: a list of tuples in the form
    # (job_id, is_current_client_job, info, launcher, max_time)
jobs_waiting = list()
for job_client_addr, msg in self._waiting_jobs.items():
if isinstance(msg, ClientNewJob):
jobs_waiting.append((msg.job_id, job_client_addr[0] == client_addr, msg.course_id+"/"+msg.task_id, msg.launcher,
msg.time_limit))
await ZMQUtils.send_with_addr(self._client_socket, client_addr, BackendGetQueue(jobs_running, jobs_waiting))
|
Send waiting jobs to available agents
|
async def update_queue(self):
"""
Send waiting jobs to available agents
"""
# For now, round-robin
not_found_for_agent = []
while len(self._available_agents) > 0 and len(self._waiting_jobs) > 0:
agent_addr = self._available_agents.pop(0)
# Find first job that can be run on this agent
found = False
client_addr, job_id, job_msg = None, None, None
for (client_addr, job_id), job_msg in self._waiting_jobs.items():
if job_msg.environment in self._containers_on_agent[agent_addr]:
found = True
break
if not found:
self._logger.debug("Nothing to do for agent %s", agent_addr)
not_found_for_agent.append(agent_addr)
continue
# Remove the job from the queue
del self._waiting_jobs[(client_addr, job_id)]
job_id = (client_addr, job_msg.job_id)
self._job_running[job_id] = (agent_addr, job_msg, time.time())
self._logger.info("Sending job %s %s to agent %s", client_addr, job_msg.job_id, agent_addr)
await ZMQUtils.send_with_addr(self._agent_socket, agent_addr, BackendNewJob(job_id, job_msg.course_id, job_msg.task_id,
job_msg.inputdata, job_msg.environment,
job_msg.enable_network, job_msg.time_limit,
job_msg.hard_time_limit, job_msg.mem_limit,
job_msg.debug))
    # Re-add the agents for which we did not find a suitable job
self._available_agents += not_found_for_agent
|
Handle an AgentHello message. Add agent_addr to the list of available agents
|
async def handle_agent_hello(self, agent_addr, message: AgentHello):
"""
    Handle an AgentHello message. Add agent_addr to the list of available agents
"""
self._logger.info("Agent %s (%s) said hello", agent_addr, message.friendly_name)
if agent_addr in self._registered_agents:
# Delete previous instance of this agent, if any
await self._delete_agent(agent_addr)
self._registered_agents[agent_addr] = message.friendly_name
self._available_agents.extend([agent_addr for _ in range(0, message.available_job_slots)])
self._containers_on_agent[agent_addr] = message.available_containers.keys()
self._ping_count[agent_addr] = 0
# update information about available containers
for container_name, container_info in message.available_containers.items():
if container_name in self._containers:
# check if the id is the same
if self._containers[container_name][0] == container_info["id"]:
# ok, just add the agent to the list of agents that have the container
self._logger.debug("Registering container %s for agent %s", container_name, str(agent_addr))
self._containers[container_name][2].append(agent_addr)
elif self._containers[container_name][1] > container_info["created"]:
# containers stored have been created after the new one
# add the agent, but emit a warning
self._logger.warning("Container %s has multiple version: \n"
"\t Currently registered agents have version %s (%i)\n"
"\t New agent %s has version %s (%i)",
container_name,
self._containers[container_name][0], self._containers[container_name][1],
str(agent_addr), container_info["id"], container_info["created"])
self._containers[container_name][2].append(agent_addr)
else: # self._containers[container_name][1] < container_info["created"]:
# containers stored have been created before the new one
# add the agent, update the infos, and emit a warning
self._logger.warning("Container %s has multiple version: \n"
"\t Currently registered agents have version %s (%i)\n"
"\t New agent %s has version %s (%i)",
container_name,
self._containers[container_name][0], self._containers[container_name][1],
str(agent_addr), container_info["id"], container_info["created"])
self._containers[container_name] = (container_info["id"], container_info["created"],
self._containers[container_name][2] + [agent_addr])
else:
# just add it
self._logger.debug("Registering container %s for agent %s", container_name, str(agent_addr))
self._containers[container_name] = (container_info["id"], container_info["created"], [agent_addr])
# update the queue
await self.update_queue()
# update clients
await self.send_container_update_to_client(self._registered_clients)
|
Handle an AgentJobStarted message. Send the data back to the client
|
async def handle_agent_job_started(self, agent_addr, message: AgentJobStarted):
"""Handle an AgentJobStarted message. Send the data back to the client"""
self._logger.debug("Job %s %s started on agent %s", message.job_id[0], message.job_id[1], agent_addr)
await ZMQUtils.send_with_addr(self._client_socket, message.job_id[0], BackendJobStarted(message.job_id[1]))
|
Handle an AgentJobDone message. Send the data back to the client, and start new job if needed
|
async def handle_agent_job_done(self, agent_addr, message: AgentJobDone):
"""Handle an AgentJobDone message. Send the data back to the client, and start new job if needed"""
if agent_addr in self._registered_agents:
self._logger.info("Job %s %s finished on agent %s", message.job_id[0], message.job_id[1], agent_addr)
# Remove the job from the list of running jobs
del self._job_running[message.job_id]
        # Send the data back to the client
await ZMQUtils.send_with_addr(self._client_socket, message.job_id[0], BackendJobDone(message.job_id[1], message.result,
message.grade, message.problems,
message.tests, message.custom,
message.state, message.archive,
message.stdout, message.stderr))
# The agent is available now
self._available_agents.append(agent_addr)
else:
self._logger.warning("Job result %s %s from non-registered agent %s", message.job_id[0], message.job_id[1], agent_addr)
# update the queue
await self.update_queue()
|
Handle an AgentJobSSHDebug message. Send the data back to the client
|
async def handle_agent_job_ssh_debug(self, _, message: AgentJobSSHDebug):
"""Handle an AgentJobSSHDebug message. Send the data back to the client"""
await ZMQUtils.send_with_addr(self._client_socket, message.job_id[0], BackendJobSSHDebug(message.job_id[1], message.host, message.port,
message.password))
|
Ping the agents
|
async def _do_ping(self):
""" Ping the agents """
# the list() call here is needed, as we remove entries from _registered_agents!
for agent_addr, friendly_name in list(self._registered_agents.items()):
try:
ping_count = self._ping_count.get(agent_addr, 0)
if ping_count > 5:
self._logger.warning("Agent %s (%s) does not respond: removing from list.", agent_addr, friendly_name)
delete_agent = True
else:
self._ping_count[agent_addr] = ping_count + 1
await ZMQUtils.send_with_addr(self._agent_socket, agent_addr, Ping())
delete_agent = False
        except Exception:
# This should not happen, but it's better to check anyway.
self._logger.exception("Failed to send ping to agent %s (%s). Removing it from list.", agent_addr, friendly_name)
delete_agent = True
if delete_agent:
try:
await self._delete_agent(agent_addr)
            except Exception:
self._logger.exception("Failed to delete agent %s (%s)!", agent_addr, friendly_name)
self._loop.call_later(1, self._create_safe_task, self._do_ping())
|
Deletes an agent
|
async def _delete_agent(self, agent_addr):
""" Deletes an agent """
self._available_agents = [agent for agent in self._available_agents if agent != agent_addr]
del self._registered_agents[agent_addr]
await self._recover_jobs(agent_addr)
|
Recover the jobs sent to a crashed agent
|
async def _recover_jobs(self, agent_addr):
""" Recover the jobs sent to a crashed agent """
for (client_addr, job_id), (agent, job_msg, _) in reversed(list(self._job_running.items())):
if agent == agent_addr:
await ZMQUtils.send_with_addr(self._client_socket, client_addr,
BackendJobDone(job_id, ("crash", "Agent restarted"),
0.0, {}, {}, {}, "", None, None, None))
del self._job_running[(client_addr, job_id)]
await self.update_queue()
|
Calls self._loop.create_task with a safe (== with logged exception) coroutine
|
def _create_safe_task(self, coroutine):
""" Calls self._loop.create_task with a safe (== with logged exception) coroutine """
task = self._loop.create_task(coroutine)
task.add_done_callback(self.__log_safe_task)
return task
|
Parse a valid date
|
def parse_date(date, default=None):
""" Parse a valid date """
if date == "":
if default is not None:
return default
else:
raise Exception("Unknown format for " + date)
for format_type in ["%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H:%M", "%Y-%m-%d %H", "%Y-%m-%d", "%d/%m/%Y %H:%M:%S", "%d/%m/%Y %H:%M", "%d/%m/%Y %H",
"%d/%m/%Y"]:
try:
return datetime.strptime(date, format_type)
except ValueError:
pass
raise Exception("Unknown format for " + date)
|
Returns True if the task/course is not yet accessible
|
def before_start(self, when=None):
""" Returns True if the task/course is not yet accessible """
if when is None:
when = datetime.now()
return self._val[0] > when
|
Returns True if the course/task is still open
|
def is_open(self, when=None):
""" Returns True if the course/task is still open """
if when is None:
when = datetime.now()
    return self._val[0] <= when <= self._val[1]
|
Returns True if the course/task is still open with the soft deadline
|
def is_open_with_soft_deadline(self, when=None):
""" Returns True if the course/task is still open with the soft deadline """
if when is None:
when = datetime.now()
    return self._val[0] <= when <= self._soft_end
|
Returns true if the course/task is always accessible
|
def is_always_accessible(self):
""" Returns true if the course/task is always accessible """
return self._val[0] == datetime.min and self._val[1] == datetime.max
|
Returns true if the course/task is never accessible
|
def is_never_accessible(self):
""" Returns true if the course/task is never accessible """
return self._val[0] == datetime.max and self._val[1] == datetime.max
|
If the date is custom, return the start datetime with the format %Y-%m-%d %H:%M:%S. Else, returns "".
|
def get_std_start_date(self):
""" If the date is custom, return the start datetime with the format %Y-%m-%d %H:%M:%S. Else, returns "". """
first, _ = self._val
if first != datetime.min and first != datetime.max:
return first.strftime("%Y-%m-%d %H:%M:%S")
else:
return ""
|
If the date is custom, return the end datetime with the format %Y-%m-%d %H:%M:%S. Else, returns "".
|
def get_std_end_date(self):
""" If the date is custom, return the end datetime with the format %Y-%m-%d %H:%M:%S. Else, returns "". """
_, second = self._val
if second != datetime.max:
return second.strftime("%Y-%m-%d %H:%M:%S")
else:
return ""
|
Runs a new job.
It works exactly like the Client class, except that there is no callback;
the result is returned directly, as a tuple
(result, grade, problems, tests, custom, state, archive, stdout, stderr).
|
def new_job(self, task, inputdata, launcher_name="Unknown", debug=False):
"""
    Runs a new job.
    It works exactly like the Client class, except that there is no callback;
    the result is returned directly, as a tuple
    (result, grade, problems, tests, custom, state, archive, stdout, stderr).
"""
job_semaphore = threading.Semaphore(0)
def manage_output(result, grade, problems, tests, custom, state, archive, stdout, stderr):
""" Manages the output of this job """
manage_output.job_return = (result, grade, problems, tests, custom, state, archive, stdout, stderr)
job_semaphore.release()
manage_output.job_return = None
self._client.new_job(task, inputdata, manage_output, launcher_name, debug)
job_semaphore.acquire()
job_return = manage_output.job_return
return job_return
|
GET request
|
def GET_AUTH(self):
""" GET request """
return self.template_helper.get_renderer().queue(*self.submission_manager.get_job_queue_snapshot(), datetime.fromtimestamp)
|
Handles GET request
|
def GET(self):
""" Handles GET request """
if self.user_manager.session_logged_in() or not self.app.allow_registration:
raise web.notfound()
error = False
reset = None
msg = ""
data = web.input()
if "activate" in data:
msg, error = self.activate_user(data)
elif "reset" in data:
msg, error, reset = self.get_reset_data(data)
return self.template_helper.get_renderer().register(reset, msg, error)
|
Returns the user info to reset
|
def get_reset_data(self, data):
""" Returns the user info to reset """
error = False
reset = None
msg = ""
user = self.database.users.find_one({"reset": data["reset"]})
if user is None:
error = True
msg = "Invalid reset hash."
else:
reset = {"hash": data["reset"], "username": user["username"], "realname": user["realname"]}
return msg, error, reset
|
Activates user
|
def activate_user(self, data):
""" Activates user """
error = False
user = self.database.users.find_one_and_update({"activate": data["activate"]}, {"$unset": {"activate": True}})
if user is None:
error = True
msg = _("Invalid activation hash.")
else:
msg = _("You are now activated. You can proceed to login.")
return msg, error
|
Parses input and registers a user
|
def register_user(self, data):
""" Parses input and register user """
error = False
msg = ""
email_re = re.compile(
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*" # dot-atom
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-011\013\014\016-\177])*"' # quoted-string
r')@(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?$', re.IGNORECASE) # domain
# Check input format
if re.match(r"^[-_|~0-9A-Z]{4,}$", data["username"], re.IGNORECASE) is None:
error = True
msg = _("Invalid username format.")
elif email_re.match(data["email"]) is None:
error = True
msg = _("Invalid email format.")
elif len(data["passwd"]) < 6:
error = True
msg = _("Password too short.")
elif data["passwd"] != data["passwd2"]:
error = True
msg = _("Passwords don't match !")
if not error:
existing_user = self.database.users.find_one({"$or": [{"username": data["username"]}, {"email": data["email"]}]})
if existing_user is not None:
error = True
if existing_user["username"] == data["username"]:
msg = _("This username is already taken !")
else:
msg = _("This email address is already in use !")
else:
passwd_hash = hashlib.sha512(data["passwd"].encode("utf-8")).hexdigest()
activate_hash = hashlib.sha512(str(random.getrandbits(256)).encode("utf-8")).hexdigest()
self.database.users.insert({"username": data["username"],
"realname": data["realname"],
"email": data["email"],
"password": passwd_hash,
"activate": activate_hash,
"bindings": {},
"language": self.user_manager._session.get("language", "en")})
try:
web.sendmail(web.config.smtp_sendername, data["email"], _("Welcome on INGInious"),
_("""Welcome on INGInious !
To activate your account, please click on the following link :
""")
+ web.ctx.home + "/register?activate=" + activate_hash)
msg = _("You are succesfully registered. An email has been sent to you for activation.")
except:
error = True
msg = _("Something went wrong while sending you activation email. Please contact the administrator.")
return msg, error
|
Send a reset link to the user to recover their password
|
def lost_passwd(self, data):
""" Send a reset link to user to recover its password """
error = False
msg = ""
# Check input format
email_re = re.compile(
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*" # dot-atom
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-011\013\014\016-\177])*"' # quoted-string
r')@(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?$', re.IGNORECASE) # domain
if email_re.match(data["recovery_email"]) is None:
error = True
msg = _("Invalid email format.")
if not error:
reset_hash = hashlib.sha512(str(random.getrandbits(256)).encode("utf-8")).hexdigest()
user = self.database.users.find_one_and_update({"email": data["recovery_email"]}, {"$set": {"reset": reset_hash}})
if user is None:
error = True
msg = _("This email address was not found in database.")
else:
try:
web.sendmail(web.config.smtp_sendername, data["recovery_email"], _("INGInious password recovery"),
_("""Dear {realname},
Someone (probably you) asked to reset your INGInious password. If this was you, please click on the following link:
""").format(realname=user["realname"]) + web.ctx.home + "/register?reset=" + reset_hash)
msg = _("An email has been sent to you to reset your password.")
            except Exception:
                error = True
                msg = _("Something went wrong while sending you the reset email. Please contact the administrator.")
return msg, error
|
Reset the user password
|
def reset_passwd(self, data):
""" Reset the user password """
error = False
msg = ""
# Check input format
if len(data["passwd"]) < 6:
error = True
msg = _("Password too short.")
elif data["passwd"] != data["passwd2"]:
error = True
msg = _("Passwords don't match !")
if not error:
passwd_hash = hashlib.sha512(data["passwd"].encode("utf-8")).hexdigest()
user = self.database.users.find_one_and_update({"reset": data["reset_hash"]},
{"$set": {"password": passwd_hash},
"$unset": {"reset": True, "activate": True}})
if user is None:
error = True
msg = _("Invalid reset hash.")
else:
msg = _("Your password has been successfully changed.")
return msg, error
|
Handles POST request
|
def POST(self):
""" Handles POST request """
if self.user_manager.session_logged_in() or not self.app.allow_registration:
raise web.notfound()
reset = None
msg = ""
error = False
data = web.input()
if "register" in data:
msg, error = self.register_user(data)
elif "lostpasswd" in data:
msg, error = self.lost_passwd(data)
elif "resetpasswd" in data:
msg, error, reset = self.get_reset_data(data)
if reset:
msg, error = self.reset_passwd(data)
if not error:
reset = None
return self.template_helper.get_renderer().register(reset, msg, error)
|
:param course: a Course object
:param taskid: the task id of the task
:raise InvalidNameException, TaskNotFoundException, TaskUnreadableException
:return: an object representing the task, of the type given in the constructor
|
def get_task(self, course, taskid):
"""
:param course: a Course object
:param taskid: the task id of the task
:raise InvalidNameException, TaskNotFoundException, TaskUnreadableException
:return: an object representing the task, of the type given in the constructor
"""
if not id_checker(taskid):
raise InvalidNameException("Task with invalid name: " + taskid)
if self._cache_update_needed(course, taskid):
self._update_cache(course, taskid)
return self._cache[(course.get_id(), taskid)][0]
|
:param courseid: the course id of the course
:param taskid: the task id of the task
:raise InvalidNameException, TaskNotFoundException, TaskUnreadableException
:return: the content of the task descriptor, as a dict
|
def get_task_descriptor_content(self, courseid, taskid):
"""
:param courseid: the course id of the course
:param taskid: the task id of the task
:raise InvalidNameException, TaskNotFoundException, TaskUnreadableException
:return: the content of the task descriptor, as a dict
"""
if not id_checker(courseid):
raise InvalidNameException("Course with invalid name: " + courseid)
if not id_checker(taskid):
raise InvalidNameException("Task with invalid name: " + taskid)
descriptor_path, descriptor_manager = self._get_task_descriptor_info(courseid, taskid)
try:
task_content = descriptor_manager.load(self.get_task_fs(courseid, taskid).get(descriptor_path))
except Exception as e:
raise TaskUnreadableException(str(e))
return task_content
|
:param courseid: the course id of the course
:param taskid: the task id of the task
:raise InvalidNameException, TaskNotFoundException
:return: the current extension of the task descriptor
|
def get_task_descriptor_extension(self, courseid, taskid):
"""
:param courseid: the course id of the course
:param taskid: the task id of the task
:raise InvalidNameException, TaskNotFoundException
:return: the current extension of the task descriptor
"""
if not id_checker(courseid):
raise InvalidNameException("Course with invalid name: " + courseid)
if not id_checker(taskid):
raise InvalidNameException("Task with invalid name: " + taskid)
descriptor_path = self._get_task_descriptor_info(courseid, taskid)[0]
return splitext(descriptor_path)[1]
|
:param courseid: the course id of the course
:param taskid: the task id of the task
:raise InvalidNameException
:return: A FileSystemProvider to the folder containing the task files
|
def get_task_fs(self, courseid, taskid):
"""
:param courseid: the course id of the course
:param taskid: the task id of the task
:raise InvalidNameException
:return: A FileSystemProvider to the folder containing the task files
"""
if not id_checker(courseid):
raise InvalidNameException("Course with invalid name: " + courseid)
if not id_checker(taskid):
raise InvalidNameException("Task with invalid name: " + taskid)
return self._filesystem.from_subfolder(courseid).from_subfolder(taskid)
|
Update the task descriptor with the dict in content
:param courseid: the course id of the course
:param taskid: the task id of the task
:param content: the content to put in the task file
:param force_extension: If None, save in the same format. Else, save with the given extension
:raise InvalidNameException, TaskNotFoundException, TaskUnreadableException
|
def update_task_descriptor_content(self, courseid, taskid, content, force_extension=None):
"""
Update the task descriptor with the dict in content
:param courseid: the course id of the course
:param taskid: the task id of the task
:param content: the content to put in the task file
    :param force_extension: If None, save in the same format. Else, save with the given extension
:raise InvalidNameException, TaskNotFoundException, TaskUnreadableException
"""
if not id_checker(courseid):
raise InvalidNameException("Course with invalid name: " + courseid)
if not id_checker(taskid):
raise InvalidNameException("Task with invalid name: " + taskid)
if force_extension is None:
path_to_descriptor, descriptor_manager = self._get_task_descriptor_info(courseid, taskid)
elif force_extension in self.get_available_task_file_extensions():
path_to_descriptor = "task." + force_extension
descriptor_manager = self._task_file_managers[force_extension]
else:
raise TaskReaderNotFoundException()
try:
self.get_task_fs(courseid, taskid).put(path_to_descriptor, descriptor_manager.dump(content))
    except Exception:
raise TaskNotFoundException()
|
Returns the list of all available tasks in a course
|
def get_readable_tasks(self, course):
""" Returns the list of all available tasks in a course """
course_fs = self._filesystem.from_subfolder(course.get_id())
tasks = [
task[0:len(task)-1] # remove trailing /
for task in course_fs.list(folders=True, files=False, recursive=False)
if self._task_file_exists(course_fs.from_subfolder(task))]
return tasks
|
Returns true if a task file exists in this directory
|
def _task_file_exists(self, task_fs):
""" Returns true if a task file exists in this directory """
for filename in ["task.{}".format(ext) for ext in self.get_available_task_file_extensions()]:
if task_fs.exists(filename):
return True
return False
|
Deletes all possible task files in the directory, to allow changing the format
|
def delete_all_possible_task_files(self, courseid, taskid):
""" Deletes all possibles task files in directory, to allow to change the format """
if not id_checker(courseid):
raise InvalidNameException("Course with invalid name: " + courseid)
if not id_checker(taskid):
raise InvalidNameException("Task with invalid name: " + taskid)
task_fs = self.get_task_fs(courseid, taskid)
for ext in self.get_available_task_file_extensions():
try:
task_fs.delete("task."+ext)
        except Exception:
pass
|
:return: a dict mapping taskid => Task
|
def get_all_tasks(self, course):
"""
    :return: a dict mapping taskid => Task
"""
tasks = self.get_readable_tasks(course)
output = {}
for task in tasks:
try:
output[task] = self.get_task(course, task)
        except Exception:
pass
return output
|
:param courseid: the course id of the course
:param taskid: the task id of the task
:raise InvalidNameException, TaskNotFoundException
:return: a tuple, containing:
(descriptor filename,
task file manager for the descriptor)
|
def _get_task_descriptor_info(self, courseid, taskid):
"""
:param courseid: the course id of the course
:param taskid: the task id of the task
:raise InvalidNameException, TaskNotFoundException
:return: a tuple, containing:
(descriptor filename,
task file manager for the descriptor)
"""
if not id_checker(courseid):
raise InvalidNameException("Course with invalid name: " + courseid)
if not id_checker(taskid):
raise InvalidNameException("Task with invalid name: " + taskid)
task_fs = self.get_task_fs(courseid, taskid)
for ext, task_file_manager in self._task_file_managers.items():
if task_fs.exists("task."+ext):
return "task." + ext, task_file_manager
raise TaskNotFoundException()
|
:param course: a Course object
:param taskid: a (valid) task id
:raise InvalidNameException, TaskNotFoundException
:return: True if an update of the cache is needed, False otherwise
|
def _cache_update_needed(self, course, taskid):
"""
:param course: a Course object
:param taskid: a (valid) task id
:raise InvalidNameException, TaskNotFoundException
    :return: True if an update of the cache is needed, False otherwise
"""
if not id_checker(taskid):
raise InvalidNameException("Task with invalid name: " + taskid)
task_fs = self.get_task_fs(course.get_id(), taskid)
if (course.get_id(), taskid) not in self._cache:
return True
try:
last_update, __, __ = self._get_last_updates(course, taskid, task_fs, False)
    except Exception:
raise TaskNotFoundException()
last_modif = self._cache[(course.get_id(), taskid)][1]
for filename, mftime in last_update.items():
if filename not in last_modif or last_modif[filename] < mftime:
return True
return False
|
Updates the cache
:param course: a Course object
:param taskid: a (valid) task id
:raise InvalidNameException, TaskNotFoundException, TaskUnreadableException
|
def _update_cache(self, course, taskid):
"""
Updates the cache
:param course: a Course object
:param taskid: a (valid) task id
:raise InvalidNameException, TaskNotFoundException, TaskUnreadableException
"""
if not id_checker(taskid):
raise InvalidNameException("Task with invalid name: " + taskid)
task_fs = self.get_task_fs(course.get_id(), taskid)
last_modif, translation_fs, task_content = self._get_last_updates(course, taskid, task_fs, True)
self._cache[(course.get_id(), taskid)] = (
self._task_class(course, taskid, task_content, task_fs, translation_fs, self._hook_manager, self._task_problem_types),
last_modif
)
|
Clean the cache of all the tasks of a given course; entries are rebuilt lazily on the next access
:param courseid: the course id of the course
|
def update_cache_for_course(self, courseid):
"""
Clean the cache of all the tasks of a given course; entries are rebuilt lazily on the next access
:param courseid: the course id of the course
"""
to_drop = []
for (cid, tid) in self._cache:
if cid == courseid:
to_drop.append(tid)
for tid in to_drop:
del self._cache[(courseid, tid)]
|
:param courseid: the course id of the course
:param taskid: the task id of the task
:raise InvalidNameException or CourseNotFoundException
Erase the content of the task folder
|
def delete_task(self, courseid, taskid):
"""
:param courseid: the course id of the course
:param taskid: the task id of the task
:raise InvalidNameException or CourseNotFoundException
Erase the content of the task folder
"""
if not id_checker(courseid):
raise InvalidNameException("Course with invalid name: " + courseid)
if not id_checker(taskid):
raise InvalidNameException("Task with invalid name: " + taskid)
task_fs = self.get_task_fs(courseid, taskid)
if task_fs.exists():
task_fs.delete()
get_course_logger(courseid).info("Task %s erased from the factory.", taskid)
|
Prepare SAML request
|
def prepare_request(settings):
""" Prepare SAML request """
# Set the ACS url and binding method
settings["sp"]["assertionConsumerService"] = {
"url": web.ctx.homedomain + web.ctx.homepath + "/auth/callback/" + settings["id"],
"binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST"
}
# If the server is behind proxies or load balancers, use the HTTP_X_FORWARDED fields
data = web.input()
return {
'https': 'on' if web.ctx.protocol == 'https' else 'off',
'http_host': web.ctx.environ["SERVER_NAME"],
'server_port': web.ctx.environ["SERVER_PORT"],
'script_name': web.ctx.homepath,
'get_data': data.copy(),
'post_data': data.copy(),
# Uncomment if using ADFS as IdP, https://github.com/onelogin/python-saml/pull/144
# 'lowercase_urlencoding': True,
'query_string': web.ctx.query
}
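For context, a hedged usage sketch: the dict built here is the request data that python3-saml expects. The start_sso helper below is illustrative, not part of the plugin.

import web
from onelogin.saml2.auth import OneLogin_Saml2_Auth

def start_sso(settings):
    # Build the request data from the current web.py context, then
    # redirect the browser to the IdP login page.
    req = prepare_request(settings)
    auth = OneLogin_Saml2_Auth(req, old_settings=settings)
    raise web.seeother(auth.login())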
|
:return: a dict of available containers in the form
{
"name": { #for example, "default"
"id": "container img id", # "sha256:715c5cb5575cdb2641956e42af4a53e69edf763ce701006b2c6e0f4f39b68dd3"
"created": 12345678 # create date
"ports": [22, 434] # list of ports needed
}
}
|
def get_containers(self):
"""
:return: a dict of available containers in the form
{
"name": { #for example, "default"
"id": "container img id", # "sha256:715c5cb5575cdb2641956e42af4a53e69edf763ce701006b2c6e0f4f39b68dd3"
"created": 12345678 # create date
"ports": [22, 434] # list of ports needed
}
}
"""
# First, create a dict with {"id": {"title": "alias", "created": 000, "ports": [0, 1]}}
images = {}
for x in self._docker.images.list(filters={"label": "org.inginious.grading.name"}):
try:
title = x.labels["org.inginious.grading.name"]
created = datetime.strptime(x.attrs['Created'][:-4], "%Y-%m-%dT%H:%M:%S.%f").timestamp()
ports = [int(y) for y in x.labels["org.inginious.grading.ports"].split(
",")] if "org.inginious.grading.ports" in x.labels else []
images[x.attrs['Id']] = {"title": title, "created": created, "ports": ports}
except Exception:
logging.getLogger("inginious.agent").exception("Container image %s is badly formatted", x.attrs.get('Id', '?'))
# Then, we keep only the last version of each name
latest = {}
for img_id, img_c in images.items():
if img_c["title"] not in latest or latest[img_c["title"]]["created"] < img_c["created"]:
latest[img_c["title"]] = {"id": img_id, "created": img_c["created"], "ports": img_c["ports"]}
return latest
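For reference, an image is only picked up here if it carries the org.inginious.grading.name label. A minimal check with the Python docker SDK, as a hedged sketch (the image tag is illustrative):

import docker

client = docker.from_env()
img = client.images.get("ingi/inginious-c-default")   # illustrative tag
print(img.labels.get("org.inginious.grading.name"))   # e.g. "default"
print(img.labels.get("org.inginious.grading.ports"))  # e.g. "22,8080", or None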
|
Get the external IP of the host of the docker daemon. Uses OpenDNS internally.
:param env_with_dig: any container image that has dig
|
def get_host_ip(self, env_with_dig='ingi/inginious-c-default'):
"""
Get the external IP of the host of the docker daemon. Uses OpenDNS internally.
:param env_with_dig: any container image that has dig
"""
try:
container = self._docker.containers.create(env_with_dig, command="dig +short myip.opendns.com @resolver1.opendns.com")
container.start()
response = container.wait()
assert response["StatusCode"] == 0 if isinstance(response, dict) else response == 0
answer = container.logs(stdout=True, stderr=False).decode('utf8').strip()
container.remove(v=True, link=False, force=True)
return answer
except Exception:
return None
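An equivalent one-off check with the high-level docker SDK, as a hedged sketch (containers.run with remove=True returns the container logs as bytes):

import docker

client = docker.from_env()
logs = client.containers.run("ingi/inginious-c-default",
                             "dig +short myip.opendns.com @resolver1.opendns.com",
                             remove=True)
print(logs.decode("utf8").strip())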
|
Creates a container.
:param environment: env to start (name/id of a docker image)
:param network_grading: boolean to indicate if the network should be enabled in the container or not
:param mem_limit: memory limit, in MB (megabytes)
:param task_path: path to the task directory that will be mounted in the container
:param sockets_path: path to the socket directory that will be mounted in the container
:param course_common_path: path to the course common directory, mounted read-only at /course/common
:param course_common_student_path: path to the course common student directory, mounted read-only at /course/common/student
:param ports: dictionary in the form {docker_port: external_port}
:return: the container id
|
def create_container(self, environment, network_grading, mem_limit, task_path, sockets_path,
course_common_path, course_common_student_path, ports=None):
"""
Creates a container.
:param environment: env to start (name/id of a docker image)
:param network_grading: boolean to indicate if the network should be enabled in the container or not
:param mem_limit: memory limit, in MB (megabytes)
:param task_path: path to the task directory that will be mounted in the container
:param sockets_path: path to the socket directory that will be mounted in the container
:param course_common_path: path to the course common directory, mounted read-only at /course/common
:param course_common_student_path: path to the course common student directory, mounted read-only at /course/common/student
:param ports: dictionary in the form {docker_port: external_port}
:return: the container id
"""
task_path = os.path.abspath(task_path)
sockets_path = os.path.abspath(sockets_path)
course_common_path = os.path.abspath(course_common_path)
course_common_student_path = os.path.abspath(course_common_student_path)
if ports is None:
ports = {}
response = self._docker.containers.create(
environment,
stdin_open=True,
mem_limit=str(mem_limit) + "M",
memswap_limit=str(mem_limit) + "M",
mem_swappiness=0,
oom_kill_disable=True,
network_mode=("bridge" if (network_grading or len(ports) > 0) else 'none'),
ports=ports,
volumes={
task_path: {'bind': '/task'},
sockets_path: {'bind': '/sockets'},
course_common_path: {'bind': '/course/common', 'mode': 'ro'},
course_common_student_path: {'bind': '/course/common/student', 'mode': 'ro'}
}
)
return response.id
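A hedged invocation sketch; docker_interface, the image name and the paths are illustrative. Note that a non-empty ports dict forces bridge networking even when network_grading is False:

container_id = docker_interface.create_container(
    environment="ingi/inginious-c-default",
    network_grading=False,
    mem_limit=256,                                    # MB
    task_path="/tmp/task",
    sockets_path="/tmp/sockets",
    course_common_path="/tmp/common",
    course_common_student_path="/tmp/common_student",
    ports={22: 2222}                                  # container port 22 -> host port 2222
)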
|
Creates a student container
:param parent_container_id: id of the "parent" container
:param environment: env to start (name/id of a docker image)
:param network_grading: boolean to indicate if the network should be enabled in the container or not (if enabled, shares the parent container's network stack)
:param mem_limit: memory limit, in MB (megabytes)
:param student_path: path to the task directory that will be mounted in the container
:param socket_path: path to the socket that will be mounted in the container
:param systemfiles_path: path to the systemfiles folder containing files that can partially override some system files
:param course_common_student_path: path to the course common student directory, mounted read-only at /course/common/student
:return: the container id
|
def create_container_student(self, parent_container_id, environment, network_grading, mem_limit, student_path,
socket_path, systemfiles_path, course_common_student_path):
"""
Creates a student container
:param parent_container_id: id of the "parent" container
:param environment: env to start (name/id of a docker image)
:param network_grading: boolean to indicate if the network should be enabled in the container or not (if enabled, shares the parent container's network stack)
:param mem_limit: memory limit, in MB (megabytes)
:param student_path: path to the task directory that will be mounted in the container
:param socket_path: path to the socket that will be mounted in the container
:param systemfiles_path: path to the systemfiles folder containing files that can partially override some system files
:param course_common_student_path: path to the course common student directory, mounted read-only at /course/common/student
:return: the container id
"""
student_path = os.path.abspath(student_path)
socket_path = os.path.abspath(socket_path)
systemfiles_path = os.path.abspath(systemfiles_path)
course_common_student_path = os.path.abspath(course_common_student_path)
response = self._docker.containers.create(
environment,
stdin_open=True,
command="_run_student_intern",
mem_limit=str(mem_limit) + "M",
memswap_limit=str(mem_limit) + "M",
mem_swappiness=0,
oom_kill_disable=True,
network_mode=('none' if not network_grading else ('container:' + parent_container_id)),
volumes={
student_path: {'bind': '/task/student'},
socket_path: {'bind': '/__parent.sock'},
systemfiles_path: {'bind': '/task/systemfiles', 'mode': 'ro'},
course_common_student_path: {'bind': '/course/common/student', 'mode': 'ro'}
}
)
return response.id
|
Return an object wrapping a socket attached to the stdin/stdout of a container. The returned object provides a get_socket() method to obtain the
underlying socket.socket object and a close_socket() method to close the connection
|
def attach_to_container(self, container_id):
""" A socket attached to the stdin/stdout of a container. The object returned contains a get_socket() function to get a socket.socket
object and close_socket() to close the connection """
sock = self._docker.containers.get(container_id).attach_socket(params={
'stdin': 1,
'stdout': 1,
'stderr': 0,
'stream': 1,
})
# fix a problem with docker-py; we must keep a reference of sock at every time
return FixDockerSocket(sock)
|
Return the full stdout/stderr of a container
|
def get_logs(self, container_id):
""" Return the full stdout/stderr of a container"""
stdout = self._docker.containers.get(container_id).logs(stdout=True, stderr=False).decode('utf8')
stderr = self._docker.containers.get(container_id).logs(stdout=False, stderr=True).decode('utf8')
return stdout, stderr
|
:param container_id:
:return: an iterable that contains dictionaries with the stats of the running container. See the docker api for content.
|
def get_stats(self, container_id):
"""
:param container_id:
:return: an iterable that contains dictionaries with the stats of the running container. See the docker api for content.
"""
return self._docker.containers.get(container_id).stats(decode=True)
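A hedged usage sketch reading a single sample from the stream; the key layout follows the docker stats API, and memory_stats can be empty on the very first sample:

stats_stream = docker_interface.get_stats(container_id)  # docker_interface is illustrative
sample = next(iter(stats_stream))
print(sample["memory_stats"]["usage"] / (1024 * 1024), "MB in use")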
|
Removes a container (with fire)
|
def remove_container(self, container_id):
"""
Removes a container (with fire)
"""
self._docker.containers.get(container_id).remove(v=True, link=False, force=True)
|
Kills a container
:param signal: custom signal. Default is SIGKILL.
|
def kill_container(self, container_id, signal=None):
"""
Kills a container
:param signal: custom signal. Default is SIGKILL.
"""
self._docker.containers.get(container_id).kill(signal)
|
:param filters: filters to apply on messages. See docker api.
:return: an iterable that contains events from docker. See the docker api for content.
|
def event_stream(self, filters=None):
"""
:param filters: filters to apply on messages. See docker api.
:return: an iterable that contains events from docker. See the docker api for content.
"""
if filters is None:
filters = {}
return self._docker.events(decode=True, filters=filters)
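A hedged usage sketch, e.g. watching for containers that die (the filter keys follow the docker events API):

for event in docker_interface.event_stream(filters={"event": "die"}):  # illustrative instance
    print(event.get("id"), event.get("status"))
    break  # stop after the first event in this sketch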
|
Correctly closes the socket
:return:
|
def close_socket(self):
"""
Correctly closes the socket
:return:
"""
try:
self.docker_py_sock._sock.close() # pylint: disable=protected-access
except AttributeError:
pass
self.docker_py_sock.close()
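For context, close_socket belongs to the FixDockerSocket wrapper returned by attach_to_container above. A minimal sketch of the rest of the wrapper, assuming the docker-py attach socket is stored as docker_py_sock:

class FixDockerSocket():
    """ Keeps a reference to the docker-py socket so that it is not garbage-collected while still in use """
    def __init__(self, docker_py_sock):
        self.docker_py_sock = docker_py_sock

    def get_socket(self):
        """ Returns the raw socket.socket object """
        try:
            return self.docker_py_sock._sock  # pylint: disable=protected-access
        except AttributeError:
            return self.docker_py_sock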
|
Checks that a given path is valid. If it's not, raises NotFoundException
|
def _checkpath(self, path):
""" Checks that a given path is valid. If it's not, raises NotFoundException """
if path.startswith("/") or ".." in path or path.strip() != path:
raise NotFoundException()
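For illustration, a hedged sketch of which paths pass or fail this check, assuming page is an instance exposing it (hypothetical name):

page._checkpath("dir/file.txt")  # accepted: relative, no "..", no padding
for bad in ("/etc/passwd", "../secret", " padded "):
    try:
        page._checkpath(bad)
    except NotFoundException:
        print("rejected:", repr(bad))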
|
POST request
|
def POST_AUTH(self, courseid): # pylint: disable=arguments-differ
""" GET request """
course, __ = self.get_course_and_check_rights(courseid, allow_all_staff=False)
user_input = web.input(tasks=[], aggregations=[], users=[])
if "submission" in user_input:
# Replay a unique submission
submission = self.database.submissions.find_one({"_id": ObjectId(user_input.submission)})
if submission is None:
raise web.notfound()
web.header('Content-Type', 'application/json')
self.submission_manager.replay_job(course.get_task(submission["taskid"]), submission)
return json.dumps({"status": "waiting"})
else:
# Replay several submissions, check input
tasks = course.get_tasks()
error = False
msg = _("Selected submissions were set for replay.")
for i in user_input.tasks:
if i not in tasks.keys():
msg = _("Task with id {} does not exist !").format(i)
error = True
if not error:
# Load submissions
submissions, __ = self.get_selected_submissions(course, user_input.filter_type, user_input.tasks, user_input.users, user_input.aggregations, user_input.type)
for submission in submissions:
self.submission_manager.replay_job(tasks[submission["taskid"]], submission)
return self.show_page(course, web.input(), msg, error)
|
GET request
|
def GET_AUTH(self, courseid): # pylint: disable=arguments-differ
""" GET request """
course, __ = self.get_course_and_check_rights(courseid, allow_all_staff=False)
return self.show_page(course, web.input())
|
Save user profile modifications
|
def save_profile(self, userdata, data):
""" Save user profile modifications """
result = userdata
error = False
# Check if updating username.
if not userdata["username"] and "username" in data:
if re.match(r"^[-_|~0-9A-Z]{4,}$", data["username"], re.IGNORECASE) is None:
error = True
msg = _("Invalid username format.")
elif self.database.users.find_one({"username": data["username"]}):
error = True
msg = _("Username already taken")
else:
result = self.database.users.find_one_and_update({"email": userdata["email"]},
{"$set": {"username": data["username"]}},
return_document=ReturnDocument.AFTER)
if not result:
error = True
msg = _("Incorrect email.")
else:
self.user_manager.connect_user(result["username"], result["realname"], result["email"],
result["language"])
msg = _("Profile updated.")
return result, msg, error
# Check if updating the password.
if self.app.allow_registration and 0 < len(data["passwd"]) < 6:
error = True
msg = _("Password too short.")
return result, msg, error
elif self.app.allow_registration and len(data["passwd"]) > 0 and data["passwd"] != data["passwd2"]:
error = True
msg = _("Passwords don't match !")
return result, msg, error
elif self.app.allow_registration and len(data["passwd"]) >= 6:
oldpasswd_hash = hashlib.sha512(data["oldpasswd"].encode("utf-8")).hexdigest()
passwd_hash = hashlib.sha512(data["passwd"].encode("utf-8")).hexdigest()
match = {"username": self.user_manager.session_username()}
if "password" in userdata:
match["password"] = oldpasswd_hash
result = self.database.users.find_one_and_update(match,
{"$set": {"password": passwd_hash}},
return_document=ReturnDocument.AFTER)
if not result:
error = True
msg = _("Incorrect old password.")
return result, msg, error
# Check if updating language
if data["language"] != userdata["language"]:
language = data["language"] if data["language"] in self.app.available_languages else "en"
result = self.database.users.find_one_and_update({"username": self.user_manager.session_username()},
{"$set": {"language": language}},
return_document=ReturnDocument.AFTER)
if not result:
error = True
msg = _("Incorrect username.")
return result, msg, error
else:
self.user_manager.set_session_language(language)
# Check if updating the real name
if len(data["realname"]) > 0:
result = self.database.users.find_one_and_update({"username": self.user_manager.session_username()},
{"$set": {"realname": data["realname"]}},
return_document=ReturnDocument.AFTER)
if not result:
error = True
msg = _("Incorrect username.")
return result, msg, error
else:
self.user_manager.set_session_realname(data["realname"])
else:
error = True
msg = _("Name is too short.")
return result, msg, error
msg = _("Profile updated.")
return result, msg, error
|
GET request
|
def GET_AUTH(self): # pylint: disable=arguments-differ
""" GET request """
userdata = self.database.users.find_one({"email": self.user_manager.session_email()})
if not userdata:
raise web.notfound()
return self.template_helper.get_renderer().preferences.profile("", False)
|
POST request
|
def POST_AUTH(self): # pylint: disable=arguments-differ
""" POST request """
userdata = self.database.users.find_one({"email": self.user_manager.session_email()})
if not userdata:
raise web.notfound()
msg = ""
error = False
data = web.input()
if "save" in data:
userdata, msg, error = self.save_profile(userdata, data)
return self.template_helper.get_renderer().preferences.profile(msg, error)
|
Init the external grader plugin. This simple grader allows only anonymous requests, and submissions are not stored in database.
Available configuration:
::
plugins:
- plugin_module: inginious.frontend.plugins.simple_grader
courseid : "external"
page_pattern: "/external"
return_fields: "^(result|text|problems)$"
The grader will only return fields that are in the job return dict if their key match return_fields.
Different types of request are available : see documentation
|
def init(plugin_manager, course_factory, client, config):
"""
Init the external grader plugin. This simple grader allows only anonymous requests, and submissions are not stored in database.
Available configuration:
::
plugins:
- plugin_module: inginious.frontend.plugins.simple_grader
courseid : "external"
page_pattern: "/external"
return_fields: "^(result|text|problems)$"
The grader will only return fields that are in the job return dict if their key match return_fields.
Different types of request are available : see documentation
"""
courseid = config.get('courseid', 'external')
course = course_factory.get_course(courseid)
page_pattern = config.get('page_pattern', '/external')
return_fields = re.compile(config.get('return_fields', '^(result|text|problems)$'))
client_buffer = ClientBuffer(client)
client_sync = ClientSync(client)
class ExternalGrader(INGIniousPage):
""" Manages job from outside, using the default input """
def GET(self):
""" GET request """
return """
<!DOCTYPE html>
<html>
<head>
<title>External grade POST test</title>
</head>
<body>
<form method="post">
<textarea style="width:100%; height:400px;" name="input">{"question1":"print 'Hello World!'"}</textarea><br/>
<input type="text" name="taskid" value="helloworld"/> (taskid)<br/>
<input type="checkbox" name="async"/> async?<br/>
<input type="submit"/>
</form>
</body>
</html>"""
def keep_only_config_return_values(self, job_return):
""" Keep only some useful return values """
return {key: value for key, value in job_return.items() if return_fields.match(key)}
def POST(self):
""" POST request """
web.header('Access-Control-Allow-Origin', '*')
web.header('Content-Type', 'application/json')
post_input = web.input()
if "input" in post_input and "taskid" in post_input:
# New job
try:
task_input = json.loads(post_input.input)
except Exception:
return json.dumps({"status": "error", "status_message": "Cannot decode input"})
try:
task = course.get_task(post_input.taskid)
except Exception:
return json.dumps({"status": "error", "status_message": "Cannot open task"})
if not task.input_is_consistent(task_input, self.default_allowed_file_extensions, self.default_max_file_size):
return json.dumps({"status": "error", "status_message": "Input is not consistent with the task"})
if post_input.get("async") is None:
# New sync job
try:
result, grade, problems, tests, custom, state, archive, stdout, stderr = client_sync.new_job(task, task_input, "Plugin - Simple Grader")
job_return = {"result":result, "grade": grade, "problems": problems, "tests": tests, "custom": custom, "state": state, "archive": archive, "stdout": stdout, "stderr": stderr}
except Exception:
return json.dumps({"status": "error", "status_message": "An internal error occurred"})
return json.dumps(dict(list({"status": "done"}.items()) + list(self.keep_only_config_return_values(job_return).items())))
else:
# New async job
jobid = client_buffer.new_job(task, task_input, "Plugin - Simple Grader")
return json.dumps({"status": "done", "jobid": str(jobid)})
elif "jobid" in post_input:
# Get status of async job
if client_buffer.is_waiting(post_input["jobid"]):
return json.dumps({"status": "waiting"})
elif client_buffer.is_done(post_input["jobid"]):
result, grade, problems, tests, custom, state, archive, stdout, stderr = client_buffer.get_result(post_input["jobid"])
job_return = {"result": result, "grade": grade, "problems": problems, "tests": tests,
"custom": custom, "archive": archive, "stdout": stdout, "stderr": stderr}
return json.dumps(dict(list({"status": "done"}.items()) + list(self.keep_only_config_return_values(job_return).items())))
else:
return json.dumps({"status": "error", "status_message": "There is no job with jobid {}".format(post_input["jobid"])})
else:
return json.dumps({"status": "error", "status_message": "Unknown request type"})
plugin_manager.add_page(page_pattern, ExternalGrader)
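A hedged client-side sketch of the protocol implemented above, using the requests package; the URL and taskid are illustrative:

import json
import requests

URL = "http://localhost:8080/external"  # illustrative deployment URL

# Synchronous grading: the call blocks until the job is done.
resp = requests.post(URL, data={
    "taskid": "helloworld",
    "input": json.dumps({"question1": "print 'Hello World!'"})
})
print(resp.json())  # e.g. {"status": "done", "result": ...}

# Asynchronous grading: pass the async flag, then poll with the returned jobid.
resp = requests.post(URL, data={
    "taskid": "helloworld",
    "input": json.dumps({"question1": "print 'Hello World!'"}),
    "async": "on"
})
jobid = resp.json()["jobid"]
status = requests.post(URL, data={"jobid": jobid}).json()  # {"status": "waiting"} or the final result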
|
List courses available to the connected client. Returns a dict in the form
::
{
"courseid1":
{
"name": "Name of the course", #the name of the course
"require_password": False, #indicates if this course requires a password or not
"is_registered": False, #indicates if the user is registered to this course or not
"tasks": #only appears if is_registered is True
{
"taskid1": "name of task1",
"taskid2": "name of task2"
#...
},
"grade": 0.0 #the current grade in the course. Only appears if is_registered is True
}
#...
}
If you use the endpoint /api/v0/courses/the_course_id, this dict will contain one entry or the page will return 404 Not Found.
|
def API_GET(self, courseid=None): # pylint: disable=arguments-differ
"""
List courses available to the connected client. Returns a dict in the form
::
{
"courseid1":
{
"name": "Name of the course", #the name of the course
"require_password": False, #indicates if this course requires a password or not
"is_registered": False, #indicates if the user is registered to this course or not
"tasks": #only appears if is_registered is True
{
"taskid1": "name of task1",
"taskid2": "name of task2"
#...
},
"grade": 0.0 #the current grade in the course. Only appears if is_registered is True
}
#...
}
If you use the endpoint /api/v0/courses/the_course_id, this dict will contain one entry or the page will return 404 Not Found.
"""
output = []
if courseid is None:
courses = self.course_factory.get_all_courses()
else:
try:
courses = {courseid: self.course_factory.get_course(courseid)}
except Exception:
raise APINotFound("Course not found")
username = self.user_manager.session_username()
user_info = self.database.users.find_one({"username": username})
for courseid, course in courses.items():
if self.user_manager.course_is_open_to_user(course, username, False) or course.is_registration_possible(user_info):
data = {
"id": courseid,
"name": course.get_name(self.user_manager.session_language()),
"require_password": course.is_password_needed_for_registration(),
"is_registered": self.user_manager.course_is_open_to_user(course, username, False)
}
if self.user_manager.course_is_open_to_user(course, username, False):
data["tasks"] = {taskid: task.get_name(self.user_manager.session_language()) for taskid, task in course.get_tasks().items()}
data["grade"] = self.user_manager.get_course_cache(username, course)["grade"]
output.append(data)
return 200, output
|
Convert the output to what the client asks
|
def _api_convert_output(return_value):
""" Convert the output to what the client asks """
content_type = web.ctx.environ.get('CONTENT_TYPE', 'text/json')
if "text/json" in content_type:
web.header('Content-Type', 'text/json; charset=utf-8')
return json.dumps(return_value)
if "text/html" in content_type:
web.header('Content-Type', 'text/html; charset=utf-8')
dump = yaml.dump(return_value)
return "<pre>" + web.websafe(dump) + "</pre>"
if "text/yaml" in content_type or \
"text/x-yaml" in content_type or \
"application/yaml" in content_type or \
"application/x-yaml" in content_type:
web.header('Content-Type', 'text/yaml; charset=utf-8')
dump = yaml.dump(return_value)
return dump
web.header('Content-Type', 'text/json; charset=utf-8')
return json.dumps(return_value)
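A hedged client sketch: note that the negotiation above inspects the request's Content-Type header (not Accept), so a client selects the output format like this (the URL is illustrative):

import requests

url = "http://localhost:8080/api/v0/courses"

as_json = requests.get(url, headers={"Content-Type": "text/json"})
as_yaml = requests.get(url, headers={"Content-Type": "text/yaml"})
as_html = requests.get(url, headers={"Content-Type": "text/html"})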
|
GET request
|
def GET(self, *args, **kwargs):
""" GET request """
return self._handle_api(self.API_GET, args, kwargs)
|
PUT request
|
def PUT(self, *args, **kwargs):
""" PUT request """
return self._handle_api(self.API_PUT, args, kwargs)
|
POST request
|
def POST(self, *args, **kwargs):
""" POST request """
return self._handle_api(self.API_POST, args, kwargs)
|