blob_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | path (stringlengths 5-283) | content_id (stringlengths 40-40) | detected_licenses (sequencelengths 0-41) | license_type (stringclasses, 2 values) | repo_name (stringlengths 7-96) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | branch_name (stringclasses, 58 values) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 12.7k-662M, ⌀ nullable) | star_events_count (int64, 0-35.5k) | fork_events_count (int64, 0-20.6k) | gha_license_id (stringclasses, 11 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses, 43 values) | src_encoding (stringclasses, 9 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 7-5.88M) | extension (stringclasses, 30 values) | content (stringlengths 7-5.88M) | authors (sequencelengths 1-1) | author (stringlengths 0-73)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
780073cc16c8f338f3195e45934b88dd0709ef5b | f777b5e4a98c40f4bfc5c5c9e326faa09beb2d53 | /projects/DensePose/densepose/modeling/cse/utils.py | 18480db5e485dec3bd0daf3cae69263a6abdde4f | [
"Apache-2.0"
] | permissive | alekseynp/detectron2 | 04ae9a47d950ea4c737715b5f2aa7637d3742264 | 2409af0bf0d4bdcc685feb6d2c7fd659828acac4 | refs/heads/master | 2022-05-30T09:13:26.438077 | 2022-04-11T20:59:40 | 2022-04-11T20:59:40 | 254,280,315 | 0 | 1 | Apache-2.0 | 2020-04-09T05:34:15 | 2020-04-09T05:34:14 | null | UTF-8 | Python | false | false | 3,538 | py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from torch.nn import functional as F
def squared_euclidean_distance_matrix(pts1: torch.Tensor, pts2: torch.Tensor) -> torch.Tensor:
"""
Get squared Euclidean Distance Matrix
Computes pairwise squared Euclidean distances between points
Args:
pts1: Tensor [M x D], M is the number of points, D is feature dimensionality
pts2: Tensor [N x D], N is the number of points, D is feature dimensionality
Return:
Tensor [M, N]: matrix of squared Euclidean distances; at index (m, n)
it contains || pts1[m] - pts2[n] ||^2
"""
edm = torch.mm(-2 * pts1, pts2.t())
edm += (pts1 * pts1).sum(1, keepdim=True) + (pts2 * pts2).sum(1, keepdim=True).t()
return edm.contiguous()
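# Minimal usage sketch (illustrative only, assuming just torch from the imports above). The two
# lines above use the expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2 * a.b, so for example:
#   pts1 = torch.randn(3, 16); pts2 = torch.randn(5, 16)
#   d = squared_euclidean_distance_matrix(pts1, pts2)            # shape [3, 5]
#   torch.allclose(d, torch.cdist(pts1, pts2) ** 2, atol=1e-5)   # True up to rounding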
def normalize_embeddings(embeddings: torch.Tensor, epsilon: float = 1e-6) -> torch.Tensor:
"""
Normalize N D-dimensional embedding vectors arranged in a tensor [N, D]
Args:
embeddings (tensor [N, D]): N D-dimensional embedding vectors
epsilon (float): minimum value for a vector norm
Return:
Normalized embeddings (tensor [N, D]), such that L2 vector norms are all equal to 1.
"""
return embeddings / torch.clamp(
embeddings.norm(p=None, dim=1, keepdim=True), min=epsilon # pyre-ignore[6]
)
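# Minimal usage sketch (illustrative only):
#   e = normalize_embeddings(torch.randn(4, 16))
#   e.norm(dim=1)   # approximately 1.0 per row; near-zero rows are protected by the epsilon clamp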
def get_closest_vertices_mask_from_ES(
E: torch.Tensor,
S: torch.Tensor,
h: int,
w: int,
mesh_vertex_embeddings: torch.Tensor,
device: torch.device,
):
"""
Interpolate Embeddings and Segmentations to the size of a given bounding box,
and compute closest vertices and the segmentation mask
Args:
E (tensor [1, D, H, W]): D-dimensional embedding vectors for every point of the
default-sized box
S (tensor [1, 2, H, W]): 2-dimensional segmentation mask for every point of the
default-sized box
h (int): height of the target bounding box
w (int): width of the target bounding box
mesh_vertex_embeddings (tensor [N, D]): vertex embeddings for a chosen mesh
N is the number of vertices in the mesh, D is feature dimensionality
device (torch.device): device to move the tensors to
Return:
Closest Vertices (tensor [h, w]), int, for every point of the resulting box
Segmentation mask (tensor [h, w]), boolean, for every point of the resulting box
"""
# pyre-fixme[6]: Expected `Optional[int]` for 2nd param but got `Tuple[int, int]`.
embedding_resized = F.interpolate(E, size=(h, w), mode="bilinear")[0].to(device)
# pyre-fixme[6]: Expected `Optional[int]` for 2nd param but got `Tuple[int, int]`.
coarse_segm_resized = F.interpolate(S, size=(h, w), mode="bilinear")[0].to(device)
mask = coarse_segm_resized.argmax(0) > 0
closest_vertices = torch.zeros(mask.shape, dtype=torch.long, device=device)
all_embeddings = embedding_resized[:, mask].t()
size_chunk = 10_000 # Chunking to avoid possible OOM
edm = []
if len(all_embeddings) == 0:
return closest_vertices, mask
for chunk in range((len(all_embeddings) - 1) // size_chunk + 1):
chunk_embeddings = all_embeddings[size_chunk * chunk : size_chunk * (chunk + 1)]
edm.append(
torch.argmin(
squared_euclidean_distance_matrix(chunk_embeddings, mesh_vertex_embeddings), dim=1
)
)
closest_vertices[mask] = torch.cat(edm)
return closest_vertices, mask
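# Minimal usage sketch (illustrative only; the vertex count and embedding size below are
# arbitrary, chosen just to show the expected shapes):
#   E = torch.randn(1, 16, 112, 112)                      # embeddings over the default-sized box
#   S = torch.randn(1, 2, 112, 112)                       # 2-channel coarse segmentation scores
#   mesh_vertex_embeddings = torch.randn(7829, 16)
#   vertices, mask = get_closest_vertices_mask_from_ES(
#       E, S, 64, 48, mesh_vertex_embeddings, torch.device("cpu"))
#   vertices.shape, mask.shape                            # both torch.Size([64, 48])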
| [
"[email protected]"
] | |
12f14216b2b4a57ff01c2b1c049c8688d0d4cbf8 | 34ed92a9593746ccbcb1a02630be1370e8524f98 | /lib/pints/pints/plot.py | e915a7b2d7e282f5c1642e4f76e9500005a923c2 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | HOLL95/Cytochrome_SV | 87b7a680ed59681230f79e1de617621680ea0fa0 | d02b3469f3ee5a4c85d756053bc87651093abea1 | refs/heads/master | 2022-08-01T05:58:16.161510 | 2021-02-01T16:09:31 | 2021-02-01T16:09:31 | 249,424,867 | 0 | 0 | null | 2022-06-22T04:09:11 | 2020-03-23T12:29:29 | Jupyter Notebook | UTF-8 | Python | false | false | 28,013 | py | #
# Quick diagnostic plots.
#
# This file is part of PINTS.
# Copyright (c) 2017-2018, University of Oxford.
# For licensing information, see the LICENSE file distributed with the PINTS
# software package.
#
from __future__ import absolute_import, division
from __future__ import print_function, unicode_literals
def function(f, x, lower=None, upper=None, evaluations=20):
"""
Creates 1d plots of a :class:`LogPDF` or a :class:`ErrorMeasure` around a
point `x` (i.e. a 1-dimensional plot in each direction).
Returns a ``matplotlib`` figure object and axes handle.
Parameters
----------
f
A :class:`pints.LogPDF` or :class:`pints.ErrorMeasure` to plot.
x
A point in the function's input space.
lower
Optional lower bounds for each parameter, used to specify the lower
bounds of the plot.
upper
Optional upper bounds for each parameter, used to specify the upper
bounds of the plot.
evaluations
The number of evaluations to use in each plot.
"""
import matplotlib.pyplot as plt
import numpy as np
import pints
# Check function and get n_parameters
if not (isinstance(f, pints.LogPDF) or isinstance(f, pints.ErrorMeasure)):
raise ValueError(
'Given function must be pints.LogPDF or pints.ErrorMeasure.')
n_param = f.n_parameters()
# Check point
x = pints.vector(x)
if len(x) != n_param:
raise ValueError(
'Given point `x` must have same number of parameters as function.')
# Check boundaries
if lower is None:
# Guess boundaries based on point x
lower = x * 0.95
lower[lower == 0] = -1
else:
lower = pints.vector(lower)
if len(lower) != n_param:
raise ValueError('Lower bounds must have same number of'
+ ' parameters as function.')
if upper is None:
# Guess boundaries based on point x
upper = x * 1.05
upper[upper == 0] = 1
else:
upper = pints.vector(upper)
if len(upper) != n_param:
raise ValueError('Upper bounds must have same number of'
+ ' parameters as function.')
# Check number of evaluations
evaluations = int(evaluations)
if evaluations < 1:
raise ValueError('Number of evaluations must be greater than zero.')
# Create points to plot
xs = np.tile(x, (n_param * evaluations, 1))
for j in range(n_param):
i1 = j * evaluations
i2 = i1 + evaluations
xs[i1:i2, j] = np.linspace(lower[j], upper[j], evaluations)
# Evaluate points
fs = pints.evaluate(f, xs, parallel=False)
# Create figure
fig, axes = plt.subplots(n_param, 1, figsize=(6, 2 * n_param))
if n_param == 1:
axes = np.asarray([axes], dtype=object)
for j, p in enumerate(x):
i1 = j * evaluations
i2 = i1 + evaluations
axes[j].plot(xs[i1:i2, j], fs[i1:i2], c='green', label='Function')
axes[j].axvline(p, c='blue', label='Value')
axes[j].set_xlabel('Parameter ' + str(1 + j))
axes[j].legend()
plt.tight_layout()
return fig, axes
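# Minimal usage sketch (illustrative only, assuming the pints.toy module is available):
#   import pints.toy
#   log_pdf = pints.toy.GaussianLogPDF([0, 0], [[1, 0], [0, 1]])
#   fig, axes = function(log_pdf, [0.5, -0.5])
# This draws one 1d slice of the log-pdf through the given point along each parameter axis.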
def function_between_points(f, point_1, point_2, padding=0.25, evaluations=20):
"""
Creates and returns a plot of a function between two points in parameter
space.
Returns a ``matplotlib`` figure object and axes handle.
Parameters
----------
f
A :class:`pints.LogPDF` or :class:`pints.ErrorMeasure` to plot.
point_1
The first point in parameter space. The method will find a line from
``point_1`` to ``point_2`` and plot ``f`` at several points along it.
point_2
The second point.
padding
Specifies the amount of padding around the line segment
``[point_1, point_2]`` that will be shown in the plot.
evaluations
        The number of evaluations along the line in parameter space.
"""
import matplotlib.pyplot as plt
import numpy as np
import pints
# Check function and get n_parameters
if not (isinstance(f, pints.LogPDF) or isinstance(f, pints.ErrorMeasure)):
raise ValueError(
'Given function must be pints.LogPDF or pints.ErrorMeasure.')
n_param = f.n_parameters()
# Check points
point_1 = pints.vector(point_1)
point_2 = pints.vector(point_2)
if not (len(point_1) == len(point_2) == n_param):
raise ValueError('Both points must have the same number of parameters'
+ ' as the given function.')
# Check padding
padding = float(padding)
if padding < 0:
raise ValueError('Padding cannot be negative.')
# Check evaluation
evaluations = int(evaluations)
if evaluations < 3:
raise ValueError('The number of evaluations must be 3 or greater.')
# Figure setting
fig, axes = plt.subplots(1, 1, figsize=(6, 4))
axes.set_xlabel('Point 1 to point 2')
axes.set_ylabel('Function')
# Generate some x-values near the given parameters
s = np.linspace(-padding, 1 + padding, evaluations)
# Direction
r = point_2 - point_1
# Calculate function with other parameters fixed
x = [point_1 + sj * r for sj in s]
y = pints.evaluate(f, x, parallel=False)
# Plot
axes.plot(s, y, color='green')
axes.axvline(0, color='#1f77b4', label='Point 1')
axes.axvline(1, color='#7f7f7f', label='Point 2')
axes.legend()
return fig, axes
def histogram(samples, ref_parameters=None, n_percentiles=None):
"""
Takes one or more markov chains or lists of samples as input and creates
and returns a plot showing histograms for each chain or list of samples.
Returns a ``matplotlib`` figure object and axes handle.
Parameters
----------
samples
A list of lists of samples, with shape
``(n_lists, n_samples, n_parameters)``, where ``n_lists`` is the
number of lists of samples, ``n_samples`` is the number of samples in
one list and ``n_parameters`` is the number of parameters.
ref_parameters
A set of parameters for reference in the plot. For example, if true
values of parameters are known, they can be passed in for plotting.
n_percentiles
Shows only the middle n-th percentiles of the distribution.
Default shows all samples in ``samples``.
"""
import matplotlib.pyplot as plt
import numpy as np
# If we switch to Python3 exclusively, bins and alpha can be keyword-only
# arguments
bins = 40
alpha = 0.5
n_list = len(samples)
_, n_param = samples[0].shape
# Check number of parameters
for samples_j in samples:
if n_param != samples_j.shape[1]:
raise ValueError(
'All samples must have the same number of parameters.'
)
# Check reference parameters
if ref_parameters is not None:
if len(ref_parameters) != n_param:
raise ValueError(
'Length of `ref_parameters` must be same as number of'
' parameters.')
# Set up figure
fig, axes = plt.subplots(
n_param, 1, figsize=(6, 2 * n_param),
        squeeze=False, # Tell matplotlib to always return a 2d axes object
)
# Plot first samples
for i in range(n_param):
for j_list, samples_j in enumerate(samples):
# Add histogram subplot
axes[i, 0].set_xlabel('Parameter ' + str(i + 1))
axes[i, 0].set_ylabel('Frequency')
if n_percentiles is None:
xmin = np.min(samples_j[:, i])
xmax = np.max(samples_j[:, i])
else:
xmin = np.percentile(samples_j[:, i],
50 - n_percentiles / 2.)
xmax = np.percentile(samples_j[:, i],
50 + n_percentiles / 2.)
xbins = np.linspace(xmin, xmax, bins)
axes[i, 0].hist(
samples_j[:, i], bins=xbins, alpha=alpha,
label='Samples ' + str(1 + j_list))
# Add reference parameters if given
if ref_parameters is not None:
# For histogram subplot
ymin_tv, ymax_tv = axes[i, 0].get_ylim()
axes[i, 0].plot(
[ref_parameters[i], ref_parameters[i]],
[0.0, ymax_tv],
'--', c='k')
if n_list > 1:
axes[0, 0].legend()
plt.tight_layout()
return fig, axes[:, 0]
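# Minimal usage sketch (illustrative only):
#   chains = [np.random.normal(0, 1, size=(1000, 2)) for _ in range(3)]
#   fig, axes = histogram(chains, ref_parameters=[0, 0])
# One subplot per parameter, with the three chains overlaid as semi-transparent histograms.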
def trace(samples, ref_parameters=None, n_percentiles=None):
"""
Takes one or more markov chains or lists of samples as input and creates
and returns a plot showing histograms and traces for each chain or list of
samples.
Returns a ``matplotlib`` figure object and axes handle.
Parameters
----------
samples
A list of lists of samples, with shape
``(n_lists, n_samples, n_parameters)``, where ``n_lists`` is the
number of lists of samples, ``n_samples`` is the number of samples in
one list and ``n_parameters`` is the number of parameters.
ref_parameters
A set of parameters for reference in the plot. For example, if true
values of parameters are known, they can be passed in for plotting.
n_percentiles
Shows only the middle n-th percentiles of the distribution.
Default shows all samples in ``samples``.
"""
import matplotlib.pyplot as plt
import numpy as np
# If we switch to Python3 exclusively, bins and alpha can be keyword-only
# arguments
bins = 40
alpha = 0.5
n_list = len(samples)
_, n_param = samples[0].shape
# Check number of parameters
for samples_j in samples:
if n_param != samples_j.shape[1]:
raise ValueError(
'All samples must have the same number of parameters.'
)
# Check reference parameters
if ref_parameters is not None:
if len(ref_parameters) != n_param:
raise ValueError(
'Length of `ref_parameters` must be same as number of'
' parameters.')
# Set up figure
fig, axes = plt.subplots(
n_param, 2, figsize=(12, 2 * n_param),
# Tell matplotlib to return 2d, even if n_param is 1
squeeze=False,
)
# Plot first samples
for i in range(n_param):
ymin_all, ymax_all = np.inf, -np.inf
for j_list, samples_j in enumerate(samples):
# Add histogram subplot
axes[i, 0].set_xlabel('Parameter ' + str(i + 1))
axes[i, 0].set_ylabel('Frequency')
if n_percentiles is None:
xmin = np.min(samples_j[:, i])
xmax = np.max(samples_j[:, i])
else:
xmin = np.percentile(samples_j[:, i],
50 - n_percentiles / 2.)
xmax = np.percentile(samples_j[:, i],
50 + n_percentiles / 2.)
xbins = np.linspace(xmin, xmax, bins)
axes[i, 0].hist(samples_j[:, i], bins=xbins, alpha=alpha,
label='Samples ' + str(1 + j_list))
# Add trace subplot
axes[i, 1].set_xlabel('Iteration')
axes[i, 1].set_ylabel('Parameter ' + str(i + 1))
axes[i, 1].plot(samples_j[:, i], alpha=alpha)
# Set ylim
ymin_all = ymin_all if ymin_all < xmin else xmin
ymax_all = ymax_all if ymax_all > xmax else xmax
axes[i, 1].set_ylim([ymin_all, ymax_all])
# Add reference parameters if given
if ref_parameters is not None:
# For histogram subplot
ymin_tv, ymax_tv = axes[i, 0].get_ylim()
axes[i, 0].plot(
[ref_parameters[i], ref_parameters[i]],
[0.0, ymax_tv],
'--', c='k')
# For trace subplot
xmin_tv, xmax_tv = axes[i, 1].get_xlim()
axes[i, 1].plot(
[0.0, xmax_tv],
[ref_parameters[i], ref_parameters[i]],
'--', c='k')
if n_list > 1:
axes[0, 0].legend()
plt.tight_layout()
return fig, axes
def autocorrelation(samples, max_lags=100):
"""
Creates and returns an autocorrelation plot for a given markov chain or
list of `samples`.
Returns a ``matplotlib`` figure object and axes handle.
Parameters
----------
samples
A list of samples, with shape ``(n_samples, n_parameters)``, where
``n_samples`` is the number of samples in the list and ``n_parameters``
is the number of parameters.
max_lags
The maximum autocorrelation lag to plot.
"""
import matplotlib.pyplot as plt
import numpy as np
# Check samples size
try:
n_sample, n_param = samples.shape
except ValueError:
raise ValueError('`samples` must be of shape (n_sample,'
+ ' n_parameters).')
fig, axes = plt.subplots(n_param, 1, sharex=True, figsize=(6, 2 * n_param))
if n_param == 1:
axes = np.asarray([axes], dtype=object)
for i in range(n_param):
axes[i].acorr(samples[:, i] - np.mean(samples[:, i]), maxlags=max_lags)
axes[i].set_xlim(-0.5, max_lags + 0.5)
axes[i].legend(['Parameter ' + str(1 + i)], loc='upper right')
# Add x-label to final plot only
axes[i].set_xlabel('Lag')
# Add vertical y-label to middle plot
# fig.text(0.04, 0.5, 'Autocorrelation', va='center', rotation='vertical')
axes[int(i / 2)].set_ylabel('Autocorrelation')
plt.tight_layout()
return fig, axes
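# Minimal usage sketch (illustrative only). Note this function expects a single chain as a
# 2d array, not a list of chains:
#   chain = np.random.normal(0, 1, size=(2000, 3))
#   fig, axes = autocorrelation(chain, max_lags=50)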
def series(samples, problem, ref_parameters=None, thinning=None):
"""
Creates and returns a plot of predicted time series for a given list of
``samples`` and a single-output or multi-output ``problem``.
Because this method runs simulations, it can take a considerable time to
run.
Returns a ``matplotlib`` figure object and axes handle.
Parameters
----------
samples
A list of samples, with shape ``(n_samples, n_parameters)``, where
`n_samples` is the number of samples in the list and ``n_parameters``
is the number of parameters.
problem
        A :class:`pints.SingleOutputProblem` or
        :class:`pints.MultiOutputProblem` with ``n_parameters`` equal to or
        greater than the ``n_parameters`` of the ``samples``. Any extra
parameters present in the chain but not accepted by the
``SingleOutputProblem`` or ``MultiOutputProblem`` (for example
parameters added by a noise model) will be ignored.
ref_parameters
A set of parameters for reference in the plot. For example,
if true values of parameters are known, they can be passed in for
plotting.
thinning
An integer greater than zero. If specified, only every
n-th sample (with ``n = thinning``) in the samples will be used. If
left at the default value ``None``, a value will be chosen so that
200 to 400 predictions are shown.
"""
import matplotlib.pyplot as plt
import numpy as np
# Check samples size
try:
n_sample, n_param = samples.shape
except ValueError:
raise ValueError('`samples` must be of shape (n_sample,'
+ ' n_parameters).')
# Get problem n_parameters
n_parameters = problem.n_parameters()
# Check reference parameters
if ref_parameters is not None:
if len(ref_parameters) != n_param and \
len(ref_parameters) != n_parameters:
raise ValueError(
'Length of `ref_parameters` must be same as number of'
' parameters.')
ref_series = problem.evaluate(ref_parameters[:n_parameters])
# Get number of problem output
n_outputs = problem.n_outputs()
# Get thinning rate
if thinning is None:
thinning = max(1, int(n_sample / 200))
else:
thinning = int(thinning)
if thinning < 1:
raise ValueError(
'Thinning rate must be `None` or an integer greater than'
' zero.')
# Get times
times = problem.times()
# Evaluate the model for all parameter sets in the samples
i = 0
predicted_values = []
for params in samples[::thinning, :n_parameters]:
predicted_values.append(problem.evaluate(params))
i += 1
predicted_values = np.array(predicted_values)
mean_values = np.mean(predicted_values, axis=0)
# Guess appropriate alpha (0.05 worked for 1000 plots)
alpha = max(0.05 * (1000 / (n_sample / thinning)), 0.5)
# Plot prediction
fig, axes = plt.subplots(n_outputs, 1, figsize=(8, np.sqrt(n_outputs) * 3),
sharex=True)
if n_outputs == 1:
plt.xlabel('Time')
plt.ylabel('Value')
plt.plot(
times, problem.values(), 'x', color='#7f7f7f', ms=6.5, alpha=0.5,
label='Original data')
plt.plot(
times, predicted_values[0], color='#1f77b4',
label='Inferred series')
for v in predicted_values[1:]:
plt.plot(times, v, color='#1f77b4', alpha=alpha)
plt.plot(times, mean_values, 'k:', lw=2,
label='Mean of inferred series')
# Add reference series if given
if ref_parameters is not None:
plt.plot(times, ref_series, color='#d62728', ls='--',
label='Reference series')
plt.legend()
elif n_outputs > 1:
# Remove horizontal space between axes and set common xlabel
fig.subplots_adjust(hspace=0)
axes[-1].set_xlabel('Time')
# Go through each output
for i_output in range(n_outputs):
axes[i_output].set_ylabel('Output %d' % (i_output + 1))
axes[i_output].plot(
times, problem.values()[:, i_output], 'x', color='#7f7f7f',
ms=6.5, alpha=0.5, label='Original data')
axes[i_output].plot(
times, predicted_values[0][:, i_output], color='#1f77b4',
label='Inferred series')
for v in predicted_values[1:]:
axes[i_output].plot(times, v[:, i_output], color='#1f77b4',
alpha=alpha)
axes[i_output].plot(times, mean_values[:, i_output], 'k:', lw=2,
label='Mean of inferred series')
# Add reference series if given
if ref_parameters is not None:
axes[i_output].plot(times, ref_series[:, i_output],
color='#d62728', ls='--',
label='Reference series')
axes[0].legend()
plt.tight_layout()
return fig, axes
def pairwise(samples,
kde=False,
heatmap=False,
opacity=None,
ref_parameters=None,
n_percentiles=None):
"""
Takes a markov chain or list of ``samples`` and creates a set of pairwise
scatterplots for all parameters (p1 versus p2, p1 versus p3, p2 versus p3,
etc.).
The returned plot is in a 'matrix' form, with histograms of each individual
parameter on the diagonal, and scatter plots of parameters ``i`` and ``j``
on each entry ``(i, j)`` below the diagonal.
Returns a ``matplotlib`` figure object and axes handle.
Parameters
----------
samples
A list of samples, with shape ``(n_samples, n_parameters)``, where
``n_samples`` is the number of samples in the list and ``n_parameters``
is the number of parameters.
kde
Set to ``True`` to use kernel-density estimation for the
histograms and scatter plots. Cannot use together with ``heatmap``.
heatmap
Set to ``True`` to plot heatmap for the pairwise plots.
Cannot be used together with ``kde``.
    opacity
This value can be used to manually set the opacity of the
points in the scatter plots (when ``kde=False`` and ``heatmap=False``
only).
ref_parameters
A set of parameters for reference in the plot. For example,
if true values of parameters are known, they can be passed in for
plotting.
n_percentiles
Shows only the middle n-th percentiles of the distribution.
Default shows all samples in ``samples``.
"""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
import warnings
from distutils.version import LooseVersion
# Check matplotlib version
use_old_matplotlib = LooseVersion(matplotlib.__version__) \
< LooseVersion("2.2")
# Check options kde and heatmap
if kde and heatmap:
raise ValueError('Cannot use `kde` and `heatmap` together.')
# Check samples size
try:
n_sample, n_param = samples.shape
except ValueError:
raise ValueError('`samples` must be of shape (n_sample,'
+ ' n_parameters).')
# Check number of parameters
if n_param < 2:
        raise ValueError('Number of parameters must be at least 2.')
# Check reference parameters
if ref_parameters is not None:
if len(ref_parameters) != n_param:
raise ValueError(
'Length of `ref_parameters` must be same as number of'
' parameters.')
# Create figure
fig_size = (3 * n_param, 3 * n_param)
fig, axes = plt.subplots(n_param, n_param, figsize=fig_size)
bins = 25
for i in range(n_param):
for j in range(n_param):
if i == j:
# Diagonal: Plot a histogram
if n_percentiles is None:
xmin, xmax = np.min(samples[:, i]), np.max(samples[:, i])
else:
xmin = np.percentile(samples[:, i],
50 - n_percentiles / 2.)
xmax = np.percentile(samples[:, i],
50 + n_percentiles / 2.)
xbins = np.linspace(xmin, xmax, bins)
axes[i, j].set_xlim(xmin, xmax)
if use_old_matplotlib: # pragma: no cover
axes[i, j].hist(samples[:, i], bins=xbins, normed=True)
else:
axes[i, j].hist(samples[:, i], bins=xbins, density=True)
# Add kde plot
if kde:
x = np.linspace(xmin, xmax, 100)
axes[i, j].plot(x, stats.gaussian_kde(samples[:, i])(x))
# Add reference parameters if given
if ref_parameters is not None:
ymin_tv, ymax_tv = axes[i, j].get_ylim()
axes[i, j].plot(
[ref_parameters[i], ref_parameters[i]],
[0.0, ymax_tv],
'--', c='k')
elif i < j:
# Top-right: no plot
axes[i, j].axis('off')
else:
# Lower-left: Plot the samples as density map
if n_percentiles is None:
xmin, xmax = np.min(samples[:, j]), np.max(samples[:, j])
ymin, ymax = np.min(samples[:, i]), np.max(samples[:, i])
else:
xmin = np.percentile(samples[:, j],
50 - n_percentiles / 2.)
xmax = np.percentile(samples[:, j],
50 + n_percentiles / 2.)
ymin = np.percentile(samples[:, i],
50 - n_percentiles / 2.)
ymax = np.percentile(samples[:, i],
50 + n_percentiles / 2.)
axes[i, j].set_xlim(xmin, xmax)
axes[i, j].set_ylim(ymin, ymax)
if not kde and not heatmap:
# Create scatter plot
# Determine point opacity
num_points = len(samples[:, i])
if opacity is None:
if num_points < 10:
opacity = 1.0
else:
opacity = 1.0 / np.log10(num_points)
# Scatter points
axes[i, j].scatter(
samples[:, j], samples[:, i], alpha=opacity, s=0.1)
elif kde:
# Create a KDE-based plot
# Plot values
values = np.vstack([samples[:, j], samples[:, i]])
axes[i, j].imshow(
np.rot90(values), cmap=plt.cm.Blues,
extent=[xmin, xmax, ymin, ymax])
# Create grid
xx, yy = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
positions = np.vstack([xx.ravel(), yy.ravel()])
# Get kernel density estimate and plot contours
kernel = stats.gaussian_kde(values)
f = np.reshape(kernel(positions).T, xx.shape)
axes[i, j].contourf(xx, yy, f, cmap='Blues')
axes[i, j].contour(xx, yy, f, colors='k')
# Force equal aspect ratio
# Matplotlib raises a warning here (on 2.7 at least)
# We can't do anything about it, so no other option than
# to suppress it at this stage...
with warnings.catch_warnings():
warnings.simplefilter('ignore', UnicodeWarning)
axes[i, j].set_aspect((xmax - xmin) / (ymax - ymin))
elif heatmap:
# Create a heatmap-based plot
# Create bins
xbins = np.linspace(xmin, xmax, bins)
ybins = np.linspace(ymin, ymax, bins)
# Plot heatmap
axes[i, j].hist2d(samples[:, j], samples[:, i],
bins=(xbins, ybins), cmap=plt.cm.Blues)
# Force equal aspect ratio
# Matplotlib raises a warning here (on 2.7 at least)
# We can't do anything about it, so no other option than
# to suppress it at this stage...
with warnings.catch_warnings():
warnings.simplefilter('ignore', UnicodeWarning)
axes[i, j].set_aspect((xmax - xmin) / (ymax - ymin))
# Add reference parameters if given
if ref_parameters is not None:
axes[i, j].plot(
[ref_parameters[j], ref_parameters[j]],
[ymin, ymax],
'--', c='k')
axes[i, j].plot(
[xmin, xmax],
[ref_parameters[i], ref_parameters[i]],
'--', c='k')
# Set tick labels
if i < n_param - 1:
# Only show x tick labels for the last row
axes[i, j].set_xticklabels([])
else:
# Rotate the x tick labels to fit in the plot
for tl in axes[i, j].get_xticklabels():
tl.set_rotation(45)
if j > 0:
# Only show y tick labels for the first column
axes[i, j].set_yticklabels([])
# Set axis labels
axes[-1, i].set_xlabel('Parameter %d' % (i + 1))
if i == 0:
# The first one is not a parameter
axes[i, 0].set_ylabel('Frequency')
else:
axes[i, 0].set_ylabel('Parameter %d' % (i + 1))
return fig, axes
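# Minimal usage sketch (illustrative only):
#   cov = [[1.0, 0.6], [0.6, 2.0]]
#   chain = np.random.multivariate_normal([0, 0], cov, size=5000)
#   fig, axes = pairwise(chain, ref_parameters=[0, 0])
# Diagonal entries show marginal histograms; the lower triangle shows pairwise scatter plots.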
| [
"[email protected]"
] | |
b7e335ec5f9b7c481858b08725dd834ca4d73b3b | 917d4f67f6033a0cc01ba2b3b7b07dab94dcffdf | /property/pages/views.py | 6104059f52839acc79ba33851e228e4120171433 | [] | no_license | hghimanshu/Django | 011156c484e6710a379be3fb7faf6ab814bde02c | 75bef769e615df2719b213884f7269a56b7ccb7b | refs/heads/master | 2023-02-19T08:49:35.691196 | 2022-03-21T09:03:58 | 2022-03-21T09:03:58 | 242,301,089 | 0 | 0 | null | 2023-02-15T18:19:31 | 2020-02-22T07:43:13 | CSS | UTF-8 | Python | false | false | 856 | py | from django.shortcuts import render
from django.http import HttpResponse
from listings.models import Listing
from realtors.models import Realtor
from listings.choices import price_choices, bedroom_choices, state_choices
# Create your views here.
def index(request):
listings = Listing.objects.order_by('-list_date').filter(is_published=True)[:3]
context = {
'listings': listings,
'state_choices': state_choices,
'bedroom_choices': bedroom_choices,
'price_choices': price_choices
}
return render(request, 'pages/index.html', context)
def about(request):
realtors = Realtor.objects.order_by('-hire_date')
mvp_realtors = Realtor.objects.all().filter(is_mvp=True)
context = {
'realtors': realtors,
'mvp': mvp_realtors
}
return render(request, 'pages/about.html', context)
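# Illustrative wiring sketch (hypothetical, not taken from this project): a pages/urls.py for
# these views could look like the following, assuming Django 2.0+ path():
#   from django.urls import path
#   from . import views
#   urlpatterns = [
#       path('', views.index, name='index'),
#       path('about', views.about, name='about'),
#   ]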
| [
"[email protected]"
] | |
0ec97c5ab8392c7cb7cdb4c1effd50efecf924ef | 474743374414f48e924919c524ad05534af94f9c | /nyCommuting.py | 32b8878bed161a5b2cc87e61982a1fac35ad5dda | [] | no_license | Perseus1993/nyMobility | aafbebcea28cd9950b329b967717b2d692960463 | 54b85a8638c5554ccad0c3c5c51e7695f1430ac7 | refs/heads/master | 2023-03-15T12:08:28.777956 | 2018-10-12T16:37:29 | 2018-10-12T16:37:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,756 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 5 17:25:14 2018
@author: doorleyr
"""
import pandas as pd
import json
from shapely.geometry import Point, shape
def get_location(longitude, latitude, regions_json, name):
# for a given lat and lon, and a given geojson, find the name of the feature into which the latLon falls
point = Point(longitude, latitude)
for record in regions_json['features']:
polygon = shape(record['geometry'])
if polygon.contains(point):
return record['properties'][name]
return 'None'
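# Minimal usage sketch (illustrative only): with the GeoJSON layers loaded below, a call such as
#   get_location(-73.99, 40.73, ntas, 'ntaname')
# returns the 'ntaname' property of the polygon containing that point, or 'None' if no feature
# contains it.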
#get communties geojson
communities=json.load(open('./spatialData/communityDistrictsManhattanOnly.geojson'))
ntas=json.load(open('./spatialData/Neighborhood Tabulation Areas.geojson'))
nycCounties=json.load(open('./spatialData/nycCounties.geojson'))
nj=json.load(open('./spatialData/newJersey.geojson'))
#get OD data
commuting=pd.read_csv('./od_data/tract2TractCommuting_NY.csv', skiprows=2)
#commuting['RESIDENCE']=commuting.apply(lambda row: str(row['RESIDENCE']).split(',')[0], axis=1)
#commuting['WORKPLACE']=commuting.apply(lambda row: str(row['WORKPLACE']).split(',')[0], axis=1)
commuting=commuting[~commuting['Workers 16 and Over'].isnull()]
commuting['Workers 16 and Over']=commuting.apply(lambda row: float(str(row['Workers 16 and Over']).replace(',',"")), axis=1)# in case there are commas for separating 000s
#get tracts geojson
tracts=json.load(open('./spatialData/2010 Census Tracts.geojson'))
tractsManhattan=tracts.copy()
#tractsManhattan['features']=[f for f in tracts['features'] if f['properties']['COUNTY']=='061']
tractsManhattan['features']=[f for f in tracts['features'] if f['properties']['boro_name']=='Manhattan']
#get the full list of origins and destination names
allTracts=set(commuting['RESIDENCE']).union(set(commuting['WORKPLACE']))
#tractNamesGeo=[tractsManhattan['features'][i]['properties']['NAME'] for i in range(len(tractsManhattan['features']))]
tractNamesGeo=[tractsManhattan['features'][i]['properties']['ctlabel'] for i in range(len(tractsManhattan['features']))]
#create empty dict of name to custom zones
#check if name contains New Jersey: map to NJ
#if not, check if not in New York County: map to the boro name
# if in NY county, look up the tract in the geojson and map to the ntaname
# OR get the tract centroid and find which community district its in
tracts2Comms={}
tracts2Nhoods={}
for t in allTracts:
if 'New Jersey' in t:
tracts2Comms[t]='New Jersey'
tracts2Nhoods[t]='New Jersey'
elif 'New York County' not in t:
tracts2Comms[t]=t.split(', ')[1]
tracts2Nhoods[t]=t.split(', ')[1]
else:
# tracts2Comms[t]=getLocation()
tractNum=t.split(',')[0].split(' ')[2]
tractInd=tractNamesGeo.index(tractNum)
# tracts2Nhoods[t]=tractsManhattan['features'][tractInd]['properties']['ntaname']
tractCentroid=shape(tractsManhattan['features'][tractInd]['geometry']).centroid
comm=get_location(tractCentroid.x, tractCentroid.y, communities, 'Name')
nHood=get_location(tractCentroid.x, tractCentroid.y, ntas, 'ntaname')
if comm=='None':
print(t)
if tractNum =='309':
comm='Bronx County' #exception: this census tract is actually in New York County but not considered part of any of the Manhattan community districts
if nHood=='None':
print('nHood')
print(t)
tracts2Comms[t]=comm
tracts2Nhoods[t]=nHood
commuting['oComm']=commuting.apply(lambda row: tracts2Comms[row['RESIDENCE']], axis=1)
commuting['dComm']=commuting.apply(lambda row: tracts2Comms[row['WORKPLACE']], axis=1)
commuting['oNhood']=commuting.apply(lambda row: tracts2Nhoods[row['RESIDENCE']], axis=1)
commuting['dNhood']=commuting.apply(lambda row: tracts2Nhoods[row['WORKPLACE']], axis=1)
#commuting['simpleMode']=commuting.apply(lambda row: cpttModeDict[row['Means of Transportation 18']], axis=1)
odComms=pd.crosstab(commuting['oComm'], commuting['dComm'], commuting['Workers 16 and Over'], aggfunc="sum").fillna(0)
odNHoods=pd.crosstab(commuting['oNhood'], commuting['dNhood'], commuting['Workers 16 and Over'], aggfunc="sum").fillna(0)
odComms.to_csv('./results/od_communityDistricts.csv')
odNHoods.to_csv('./results/od_neighbourhoods.csv')
odCommsMode=commuting.groupby(by=['oComm', 'dComm', 'Means of Transportation 18'], as_index=False).sum()
odNHoodsMode=commuting.groupby(by=['oNhood', 'dNhood', 'Means of Transportation 18'], as_index=False).sum()
#create a geojson including all the zones for the community district aggregation
geoOutComms=nycCounties.copy()
geoOutComms['features']=[]
for g in nycCounties['features']:
if g['properties']['NAME']+' County' in odComms.columns.values:
geoOutComms['features'].append({'properties':{'Name': g['properties']['NAME']+' County', 'type':'County'}, 'geometry': g['geometry'], 'type':'Feature'})
for c in communities['features']:
if c['properties']['Name'] in odComms.columns.values:
geoOutComms['features'].append({'properties':{'Name': c['properties']['Name'], 'type':'Community'}, 'geometry': c['geometry'], 'type':'Feature'})
geoOutComms['features'].append({'properties':{'Name': 'New Jersey', 'type':'State'}, 'geometry': nj['features'][0]['geometry'], 'type':'Feature'})
#create a geojson including all the zones for the nta aggregation
geoOutNHoods=nycCounties.copy()
geoOutNHoods['features']=[]
for g in nycCounties['features']:
if g['properties']['NAME']+' County' in odNHoods.columns.values:
geoOutNHoods['features'].append({'properties':{'Name': g['properties']['NAME']+' County', 'type':'County'}, 'geometry': g['geometry'], 'type':'Feature'})
for c in ntas['features']:
if c['properties']['ntaname'] in odNHoods.columns.values:
geoOutNHoods['features'].append({'properties':{'Name': c['properties']['ntaname'], 'type':'Neighbourhood'}, 'geometry': c['geometry'], 'type':'Feature'})
geoOutNHoods['features'].append({'properties':{'Name': 'New Jersey', 'type':'State'}, 'geometry': nj['features'][0]['geometry'], 'type':'Feature'})
#geoOutNHoods=nycCounties.copy()
#geoOutNHoods['features']=[g for g in nycCounties['features'] if g['properties']['NAME']+' County' in odNHoods.columns.values]
#for c in ntas['features']:
# if c['properties']['ntaname'] in odNHoods.columns.values:
# geoOutNHoods['features'].extend([c])
#geoOutNHoods['features'].extend([nj['features'][0]])
json.dump(geoOutComms, open('./results/allZonesComms.geojson', 'w'))
json.dump(geoOutNHoods, open('./results/allZonesNHoods.geojson', 'w'))
odCommsMode.to_csv('./results/od_communityDistricts_byMode.csv')
odNHoodsMode.to_csv('./results/od_neighbourhoods_byMode.csv')
| [
"[email protected]"
] | |
fce4178c6cb9494989c22ab01f8bd58fe80f1f06 | 3f2a1b1298883a147217fac62abf3e447f3328bd | /生成数据/scatter_squares.py | b3435ed3575348c54d41f4fc363df6554455bc17 | [] | no_license | wuxudongxd/python_code | c23a5e665d2f74b0b50839f3230b343b914d4a27 | 4a7bacc8061797c1344d9216813cdc02985fb0a3 | refs/heads/master | 2022-04-09T02:42:30.702003 | 2019-06-27T14:43:35 | 2019-06-27T14:43:35 | 234,686,253 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 595 | py | import matplotlib.pyplot as plt
x_values = list(range(1, 1001))
y_values = [x ** 2 for x in x_values]
plt.scatter(x_values, y_values, c = y_values, cmap = plt.cm.Blues, edgecolor = 'none', s = 40)
# Set the chart title and label the axes
plt.title("Square Numbers", fontsize = 24)
plt.xlabel("Value", fontsize = 14)
plt.ylabel("Square of Value", fontsize = 14)
# Set the size of the tick labels
plt.tick_params(axis = 'both', which = 'major', labelsize = 14)
# Set the value range of each axis
plt.axis([0, 1100, 0, 1100000])
plt.savefig('squares_plot.png', bbox_inches = 'tight')
| [
"[email protected]"
] | |
c1854c18c3be9690a31572a2e09f19b60d45d596 | c5174b07aa1f2eefc9d1aa156973a947c7906591 | /Utilities/custon_logging.py | 78939a565ff30237090a2d3b3b35c5bddab56235 | [] | no_license | rajabhinav02/selenium-python-framework | 5e3a749fda815bbb9eea2a7479e7f80f301bd77c | 69e464fdfcb054b2688291522e28f35b85c0b2e7 | refs/heads/master | 2023-06-07T10:49:55.314883 | 2021-07-13T15:37:33 | 2021-07-13T15:37:33 | 385,606,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | import inspect
import logging
def loggingtest(loglevel):
tcname = inspect.stack()[1][3]
logger= logging.getLogger(tcname)
logger.setLevel(logging.DEBUG)
#filehandler = logging.FileHandler("{0}.log".format(tcname))
filehandler= logging.FileHandler("auto.log", mode="a")
filehandler.setLevel(loglevel)
format = logging.Formatter("%(asctime)s : %(levelname)s : %(name)s : %(message)s")
filehandler.setFormatter(format)
logger.addHandler(filehandler)
return logger | [
"[email protected]"
] | |
51cd8ac2631b4848011d9365c3105f0a997c90de | ae9555e70027feb5502dc032852767fa77b9207b | /code/visualize.py | 72eafaf0c4849e61dfb47e5c911334cebcf70ff5 | [] | no_license | LuvYamashitaMizuki/sbert | b8d9495263cc0a9b58ae4ae7911f946499561bdf | 4da951ff27c503013d7dab2e6b57352488bed1ad | refs/heads/master | 2023-05-05T04:50:53.897329 | 2021-05-22T07:19:30 | 2021-05-22T07:19:30 | 369,705,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | words = []
data = []
for line in open("models/bert_embeddings11313.txt"):
embedding = line.split()
words.append(embedding[0])
embedding = list(map(float, embedding[1:]))
data.append(embedding)
f = open("bert_visualize.tsv", "w+")
for i in range(len(data)):
    # use a separate index for the inner loop so `i` keeps referring to the current row
    for j, pnt in enumerate(data[i]):
        if j == len(data[i]) - 1:
f.write(str(pnt) + '\n')
else:
f.write(str(pnt) + '\t')
f = open("bert_visualize_words.tsv", "w+")
for i in range(len(words)):
f.write(words[i] + '\n')
| [
"[email protected]"
] | |
db5ec5010620f9dd0771c6bf99d56cad7dbaf3df | f39d030a25f63f5e60098b7069bfe129d7ffaa4e | /mapwidget/urls.py | 820a130c81cb55f15e47e30f360d29486c1ace39 | [] | no_license | RHoK-November-2012/zelenamapa | d248c4104b7ed497ca29aee1759567db7370f009 | 93080a67107dedeca1c9bc28177cdce4b2e7bc17 | refs/heads/master | 2021-01-15T22:09:16.899268 | 2012-12-10T12:57:26 | 2012-12-10T12:57:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 709 | py | from django.conf.urls.defaults import patterns, include, url
from django.views.generic.simple import direct_to_template
from django.views.generic.simple import redirect_to
from views import *
urlpatterns = patterns("",
url(r"^(?P<w>\d+)x(?P<h>\d+)/(?P<pk>\d+)/$",
map_view,
name="zm.mapwidget.map", ),
url(r"^(?P<w>\d+)x(?P<h>\d+)/(?P<pk>\d+)/div/$",
map_div_view,
name="zm.mapwidget.map.div", ),
url(r"^mapconfig(?P<pk>\d+).js$",
mapconfig_js_view,
name="zm.mapwidget.map_js", ),
url(r"^mapconfig_div(?P<pk>\d+).js$",
mapconfig_js_view,
{"template": "mapwidget/mapconfig_div.js"},
name="zm.mapwidget.map_div_js", ),
) | [
"[email protected]"
] | |
35d65c1e31e62c13f7fa5ec8809270c77652898d | bbfd23efb084d9c13add9a36744cad94224762d2 | /octoprint_raisecloud/raisecloud.py | 39b54af88113f6dd8252d1b8bc41cc8f11e0320c | [] | no_license | Raise3D/Octoprint-Raisecloud | a68842dd8839a12640ac86ae8cfd529fcb2b8742 | 35975de651e15ac8fdb4a2eecea1f7ab8e25038d | refs/heads/master | 2023-08-31T09:41:39.477679 | 2023-08-17T07:52:43 | 2023-08-17T07:52:43 | 255,273,781 | 7 | 7 | null | 2023-08-17T07:52:44 | 2020-04-13T08:40:52 | Python | UTF-8 | Python | false | false | 3,237 | py | # coding=utf-8
from __future__ import absolute_import, unicode_literals
import json
import base64
import logging
import requests
from Crypto.Cipher import AES
_logger = logging.getLogger('octoprint.plugins.raisecloud')
class RaiseCloud(object):
def __init__(self, machine_id, printer_name, machine_type):
self.endpoint = "https://api.raise3d.com/octoprod-v1.1"
self.url = "/user/keyLogin"
self.machine_id = machine_id
self.machine_type = machine_type
self.machine_name = printer_name
def login_cloud(self, content):
body = {
"machine_id": self.machine_id,
"machine_type": self.machine_type,
"machine_name": self.machine_name,
"key": content
}
url = "{}{}".format(self.endpoint, self.url)
try:
result = requests.post(url=url, json=body, verify=True)
if result.status_code == 200:
data = json.loads(result.text)
                state = data["state"]  # state: 0 - binding limit reached, 1 - token returned normally, 3 - username/password mismatch
message = data["msg"]
if state == 1:
token = data["data"]["token"]
group_name = data["data"]["group_name"]
if data["data"]["team_owner"]:
group_owner = data["data"]["team_owner"]
else:
group_owner = ""
return {"state": 1, "msg": message, "token": token, "group_name": group_name,
"machine_id": self.machine_id, "group_owner": group_owner}
return {"state": state, "msg": message}
return {"state": -1, "msg": "Login error"}
except Exception as e:
return {"state": -1, "msg": "Login error"}
class Util(object):
@staticmethod
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ["raisepem"]
@staticmethod
def decrypt(content):
if content:
secret = 'raiseqwertyuiopa'
            key = secret.encode("utf8")  # for Python 3 compatibility
decode = base64.b64decode(content)
cryptor = AES.new(key, AES.MODE_ECB)
plain_text = cryptor.decrypt(decode)
unpad = lambda s: s[0:-ord(s[-1:])]
data = json.loads(bytes.decode(unpad(plain_text)))
return {"user_name": data["user_name"]}
return {"user_name": ""}
def access_key(self, file_name, file_path):
"""
:return: content user_name
"""
try:
if self.allowed_file(file_name):
with open(file_path, 'r') as load_f:
content = json.load(load_f)["content"] # to bytes
# content = str.encode(content)
result = self.decrypt(content)
return result["user_name"], content
return "", ""
except Exception as e:
_logger.error(e)
_logger.error("Get access key error...")
return "", ""
def get_access_key(file_name, file_path):
util = Util()
return util.access_key(file_name, file_path)
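# Illustrative round-trip sketch (a guess at the key-blob layout, not taken from the plugin):
# it assumes the blob is base64(AES-ECB(padded JSON)) with the chr(n)*n padding implied by the
# unpad lambda in Util.decrypt; the real .raisepem layout may differ. Python 3 only.
if __name__ == "__main__":
    def _encrypt_for_test(payload):
        raw = json.dumps(payload).encode("utf8")
        pad_len = 16 - len(raw) % 16
        raw += bytes([pad_len]) * pad_len  # pad to a whole 16-byte AES block
        cipher = AES.new("raiseqwertyuiopa".encode("utf8"), AES.MODE_ECB)
        return base64.b64encode(cipher.encrypt(raw))
    blob = _encrypt_for_test({"user_name": "alice"})
    print(Util.decrypt(blob))  # -> {'user_name': 'alice'}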
| [
"[email protected]"
] | |
68c3596f7b0719e22f39a5bb9add3cf40d285973 | 893f83189700fefeba216e6899d42097cc0bec70 | /bioinformatics/photoscan-pro/python/lib/python3.5/site-packages/qtconsole/jupyter_widget.py | c2a8969866d3d2b0203d59d00013fe1e00dc58b6 | [
"GPL-3.0-only",
"Apache-2.0",
"MIT",
"Python-2.0"
] | permissive | pseudoPixels/SciWorCS | 79249198b3dd2a2653d4401d0f028f2180338371 | e1738c8b838c71b18598ceca29d7c487c76f876b | refs/heads/master | 2021-06-10T01:08:30.242094 | 2018-12-06T18:53:34 | 2018-12-06T18:53:34 | 140,774,351 | 0 | 1 | MIT | 2021-06-01T22:23:47 | 2018-07-12T23:33:53 | Python | UTF-8 | Python | false | false | 22,144 | py | """A FrontendWidget that emulates a repl for a Jupyter kernel.
This supports the additional functionality provided by Jupyter kernel.
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from collections import namedtuple
import os.path
import re
from subprocess import Popen
import sys
import time
from textwrap import dedent
from warnings import warn
from qtconsole.qt import QtCore, QtGui
from qtconsole import __version__
from traitlets import Bool, Unicode
from .frontend_widget import FrontendWidget
from . import styles
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
# Default strings to build and display input and output prompts (and separators
# in between)
default_in_prompt = 'In [<span class="in-prompt-number">%i</span>]: '
default_out_prompt = 'Out[<span class="out-prompt-number">%i</span>]: '
default_input_sep = '\n'
default_output_sep = ''
default_output_sep2 = ''
# Base path for most payload sources.
zmq_shell_source = 'ipykernel.zmqshell.ZMQInteractiveShell'
if sys.platform.startswith('win'):
default_editor = 'notepad'
else:
default_editor = ''
#-----------------------------------------------------------------------------
# JupyterWidget class
#-----------------------------------------------------------------------------
class IPythonWidget(FrontendWidget):
"""Dummy class for config inheritance. Destroyed below."""
class JupyterWidget(IPythonWidget):
"""A FrontendWidget for a Jupyter kernel."""
# If set, the 'custom_edit_requested(str, int)' signal will be emitted when
# an editor is needed for a file. This overrides 'editor' and 'editor_line'
# settings.
custom_edit = Bool(False)
custom_edit_requested = QtCore.Signal(object, object)
editor = Unicode(default_editor, config=True,
help="""
A command for invoking a system text editor. If the string contains a
{filename} format specifier, it will be used. Otherwise, the filename
will be appended to the end the command.
""")
editor_line = Unicode(config=True,
help="""
The editor command to use when a specific line number is requested. The
string should contain two format specifiers: {line} and {filename}. If
this parameter is not specified, the line number option to the %edit
magic will be ignored.
""")
style_sheet = Unicode(config=True,
help="""
A CSS stylesheet. The stylesheet can contain classes for:
1. Qt: QPlainTextEdit, QFrame, QWidget, etc
2. Pygments: .c, .k, .o, etc. (see PygmentsHighlighter)
3. QtConsole: .error, .in-prompt, .out-prompt, etc
""")
syntax_style = Unicode(config=True,
help="""
If not empty, use this Pygments style for syntax highlighting.
Otherwise, the style sheet is queried for Pygments style
information.
""")
# Prompts.
in_prompt = Unicode(default_in_prompt, config=True)
out_prompt = Unicode(default_out_prompt, config=True)
input_sep = Unicode(default_input_sep, config=True)
output_sep = Unicode(default_output_sep, config=True)
output_sep2 = Unicode(default_output_sep2, config=True)
# JupyterWidget protected class variables.
_PromptBlock = namedtuple('_PromptBlock', ['block', 'length', 'number'])
_payload_source_edit = 'edit_magic'
_payload_source_exit = 'ask_exit'
_payload_source_next_input = 'set_next_input'
_payload_source_page = 'page'
_retrying_history_request = False
_starting = False
#---------------------------------------------------------------------------
# 'object' interface
#---------------------------------------------------------------------------
def __init__(self, *args, **kw):
super(JupyterWidget, self).__init__(*args, **kw)
# JupyterWidget protected variables.
self._payload_handlers = {
self._payload_source_edit : self._handle_payload_edit,
self._payload_source_exit : self._handle_payload_exit,
self._payload_source_page : self._handle_payload_page,
self._payload_source_next_input : self._handle_payload_next_input }
self._previous_prompt_obj = None
self._keep_kernel_on_exit = None
# Initialize widget styling.
if self.style_sheet:
self._style_sheet_changed()
self._syntax_style_changed()
else:
self.set_default_style()
#---------------------------------------------------------------------------
# 'BaseFrontendMixin' abstract interface
#
# For JupyterWidget, override FrontendWidget methods which implement the
# BaseFrontend Mixin abstract interface
#---------------------------------------------------------------------------
def _handle_complete_reply(self, rep):
"""Support Jupyter's improved completion machinery.
"""
self.log.debug("complete: %s", rep.get('content', ''))
cursor = self._get_cursor()
info = self._request_info.get('complete')
if info and info.id == rep['parent_header']['msg_id'] and \
info.pos == cursor.position():
content = rep['content']
matches = content['matches']
start = content['cursor_start']
end = content['cursor_end']
start = max(start, 0)
end = max(end, start)
# Move the control's cursor to the desired end point
cursor_pos = self._get_input_buffer_cursor_pos()
if end < cursor_pos:
cursor.movePosition(QtGui.QTextCursor.Left,
n=(cursor_pos - end))
elif end > cursor_pos:
cursor.movePosition(QtGui.QTextCursor.Right,
n=(end - cursor_pos))
# This line actually applies the move to control's cursor
self._control.setTextCursor(cursor)
offset = end - start
# Move the local cursor object to the start of the match and
# complete.
cursor.movePosition(QtGui.QTextCursor.Left, n=offset)
self._complete_with_items(cursor, matches)
def _handle_execute_reply(self, msg):
"""Support prompt requests.
"""
msg_id = msg['parent_header'].get('msg_id')
info = self._request_info['execute'].get(msg_id)
if info and info.kind == 'prompt':
content = msg['content']
if content['status'] == 'aborted':
self._show_interpreter_prompt()
else:
number = content['execution_count'] + 1
self._show_interpreter_prompt(number)
self._request_info['execute'].pop(msg_id)
else:
super(JupyterWidget, self)._handle_execute_reply(msg)
def _handle_history_reply(self, msg):
""" Handle history tail replies, which are only supported
by Jupyter kernels.
"""
content = msg['content']
if 'history' not in content:
self.log.error("History request failed: %r"%content)
if content.get('status', '') == 'aborted' and \
not self._retrying_history_request:
# a *different* action caused this request to be aborted, so
# we should try again.
self.log.error("Retrying aborted history request")
# prevent multiple retries of aborted requests:
self._retrying_history_request = True
# wait out the kernel's queue flush, which is currently timed at 0.1s
time.sleep(0.25)
self.kernel_client.history(hist_access_type='tail',n=1000)
else:
self._retrying_history_request = False
return
# reset retry flag
self._retrying_history_request = False
history_items = content['history']
self.log.debug("Received history reply with %i entries", len(history_items))
items = []
last_cell = u""
for _, _, cell in history_items:
cell = cell.rstrip()
if cell != last_cell:
items.append(cell)
last_cell = cell
self._set_history(items)
def _insert_other_input(self, cursor, content):
"""Insert function for input from other frontends"""
cursor.beginEditBlock()
start = cursor.position()
n = content.get('execution_count', 0)
cursor.insertText('\n')
self._insert_html(cursor, self._make_in_prompt(n))
cursor.insertText(content['code'])
self._highlighter.rehighlightBlock(cursor.block())
cursor.endEditBlock()
def _handle_execute_input(self, msg):
"""Handle an execute_input message"""
self.log.debug("execute_input: %s", msg.get('content', ''))
if self.include_output(msg):
self._append_custom(self._insert_other_input, msg['content'], before_prompt=True)
def _handle_execute_result(self, msg):
"""Handle an execute_result message"""
if self.include_output(msg):
self.flush_clearoutput()
content = msg['content']
prompt_number = content.get('execution_count', 0)
data = content['data']
if 'text/plain' in data:
self._append_plain_text(self.output_sep, True)
self._append_html(self._make_out_prompt(prompt_number), True)
text = data['text/plain']
# If the repr is multiline, make sure we start on a new line,
# so that its lines are aligned.
if "\n" in text and not self.output_sep.endswith("\n"):
self._append_plain_text('\n', True)
self._append_plain_text(text + self.output_sep2, True)
def _handle_display_data(self, msg):
"""The base handler for the ``display_data`` message."""
# For now, we don't display data from other frontends, but we
# eventually will as this allows all frontends to monitor the display
# data. But we need to figure out how to handle this in the GUI.
if self.include_output(msg):
self.flush_clearoutput()
data = msg['content']['data']
metadata = msg['content']['metadata']
# In the regular JupyterWidget, we simply print the plain text
# representation.
if 'text/plain' in data:
text = data['text/plain']
self._append_plain_text(text, True)
# This newline seems to be needed for text and html output.
self._append_plain_text(u'\n', True)
def _handle_kernel_info_reply(self, rep):
"""Handle kernel info replies."""
content = rep['content']
self.kernel_banner = content.get('banner', '')
if self._starting:
# finish handling started channels
self._starting = False
super(JupyterWidget, self)._started_channels()
def _started_channels(self):
"""Make a history request"""
self._starting = True
self.kernel_client.kernel_info()
self.kernel_client.history(hist_access_type='tail', n=1000)
#---------------------------------------------------------------------------
# 'FrontendWidget' protected interface
#---------------------------------------------------------------------------
def _process_execute_error(self, msg):
"""Handle an execute_error message"""
content = msg['content']
traceback = '\n'.join(content['traceback']) + '\n'
if False:
# FIXME: For now, tracebacks come as plain text, so we can't use
# the html renderer yet. Once we refactor ultratb to produce
# properly styled tracebacks, this branch should be the default
            traceback = traceback.replace(' ', '&nbsp;')
traceback = traceback.replace('\n', '<br/>')
ename = content['ename']
ename_styled = '<span class="error">%s</span>' % ename
traceback = traceback.replace(ename, ename_styled)
self._append_html(traceback)
else:
# This is the fallback for now, using plain text with ansi escapes
self._append_plain_text(traceback)
def _process_execute_payload(self, item):
""" Reimplemented to dispatch payloads to handler methods.
"""
handler = self._payload_handlers.get(item['source'])
if handler is None:
# We have no handler for this type of payload, simply ignore it
return False
else:
handler(item)
return True
def _show_interpreter_prompt(self, number=None):
""" Reimplemented for IPython-style prompts.
"""
# If a number was not specified, make a prompt number request.
if number is None:
msg_id = self.kernel_client.execute('', silent=True)
info = self._ExecutionRequest(msg_id, 'prompt')
self._request_info['execute'][msg_id] = info
return
# Show a new prompt and save information about it so that it can be
# updated later if the prompt number turns out to be wrong.
self._prompt_sep = self.input_sep
self._show_prompt(self._make_in_prompt(number), html=True)
block = self._control.document().lastBlock()
length = len(self._prompt)
self._previous_prompt_obj = self._PromptBlock(block, length, number)
# Update continuation prompt to reflect (possibly) new prompt length.
self._set_continuation_prompt(
self._make_continuation_prompt(self._prompt), html=True)
def _show_interpreter_prompt_for_reply(self, msg):
""" Reimplemented for IPython-style prompts.
"""
# Update the old prompt number if necessary.
content = msg['content']
# abort replies do not have any keys:
if content['status'] == 'aborted':
if self._previous_prompt_obj:
previous_prompt_number = self._previous_prompt_obj.number
else:
previous_prompt_number = 0
else:
previous_prompt_number = content['execution_count']
if self._previous_prompt_obj and \
self._previous_prompt_obj.number != previous_prompt_number:
block = self._previous_prompt_obj.block
# Make sure the prompt block has not been erased.
if block.isValid() and block.text():
# Remove the old prompt and insert a new prompt.
cursor = QtGui.QTextCursor(block)
cursor.movePosition(QtGui.QTextCursor.Right,
QtGui.QTextCursor.KeepAnchor,
self._previous_prompt_obj.length)
prompt = self._make_in_prompt(previous_prompt_number)
self._prompt = self._insert_html_fetching_plain_text(
cursor, prompt)
# When the HTML is inserted, Qt blows away the syntax
# highlighting for the line, so we need to rehighlight it.
self._highlighter.rehighlightBlock(cursor.block())
self._previous_prompt_obj = None
# Show a new prompt with the kernel's estimated prompt number.
self._show_interpreter_prompt(previous_prompt_number + 1)
#---------------------------------------------------------------------------
# 'JupyterWidget' interface
#---------------------------------------------------------------------------
def set_default_style(self, colors='lightbg'):
""" Sets the widget style to the class defaults.
Parameters
----------
colors : str, optional (default lightbg)
Whether to use the default light background or dark
background or B&W style.
"""
colors = colors.lower()
if colors=='lightbg':
self.style_sheet = styles.default_light_style_sheet
self.syntax_style = styles.default_light_syntax_style
elif colors=='linux':
self.style_sheet = styles.default_dark_style_sheet
self.syntax_style = styles.default_dark_syntax_style
elif colors=='nocolor':
self.style_sheet = styles.default_bw_style_sheet
self.syntax_style = styles.default_bw_syntax_style
else:
raise KeyError("No such color scheme: %s"%colors)
#---------------------------------------------------------------------------
# 'JupyterWidget' protected interface
#---------------------------------------------------------------------------
def _edit(self, filename, line=None):
""" Opens a Python script for editing.
Parameters
----------
filename : str
A path to a local system file.
line : int, optional
A line of interest in the file.
"""
if self.custom_edit:
self.custom_edit_requested.emit(filename, line)
elif not self.editor:
self._append_plain_text('No default editor available.\n'
'Specify a GUI text editor in the `JupyterWidget.editor` '
'configurable to enable the %edit magic')
else:
try:
filename = '"%s"' % filename
if line and self.editor_line:
command = self.editor_line.format(filename=filename,
line=line)
else:
try:
command = self.editor.format()
except KeyError:
command = self.editor.format(filename=filename)
else:
command += ' ' + filename
except KeyError:
self._append_plain_text('Invalid editor command.\n')
else:
try:
Popen(command, shell=True)
except OSError:
msg = 'Opening editor with command "%s" failed.\n'
self._append_plain_text(msg % command)
def _make_in_prompt(self, number):
""" Given a prompt number, returns an HTML In prompt.
"""
try:
body = self.in_prompt % number
except TypeError:
# allow in_prompt to leave out number, e.g. '>>> '
from xml.sax.saxutils import escape
body = escape(self.in_prompt)
return '<span class="in-prompt">%s</span>' % body
def _make_continuation_prompt(self, prompt):
""" Given a plain text version of an In prompt, returns an HTML
continuation prompt.
"""
end_chars = '...: '
space_count = len(prompt.lstrip('\n')) - len(end_chars)
body = ' ' * space_count + end_chars
return '<span class="in-prompt">%s</span>' % body
def _make_out_prompt(self, number):
""" Given a prompt number, returns an HTML Out prompt.
"""
try:
body = self.out_prompt % number
except TypeError:
# allow out_prompt to leave out number, e.g. '<<< '
from xml.sax.saxutils import escape
body = escape(self.out_prompt)
return '<span class="out-prompt">%s</span>' % body
#------ Payload handlers --------------------------------------------------
# Payload handlers with a generic interface: each takes the opaque payload
# dict, unpacks it and calls the underlying functions with the necessary
# arguments.
def _handle_payload_edit(self, item):
self._edit(item['filename'], item['line_number'])
def _handle_payload_exit(self, item):
self._keep_kernel_on_exit = item['keepkernel']
self.exit_requested.emit(self)
def _handle_payload_next_input(self, item):
self.input_buffer = item['text']
def _handle_payload_page(self, item):
# Since the plain text widget supports only a very small subset of HTML
# and we have no control over the HTML source, we only page HTML
# payloads in the rich text widget.
data = item['data']
if 'text/html' in data and self.kind == 'rich':
self._page(data['text/html'], html=True)
else:
self._page(data['text/plain'], html=False)
#------ Trait change handlers --------------------------------------------
def _style_sheet_changed(self):
""" Set the style sheets of the underlying widgets.
"""
self.setStyleSheet(self.style_sheet)
if self._control is not None:
self._control.document().setDefaultStyleSheet(self.style_sheet)
bg_color = self._control.palette().window().color()
self._ansi_processor.set_background_color(bg_color)
if self._page_control is not None:
self._page_control.document().setDefaultStyleSheet(self.style_sheet)
def _syntax_style_changed(self):
""" Set the style for the syntax highlighter.
"""
if self._highlighter is None:
# ignore premature calls
return
if self.syntax_style:
self._highlighter.set_style(self.syntax_style)
else:
self._highlighter.set_style_sheet(self.style_sheet)
#------ Trait default initializers -----------------------------------------
def _banner_default(self):
return "Jupyter QtConsole {version}\n".format(version=__version__)
# clobber IPythonWidget above:
class IPythonWidget(JupyterWidget):
"""Deprecated class. Use JupyterWidget"""
def __init__(self, *a, **kw):
warn("IPythonWidget is deprecated, use JupyterWidget")
super(IPythonWidget, self).__init__(*a, **kw)
| [
"[email protected]"
] | |
e591fa3628ca4a4f93b25edfbf537c50c7d91cc0 | ea17f5e7e3cfe51198bb014db152d22df827a998 | /variable_delay/third_party/mininet/node.py | 28c7db052841156d69f1f76e06cedcae849015ad | [
"LicenseRef-scancode-x11-stanford"
] | permissive | JerryLX/CoCo-beholder | e3bb15954c66e87fd209820cddea1adadc89af99 | 0c6698fcbf3134ae167e8f10a7b631b34957b726 | refs/heads/main | 2023-04-19T08:13:48.205838 | 2021-04-18T13:38:09 | 2021-04-18T13:38:09 | 357,019,424 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 61,673 | py | """
Node objects for Mininet.
Nodes provide a simple abstraction for interacting with hosts, switches
and controllers. Local nodes are simply one or more processes on the local
machine.
Node: superclass for all (primarily local) network nodes.
Host: a virtual host. By default, a host is simply a shell; commands
may be sent using Cmd (which waits for output), or using sendCmd(),
which returns immediately, allowing subsequent monitoring using
monitor(). Examples of how to run experiments using this
functionality are provided in the examples/ directory. By default,
hosts share the root file system, but they may also specify private
directories.
CPULimitedHost: a virtual host whose CPU bandwidth is limited by
RT or CFS bandwidth limiting.
Switch: superclass for switch nodes.
UserSwitch: a switch using the user-space switch from the OpenFlow
reference implementation.
OVSSwitch: a switch using the Open vSwitch OpenFlow-compatible switch
implementation (openvswitch.org).
OVSBridge: an Ethernet bridge implemented using Open vSwitch.
Supports STP.
IVSSwitch: OpenFlow switch using the Indigo Virtual Switch.
Controller: superclass for OpenFlow controllers. The default controller
is controller(8) from the reference implementation.
OVSController: The test controller from Open vSwitch.
NOXController: a controller node using NOX (noxrepo.org).
Ryu: The Ryu controller (https://osrg.github.io/ryu/)
RemoteController: a remote controller node, which may use any
arbitrary OpenFlow-compatible controller, and which is not
created or managed by Mininet.
Future enhancements:
- Possibly make Node, Switch and Controller more abstract so that
they can be used for both local and remote nodes
- Create proxy objects for remote nodes (Mininet: Cluster Edition)
"""
import os
import pty
import re
import signal
import select
from subprocess import Popen, PIPE
from time import sleep
from variable_delay.third_party.mininet.log import info, error, warn, debug
from variable_delay.third_party.mininet.util import ( quietRun, errRun, errFail, moveIntf, isShellBuiltin,
numCores, retry, mountCgroups, BaseString, decode,
encode, Python3, which )
from variable_delay.third_party.mininet.moduledeps import moduleDeps, pathCheck, TUN
from variable_delay.third_party.mininet.link import Link, Intf, TCIntf, OVSIntf
from re import findall
from distutils.version import StrictVersion
class Node( object ):
"""A virtual network node is simply a shell in a network namespace.
We communicate with it using pipes."""
portBase = 0 # Nodes always start with eth0/port0, even in OF 1.0
def __init__( self, name, inNamespace=True, **params ):
"""name: name of node
inNamespace: in network namespace?
privateDirs: list of private directory strings or tuples
params: Node parameters (see config() for details)"""
# Make sure class actually works
self.checkSetup()
self.name = params.get( 'name', name )
self.privateDirs = params.get( 'privateDirs', [] )
self.inNamespace = params.get( 'inNamespace', inNamespace )
# Python 3 complains if we don't wait for shell exit
self.waitExited = params.get( 'waitExited', Python3 )
# Stash configuration parameters for future reference
self.params = params
self.intfs = {} # dict of port numbers to interfaces
self.ports = {} # dict of interfaces to port numbers
# replace with Port objects, eventually ?
self.nameToIntf = {} # dict of interface names to Intfs
# Make pylint happy
( self.shell, self.execed, self.pid, self.stdin, self.stdout,
self.lastPid, self.lastCmd, self.pollOut ) = (
None, None, None, None, None, None, None, None )
self.waiting = False
self.readbuf = ''
# Start command interpreter shell
self.master, self.slave = None, None # pylint
self.startShell()
self.mountPrivateDirs()
# File descriptor to node mapping support
# Class variables and methods
inToNode = {} # mapping of input fds to nodes
outToNode = {} # mapping of output fds to nodes
@classmethod
def fdToNode( cls, fd ):
"""Return node corresponding to given file descriptor.
fd: file descriptor
returns: node"""
node = cls.outToNode.get( fd )
return node or cls.inToNode.get( fd )
# Command support via shell process in namespace
def startShell( self, mnopts=None ):
"Start a shell process for running commands"
if self.shell:
error( "%s: shell is already running\n" % self.name )
return
# vdlocalmnexec: (c)lose descriptors, (d)etach from tty,
# (p)rint pid, and run in (n)amespace
opts = '-cd' if mnopts is None else mnopts
if self.inNamespace:
opts += 'n'
# bash -i: force interactive
# -s: pass $* to shell, and make process easy to find in ps
# prompt is set to sentinel chr( 127 )
cmd = [ 'vdlocalmnexec', opts, 'env', 'PS1=' + chr( 127 ),
'bash', '--norc', '--noediting',
'-is', 'mininet:' + self.name ]
# Spawn a shell subprocess in a pseudo-tty, to disable buffering
# in the subprocess and insulate it from signals (e.g. SIGINT)
# received by the parent
self.master, self.slave = pty.openpty()
self.shell = self._popen( cmd, stdin=self.slave, stdout=self.slave,
stderr=self.slave, close_fds=False )
# XXX BL: This doesn't seem right, and we should also probably
# close our files when we exit...
self.stdin = os.fdopen( self.master, 'r' )
self.stdout = self.stdin
self.pid = self.shell.pid
self.pollOut = select.poll()
self.pollOut.register( self.stdout )
# Maintain mapping between file descriptors and nodes
# This is useful for monitoring multiple nodes
# using select.poll()
self.outToNode[ self.stdout.fileno() ] = self
self.inToNode[ self.stdin.fileno() ] = self
self.execed = False
self.lastCmd = None
self.lastPid = None
self.readbuf = ''
# Wait for prompt
while True:
data = self.read( 1024 )
if data[ -1 ] == chr( 127 ):
break
self.pollOut.poll()
self.waiting = False
# +m: disable job control notification
self.cmd( 'unset HISTFILE; stty -echo; set +m' )
def mountPrivateDirs( self ):
"mount private directories"
# Avoid expanding a string into a list of chars
assert not isinstance( self.privateDirs, BaseString )
for directory in self.privateDirs:
if isinstance( directory, tuple ):
# mount given private directory
privateDir = directory[ 1 ] % self.__dict__
mountPoint = directory[ 0 ]
self.cmd( 'mkdir -p %s' % privateDir )
self.cmd( 'mkdir -p %s' % mountPoint )
self.cmd( 'mount --bind %s %s' %
( privateDir, mountPoint ) )
else:
# mount temporary filesystem on directory
self.cmd( 'mkdir -p %s' % directory )
self.cmd( 'mount -n -t tmpfs tmpfs %s' % directory )
def unmountPrivateDirs( self ):
"mount private directories"
for directory in self.privateDirs:
if isinstance( directory, tuple ):
self.cmd( 'umount ', directory[ 0 ] )
else:
self.cmd( 'umount ', directory )
def _popen( self, cmd, **params ):
"""Internal method: spawn and return a process
cmd: command to run (list)
params: parameters to Popen()"""
# Leave this is as an instance method for now
assert self
popen = Popen( cmd, **params )
debug( '_popen', cmd, popen.pid )
return popen
def cleanup( self ):
"Help python collect its garbage."
# We used to do this, but it slows us down:
# Intfs may end up in root NS
# for intfName in self.intfNames():
# if self.name in intfName:
# quietRun( 'ip link del ' + intfName )
if self.shell:
# Close ptys
self.stdin.close()
os.close(self.slave)
if self.waitExited:
debug( 'waiting for', self.pid, 'to terminate\n' )
self.shell.wait()
self.shell = None
# Subshell I/O, commands and control
def read( self, maxbytes=1024 ):
"""Buffered read from node, potentially blocking.
maxbytes: maximum number of bytes to return"""
count = len( self.readbuf )
if count < maxbytes:
data = decode( os.read( self.stdout.fileno(), maxbytes - count ) )
self.readbuf += data
if maxbytes >= len( self.readbuf ):
result = self.readbuf
self.readbuf = ''
else:
result = self.readbuf[ :maxbytes ]
self.readbuf = self.readbuf[ maxbytes: ]
return result
def readline( self ):
"""Buffered readline from node, potentially blocking.
returns: line (minus newline) or None"""
self.readbuf += self.read( 1024 )
if '\n' not in self.readbuf:
return None
pos = self.readbuf.find( '\n' )
line = self.readbuf[ 0: pos ]
self.readbuf = self.readbuf[ pos + 1: ]
return line
def write( self, data ):
"""Write data to node.
data: string"""
os.write( self.stdin.fileno(), encode( data ) )
def terminate( self ):
"Send kill signal to Node and clean up after it."
self.unmountPrivateDirs()
if self.shell:
if self.shell.poll() is None:
os.killpg( self.shell.pid, signal.SIGHUP )
self.cleanup()
def stop( self, deleteIntfs=False ):
"""Stop node.
deleteIntfs: delete interfaces? (False)"""
if deleteIntfs:
self.deleteIntfs()
self.terminate()
def waitReadable( self, timeoutms=None ):
"""Wait until node's output is readable.
timeoutms: timeout in ms or None to wait indefinitely.
returns: result of poll()"""
if len( self.readbuf ) == 0:
return self.pollOut.poll( timeoutms )
def sendCmd( self, *args, **kwargs ):
"""Send a command, followed by a command to echo a sentinel,
and return without waiting for the command to complete.
args: command and arguments, or string
printPid: print command's PID? (False)"""
assert self.shell and not self.waiting
printPid = kwargs.get( 'printPid', False )
# Allow sendCmd( [ list ] )
if len( args ) == 1 and isinstance( args[ 0 ], list ):
cmd = args[ 0 ]
# Allow sendCmd( cmd, arg1, arg2... )
elif len( args ) > 0:
cmd = args
# Convert to string
if not isinstance( cmd, str ):
cmd = ' '.join( [ str( c ) for c in cmd ] )
if not re.search( r'\w', cmd ):
# Replace empty commands with something harmless
cmd = 'echo -n'
self.lastCmd = cmd
# if a builtin command is backgrounded, it still yields a PID
if len( cmd ) > 0 and cmd[ -1 ] == '&':
# print ^A{pid}\n so monitor() can set lastPid
cmd += ' printf "\\001%d\\012" $! '
elif printPid and not isShellBuiltin( cmd ):
cmd = 'vdlocalmnexec -p ' + cmd
self.write( cmd + '\n' )
self.lastPid = None
self.waiting = True
def sendInt( self, intr=chr( 3 ) ):
"Interrupt running command."
debug( 'sendInt: writing chr(%d)\n' % ord( intr ) )
self.write( intr )
def monitor( self, timeoutms=None, findPid=True ):
"""Monitor and return the output of a command.
Set self.waiting to False if command has completed.
timeoutms: timeout in ms or None to wait indefinitely
findPid: look for PID from vdlocalmnexec -p"""
ready = self.waitReadable( timeoutms )
if not ready:
return ''
data = self.read( 1024 )
pidre = r'\[\d+\] \d+\r\n'
# Look for PID
marker = chr( 1 ) + r'\d+\r\n'
if findPid and chr( 1 ) in data:
# suppress the job and PID of a backgrounded command
if re.findall( pidre, data ):
data = re.sub( pidre, '', data )
# Marker can be read in chunks; continue until all of it is read
while not re.findall( marker, data ):
data += self.read( 1024 )
markers = re.findall( marker, data )
if markers:
self.lastPid = int( markers[ 0 ][ 1: ] )
data = re.sub( marker, '', data )
# Look for sentinel/EOF
if len( data ) > 0 and data[ -1 ] == chr( 127 ):
self.waiting = False
data = data[ :-1 ]
elif chr( 127 ) in data:
self.waiting = False
data = data.replace( chr( 127 ), '' )
return data
def waitOutput( self, verbose=False, findPid=True ):
"""Wait for a command to complete.
Completion is signaled by a sentinel character, ASCII(127)
appearing in the output stream. Wait for the sentinel and return
the output, including trailing newline.
verbose: print output interactively"""
log = info if verbose else debug
output = ''
while self.waiting:
data = self.monitor( findPid=findPid )
output += data
log( data )
return output
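    # Illustrative sketch (added; not part of the upstream source): the
    # non-blocking protocol above is typically driven as
    #   node.sendCmd( 'sleep 10 &', printPid=True )
    #   while node.waiting:
    #       out = node.monitor( timeoutms=100 )  # '' on timeout; sets lastPid
    # while cmd() below wraps the same send/wait cycle synchronously.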
def cmd( self, *args, **kwargs ):
"""Send a command, wait for output, and return it.
cmd: string"""
verbose = kwargs.get( 'verbose', False )
log = info if verbose else debug
log( '*** %s : %s\n' % ( self.name, args ) )
if self.shell:
self.sendCmd( *args, **kwargs )
return self.waitOutput( verbose )
else:
warn( '(%s exited - ignoring cmd%s)\n' % ( self, args ) )
def cmdPrint( self, *args):
"""Call cmd and printing its output
cmd: string"""
return self.cmd( *args, **{ 'verbose': True } )
def popen( self, *args, **kwargs ):
"""Return a Popen() object in our namespace
args: Popen() args, single list, or string
kwargs: Popen() keyword args"""
defaults = { 'stdout': PIPE, 'stderr': PIPE,
'mncmd':
[ 'vdlocalmnexec', '-da', str( self.pid ) ] }
defaults.update( kwargs )
shell = defaults.pop( 'shell', False )
if len( args ) == 1:
if isinstance( args[ 0 ], list ):
# popen([cmd, arg1, arg2...])
cmd = args[ 0 ]
elif isinstance( args[ 0 ], BaseString ):
# popen("cmd arg1 arg2...")
cmd = [ args[ 0 ] ] if shell else args[ 0 ].split()
else:
raise Exception( 'popen() requires a string or list' )
elif len( args ) > 0:
# popen( cmd, arg1, arg2... )
cmd = list( args )
if shell:
cmd = [ os.environ[ 'SHELL' ], '-c' ] + [ ' '.join( cmd ) ]
# Attach to our namespace using vdlocalmnexec -a
cmd = defaults.pop( 'mncmd' ) + cmd
popen = self._popen( cmd, **defaults )
return popen
def pexec( self, *args, **kwargs ):
"""Execute a command using popen
returns: out, err, exitcode"""
popen = self.popen( *args, stdin=PIPE, stdout=PIPE, stderr=PIPE,
**kwargs )
# Warning: this can fail with large numbers of fds!
out, err = popen.communicate()
exitcode = popen.wait()
return decode( out ), decode( err ), exitcode
# Interface management, configuration, and routing
# BL notes: This might be a bit redundant or over-complicated.
# However, it does allow a bit of specialization, including
# changing the canonical interface names. It's also tricky since
# the real interfaces are created as veth pairs, so we can't
# make a single interface at a time.
def newPort( self ):
"Return the next port number to allocate."
if len( self.ports ) > 0:
return max( self.ports.values() ) + 1
return self.portBase
def addIntf( self, intf, port=None, moveIntfFn=moveIntf ):
"""Add an interface.
intf: interface
port: port number (optional, typically OpenFlow port number)
moveIntfFn: function to move interface (optional)"""
if port is None:
port = self.newPort()
self.intfs[ port ] = intf
self.ports[ intf ] = port
self.nameToIntf[ intf.name ] = intf
debug( '\n' )
debug( 'added intf %s (%d) to node %s\n' % (
intf, port, self.name ) )
if self.inNamespace:
debug( 'moving', intf, 'into namespace for', self.name, '\n' )
moveIntfFn( intf.name, self )
def delIntf( self, intf ):
"""Remove interface from Node's known interfaces
Note: to fully delete interface, call intf.delete() instead"""
port = self.ports.get( intf )
if port is not None:
del self.intfs[ port ]
del self.ports[ intf ]
del self.nameToIntf[ intf.name ]
def defaultIntf( self ):
"Return interface for lowest port"
ports = self.intfs.keys()
if ports:
return self.intfs[ min( ports ) ]
else:
warn( '*** defaultIntf: warning:', self.name,
'has no interfaces\n' )
def intf( self, intf=None ):
"""Return our interface object with given string name,
default intf if name is falsy (None, empty string, etc).
or the input intf arg.
Having this fcn return its arg for Intf objects makes it
easier to construct functions with flexible input args for
interfaces (those that accept both string names and Intf objects).
"""
if not intf:
return self.defaultIntf()
elif isinstance( intf, BaseString):
return self.nameToIntf[ intf ]
else:
return intf
def connectionsTo( self, node):
"Return [ intf1, intf2... ] for all intfs that connect self to node."
# We could optimize this if it is important
connections = []
for intf in self.intfList():
link = intf.link
if link:
node1, node2 = link.intf1.node, link.intf2.node
if node1 == self and node2 == node:
connections += [ ( intf, link.intf2 ) ]
elif node1 == node and node2 == self:
connections += [ ( intf, link.intf1 ) ]
return connections
def deleteIntfs( self, checkName=True ):
"""Delete all of our interfaces.
checkName: only delete interfaces that contain our name"""
# In theory the interfaces should go away after we shut down.
# However, this takes time, so we're better off removing them
# explicitly so that we won't get errors if we run before they
# have been removed by the kernel. Unfortunately this is very slow,
# at least with Linux kernels before 2.6.33
for intf in list( self.intfs.values() ):
# Protect against deleting hardware interfaces
if ( self.name in intf.name ) or ( not checkName ):
intf.delete()
info( '.' )
# Routing support
def setARP( self, ip, mac ):
"""Add an ARP entry.
ip: IP address as string
mac: MAC address as string"""
result = self.cmd( 'arp', '-s', ip, mac )
return result
def setHostRoute( self, ip, intf ):
"""Add route to host.
ip: IP address as dotted decimal
intf: string, interface name"""
return self.cmd( 'route add -host', ip, 'dev', intf )
def setDefaultRoute( self, intf=None ):
"""Set the default route to go through intf.
intf: Intf or {dev <intfname> via <gw-ip> ...}"""
# Note setParam won't call us if intf is none
if isinstance( intf, BaseString ) and ' ' in intf:
params = intf
else:
params = 'dev %s' % intf
# Do this in one line in case we're messing with the root namespace
self.cmd( 'ip route del default; ip route add default', params )
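    # Illustrative usage (added; not part of the upstream source): both
    #   node.setDefaultRoute( 'h1-eth0' )
    # and the explicit ip-route form
    #   node.setDefaultRoute( 'dev h1-eth0 via 10.0.0.254' )
    # are accepted, since any string containing a space is passed through
    # verbatim as the ip-route parameters.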
# Convenience and configuration methods
def setMAC( self, mac, intf=None ):
"""Set the MAC address for an interface.
intf: intf or intf name
mac: MAC address as string"""
return self.intf( intf ).setMAC( mac )
def setIP( self, ip, prefixLen=8, intf=None, **kwargs ):
"""Set the IP address for an interface.
intf: intf or intf name
ip: IP address as a string
prefixLen: prefix length, e.g. 8 for /8 or 16M addrs
kwargs: any additional arguments for intf.setIP"""
return self.intf( intf ).setIP( ip, prefixLen, **kwargs )
def IP( self, intf=None ):
"Return IP address of a node or specific interface."
return self.intf( intf ).IP()
def MAC( self, intf=None ):
"Return MAC address of a node or specific interface."
return self.intf( intf ).MAC()
def intfIsUp( self, intf=None ):
"Check if an interface is up."
return self.intf( intf ).isUp()
# The reason why we configure things in this way is so
# That the parameters can be listed and documented in
# the config method.
# Dealing with subclasses and superclasses is slightly
# annoying, but at least the information is there!
def setParam( self, results, method, **param ):
"""Internal method: configure a *single* parameter
results: dict of results to update
method: config method name
param: arg=value (ignore if value=None)
value may also be list or dict"""
name, value = list( param.items() )[ 0 ]
if value is None:
return
f = getattr( self, method, None )
if not f:
return
if isinstance( value, list ):
result = f( *value )
elif isinstance( value, dict ):
result = f( **value )
else:
result = f( value )
results[ name ] = result
return result
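    # Illustrative sketch (added; not part of the upstream source): config()
    # below uses this helper roughly as
    #   r = {}
    #   self.setParam( r, 'setIP', ip='10.0.0.5/8' )  # single value -> f(value)
    #   self.setParam( r, 'setDefaultRoute', defaultRoute=None )  # ignored
    # so each keyword documented in config() maps onto exactly one setter.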
def config( self, mac=None, ip=None,
defaultRoute=None, lo='up', **_params ):
"""Configure Node according to (optional) parameters:
mac: MAC address for default interface
ip: IP address for default interface
ifconfig: arbitrary interface configuration
Subclasses should override this method and call
the parent class's config(**params)"""
# If we were overriding this method, we would call
# the superclass config method here as follows:
# r = Parent.config( **_params )
r = {}
self.setParam( r, 'setMAC', mac=mac )
self.setParam( r, 'setIP', ip=ip )
self.setParam( r, 'setDefaultRoute', defaultRoute=defaultRoute )
# This should be examined
self.cmd( 'ifconfig lo ' + lo )
return r
def configDefault( self, **moreParams ):
"Configure with default parameters"
self.params.update( moreParams )
self.config( **self.params )
# This is here for backward compatibility
def linkTo( self, node, link=Link ):
"""(Deprecated) Link to another node
replace with Link( node1, node2)"""
return link( self, node )
# Other methods
def intfList( self ):
"List of our interfaces sorted by port number"
return [ self.intfs[ p ] for p in sorted( self.intfs.keys() ) ]
def intfNames( self ):
"The names of our interfaces sorted by port number"
return [ str( i ) for i in self.intfList() ]
def __repr__( self ):
"More informative string representation"
intfs = ( ','.join( [ '%s:%s' % ( i.name, i.IP() )
for i in self.intfList() ] ) )
return '<%s %s: %s pid=%s> ' % (
self.__class__.__name__, self.name, intfs, self.pid )
def __str__( self ):
"Abbreviated string representation"
return self.name
# Automatic class setup support
isSetup = False
@classmethod
def checkSetup( cls ):
"Make sure our class and superclasses are set up"
while cls and not getattr( cls, 'isSetup', True ):
cls.setup()
cls.isSetup = True
# Make pylint happy
cls = getattr( type( cls ), '__base__', None )
@classmethod
def setup( cls ):
"Make sure our class dependencies are available"
pathCheck( 'vdlocalmnexec', 'ifconfig', moduleName='Mininet')
class Host( Node ):
"A host is simply a Node"
pass
class CPULimitedHost( Host ):
"CPU limited host"
def __init__( self, name, sched='cfs', **kwargs ):
Host.__init__( self, name, **kwargs )
# Initialize class if necessary
if not CPULimitedHost.inited:
CPULimitedHost.init()
# Create a cgroup and move shell into it
self.cgroup = 'cpu,cpuacct,cpuset:/' + self.name
errFail( 'cgcreate -g ' + self.cgroup )
# We don't add ourselves to a cpuset because you must
# specify the cpu and memory placement first
errFail( 'cgclassify -g cpu,cpuacct:/%s %s' % ( self.name, self.pid ) )
# BL: Setting the correct period/quota is tricky, particularly
# for RT. RT allows very small quotas, but the overhead
# seems to be high. CFS has a minimum quota of 1 ms, but
# still does better with larger period values.
self.period_us = kwargs.get( 'period_us', 100000 )
self.sched = sched
if sched == 'rt':
self.checkRtGroupSched()
self.rtprio = 20
def cgroupSet( self, param, value, resource='cpu' ):
"Set a cgroup parameter and return its value"
cmd = 'cgset -r %s.%s=%s /%s' % (
resource, param, value, self.name )
quietRun( cmd )
nvalue = int( self.cgroupGet( param, resource ) )
if nvalue != value:
error( '*** error: cgroupSet: %s set to %s instead of %s\n'
% ( param, nvalue, value ) )
return nvalue
def cgroupGet( self, param, resource='cpu' ):
"Return value of cgroup parameter"
cmd = 'cgget -r %s.%s /%s' % (
resource, param, self.name )
return int( quietRun( cmd ).split()[ -1 ] )
def cgroupDel( self ):
"Clean up our cgroup"
# info( '*** deleting cgroup', self.cgroup, '\n' )
_out, _err, exitcode = errRun( 'cgdelete -r ' + self.cgroup )
# Sometimes cgdelete returns a resource busy error but still
# deletes the group; next attempt will give "no such file"
return exitcode == 0 or ( 'no such file' in _err.lower() )
def popen( self, *args, **kwargs ):
"""Return a Popen() object in node's namespace
args: Popen() args, single list, or string
kwargs: Popen() keyword args"""
# Tell vdlocalmnexec to execute command in our cgroup
mncmd = kwargs.pop( 'mncmd', [ 'vdlocalmnexec', '-g', self.name,
'-da', str( self.pid ) ] )
# if our cgroup is not given any cpu time,
# we cannot assign the RR Scheduler.
if self.sched == 'rt':
if int( self.cgroupGet( 'rt_runtime_us', 'cpu' ) ) <= 0:
mncmd += [ '-r', str( self.rtprio ) ]
else:
debug( '*** error: not enough cpu time available for %s.' %
self.name, 'Using cfs scheduler for subprocess\n' )
return Host.popen( self, *args, mncmd=mncmd, **kwargs )
def cleanup( self ):
"Clean up Node, then clean up our cgroup"
super( CPULimitedHost, self ).cleanup()
retry( retries=3, delaySecs=.1, fn=self.cgroupDel )
_rtGroupSched = False # internal class var: Is CONFIG_RT_GROUP_SCHED set?
@classmethod
def checkRtGroupSched( cls ):
"Check (Ubuntu,Debian) kernel config for CONFIG_RT_GROUP_SCHED for RT"
if not cls._rtGroupSched:
release = quietRun( 'uname -r' ).strip('\r\n')
output = quietRun( 'grep CONFIG_RT_GROUP_SCHED /boot/config-%s' %
release )
if output == '# CONFIG_RT_GROUP_SCHED is not set\n':
error( '\n*** error: please enable RT_GROUP_SCHED '
'in your kernel\n' )
exit( 1 )
cls._rtGroupSched = True
def chrt( self ):
"Set RT scheduling priority"
quietRun( 'chrt -p %s %s' % ( self.rtprio, self.pid ) )
result = quietRun( 'chrt -p %s' % self.pid )
firstline = result.split( '\n' )[ 0 ]
lastword = firstline.split( ' ' )[ -1 ]
if lastword != 'SCHED_RR':
error( '*** error: could not assign SCHED_RR to %s\n' % self.name )
return lastword
def rtInfo( self, f ):
"Internal method: return parameters for RT bandwidth"
pstr, qstr = 'rt_period_us', 'rt_runtime_us'
# RT uses wall clock time for period and quota
quota = int( self.period_us * f )
return pstr, qstr, self.period_us, quota
def cfsInfo( self, f ):
"Internal method: return parameters for CFS bandwidth"
pstr, qstr = 'cfs_period_us', 'cfs_quota_us'
# CFS uses wall clock time for period and CPU time for quota.
quota = int( self.period_us * f * numCores() )
period = self.period_us
if f > 0 and quota < 1000:
debug( '(cfsInfo: increasing default period) ' )
quota = 1000
period = int( quota / f / numCores() )
# Reset to unlimited on negative quota
if quota < 0:
quota = -1
return pstr, qstr, period, quota
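    # Worked example (added; not part of the upstream source): with the
    # default period_us=100000 on a 4-core machine, setCPUFrac( 0.5 ) under
    # 'cfs' gives quota = int( 100000 * 0.5 * 4 ) = 200000 us of CPU time per
    # 100000 us of wall clock, i.e. half of total capacity; a very small f
    # instead raises quota to the 1000 us minimum and stretches the period.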
# BL comment:
# This may not be the right API,
# since it doesn't specify CPU bandwidth in "absolute"
# units the way link bandwidth is specified.
# We should use MIPS or SPECINT or something instead.
# Alternatively, we should change from system fraction
# to CPU seconds per second, essentially assuming that
# all CPUs are the same.
def setCPUFrac( self, f, sched=None ):
"""Set overall CPU fraction for this host
f: CPU bandwidth limit (positive fraction, or -1 for cfs unlimited)
sched: 'rt' or 'cfs'
Note 'cfs' requires CONFIG_CFS_BANDWIDTH,
and 'rt' requires CONFIG_RT_GROUP_SCHED"""
if not sched:
sched = self.sched
if sched == 'rt':
if not f or f < 0:
raise Exception( 'Please set a positive CPU fraction'
' for sched=rt\n' )
pstr, qstr, period, quota = self.rtInfo( f )
elif sched == 'cfs':
pstr, qstr, period, quota = self.cfsInfo( f )
else:
return
# Set cgroup's period and quota
setPeriod = self.cgroupSet( pstr, period )
setQuota = self.cgroupSet( qstr, quota )
if sched == 'rt':
# Set RT priority if necessary
sched = self.chrt()
info( '(%s %d/%dus) ' % ( sched, setQuota, setPeriod ) )
def setCPUs( self, cores, mems=0 ):
"Specify (real) cores that our cgroup can run on"
if not cores:
return
if isinstance( cores, list ):
cores = ','.join( [ str( c ) for c in cores ] )
self.cgroupSet( resource='cpuset', param='cpus',
value=cores )
# Memory placement is probably not relevant, but we
# must specify it anyway
self.cgroupSet( resource='cpuset', param='mems',
value=mems)
# We have to do this here after we've specified
# cpus and mems
errFail( 'cgclassify -g cpuset:/%s %s' % (
self.name, self.pid ) )
def config( self, cpu=-1, cores=None, **params ):
"""cpu: desired overall system CPU fraction
cores: (real) core(s) this host can run on
params: parameters for Node.config()"""
r = Node.config( self, **params )
# Was considering cpu={'cpu': cpu , 'sched': sched}, but
# that seems redundant
self.setParam( r, 'setCPUFrac', cpu=cpu )
self.setParam( r, 'setCPUs', cores=cores )
return r
inited = False
@classmethod
def init( cls ):
"Initialization for CPULimitedHost class"
mountCgroups()
cls.inited = True
# Some important things to note:
#
# The "IP" address which setIP() assigns to the switch is not
# an "IP address for the switch" in the sense of IP routing.
# Rather, it is the IP address for the control interface,
# on the control network, and it is only relevant to the
# controller. If you are running in the root namespace
# (which is the only way to run OVS at the moment), the
# control interface is the loopback interface, and you
# normally never want to change its IP address!
#
# In general, you NEVER want to attempt to use Linux's
# network stack (i.e. ifconfig) to "assign" an IP address or
# MAC address to a switch data port. Instead, you "assign"
# the IP and MAC addresses in the controller by specifying
# packets that you want to receive or send. The "MAC" address
# reported by ifconfig for a switch data port is essentially
# meaningless. It is important to understand this if you
# want to create a functional router using OpenFlow.
class Switch( Node ):
"""A Switch is a Node that is running (or has execed?)
an OpenFlow switch."""
portBase = 1 # Switches start with port 1 in OpenFlow
dpidLen = 16 # digits in dpid passed to switch
def __init__( self, name, dpid=None, opts='', listenPort=None, **params):
"""dpid: dpid hex string (or None to derive from name, e.g. s1 -> 1)
opts: additional switch options
listenPort: port to listen on for dpctl connections"""
Node.__init__( self, name, **params )
self.dpid = self.defaultDpid( dpid )
self.opts = opts
self.listenPort = listenPort
if not self.inNamespace:
self.controlIntf = Intf( 'lo', self, port=0 )
def defaultDpid( self, dpid=None ):
"Return correctly formatted dpid from dpid or switch name (s1 -> 1)"
if dpid:
# Remove any colons and make sure it's a good hex number
dpid = dpid.replace( ':', '' )
assert len( dpid ) <= self.dpidLen and int( dpid, 16 ) >= 0
else:
# Use hex of the first number in the switch name
nums = re.findall( r'\d+', self.name )
if nums:
dpid = hex( int( nums[ 0 ] ) )[ 2: ]
else:
self.terminate() # Python 3.6 crash workaround
raise Exception( 'Unable to derive default datapath ID - '
'please either specify a dpid or use a '
'canonical switch name such as s23.' )
return '0' * ( self.dpidLen - len( dpid ) ) + dpid
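    # Worked example (added; not part of the upstream source): a switch named
    # 's23' with no explicit dpid uses the first number in its name:
    # hex( 23 ) -> '0x17' -> '17', zero-padded to dpidLen digits, giving
    # '0000000000000017'; a name without any digits raises the exception above.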
def defaultIntf( self ):
"Return control interface"
if self.controlIntf:
return self.controlIntf
else:
return Node.defaultIntf( self )
def sendCmd( self, *cmd, **kwargs ):
"""Send command to Node.
cmd: string"""
kwargs.setdefault( 'printPid', False )
if not self.execed:
return Node.sendCmd( self, *cmd, **kwargs )
else:
error( '*** Error: %s has execed and cannot accept commands' %
self.name )
def connected( self ):
"Is the switch connected to a controller? (override this method)"
# Assume that we are connected by default to whatever we need to
# be connected to. This should be overridden by any OpenFlow
# switch, but not by a standalone bridge.
debug( 'Assuming', repr( self ), 'is connected to a controller\n' )
return True
def stop( self, deleteIntfs=True ):
"""Stop switch
deleteIntfs: delete interfaces? (True)"""
if deleteIntfs:
self.deleteIntfs()
def __repr__( self ):
"More informative string representation"
intfs = ( ','.join( [ '%s:%s' % ( i.name, i.IP() )
for i in self.intfList() ] ) )
return '<%s %s: %s pid=%s> ' % (
self.__class__.__name__, self.name, intfs, self.pid )
class UserSwitch( Switch ):
"User-space switch."
dpidLen = 12
def __init__( self, name, dpopts='--no-slicing', **kwargs ):
"""Init.
name: name for the switch
dpopts: additional arguments to ofdatapath (--no-slicing)"""
Switch.__init__( self, name, **kwargs )
pathCheck( 'ofdatapath', 'ofprotocol',
moduleName='the OpenFlow reference user switch' +
'(openflow.org)' )
if self.listenPort:
self.opts += ' --listen=ptcp:%i ' % self.listenPort
else:
self.opts += ' --listen=punix:/tmp/%s.listen' % self.name
self.dpopts = dpopts
@classmethod
def setup( cls ):
"Ensure any dependencies are loaded; if not, try to load them."
if not os.path.exists( '/dev/net/tun' ):
moduleDeps( add=TUN )
def dpctl( self, *args ):
"Run dpctl command"
listenAddr = None
if not self.listenPort:
listenAddr = 'unix:/tmp/%s.listen' % self.name
else:
listenAddr = 'tcp:127.0.0.1:%i' % self.listenPort
return self.cmd( 'dpctl ' + ' '.join( args ) +
' ' + listenAddr )
def connected( self ):
"Is the switch connected to a controller?"
status = self.dpctl( 'status' )
return ( 'remote.is-connected=true' in status and
'local.is-connected=true' in status )
@staticmethod
def TCReapply( intf ):
"""Unfortunately user switch and Mininet are fighting
over tc queuing disciplines. To resolve the conflict,
we re-create the user switch's configuration, but as a
leaf of the TCIntf-created configuration."""
if isinstance( intf, TCIntf ):
ifspeed = 10000000000 # 10 Gbps
minspeed = ifspeed * 0.001
res = intf.config( **intf.params )
if res is None: # link may not have TC parameters
return
# Re-add qdisc, root, and default classes user switch created, but
# with new parent, as setup by Mininet's TCIntf
parent = res['parent']
intf.tc( "%s qdisc add dev %s " + parent +
" handle 1: htb default 0xfffe" )
intf.tc( "%s class add dev %s classid 1:0xffff parent 1: htb rate "
+ str(ifspeed) )
intf.tc( "%s class add dev %s classid 1:0xfffe parent 1:0xffff " +
"htb rate " + str(minspeed) + " ceil " + str(ifspeed) )
def start( self, controllers ):
"""Start OpenFlow reference user datapath.
Log to /tmp/sN-{ofd,ofp}.log.
controllers: list of controller objects"""
# Add controllers
clist = ','.join( [ 'tcp:%s:%d' % ( c.IP(), c.port )
for c in controllers ] )
ofdlog = '/tmp/' + self.name + '-ofd.log'
ofplog = '/tmp/' + self.name + '-ofp.log'
intfs = [ str( i ) for i in self.intfList() if not i.IP() ]
self.cmd( 'ofdatapath -i ' + ','.join( intfs ) +
' punix:/tmp/' + self.name + ' -d %s ' % self.dpid +
self.dpopts +
' 1> ' + ofdlog + ' 2> ' + ofdlog + ' &' )
self.cmd( 'ofprotocol unix:/tmp/' + self.name +
' ' + clist +
' --fail=closed ' + self.opts +
' 1> ' + ofplog + ' 2>' + ofplog + ' &' )
if "no-slicing" not in self.dpopts:
# Only TCReapply if slicing is enabled
sleep(1) # Allow ofdatapath to start before re-arranging qdisc's
for intf in self.intfList():
if not intf.IP():
self.TCReapply( intf )
def stop( self, deleteIntfs=True ):
"""Stop OpenFlow reference user datapath.
deleteIntfs: delete interfaces? (True)"""
self.cmd( 'kill %ofdatapath' )
self.cmd( 'kill %ofprotocol' )
super( UserSwitch, self ).stop( deleteIntfs )
class OVSSwitch( Switch ):
"Open vSwitch switch. Depends on ovs-vsctl."
def __init__( self, name, failMode='secure', datapath='kernel',
inband=False, protocols=None,
reconnectms=1000, stp=False, batch=False, **params ):
"""name: name for switch
failMode: controller loss behavior (secure|standalone)
datapath: userspace or kernel mode (kernel|user)
inband: use in-band control (False)
protocols: use specific OpenFlow version(s) (e.g. OpenFlow13)
Unspecified (or old OVS version) uses OVS default
reconnectms: max reconnect timeout in ms (0/None for default)
stp: enable STP (False, requires failMode=standalone)
batch: enable batch startup (False)"""
Switch.__init__( self, name, **params )
self.failMode = failMode
self.datapath = datapath
self.inband = inband
self.protocols = protocols
self.reconnectms = reconnectms
self.stp = stp
self._uuids = [] # controller UUIDs
self.batch = batch
self.commands = [] # saved commands for batch startup
@classmethod
def setup( cls ):
"Make sure Open vSwitch is installed and working"
pathCheck( 'ovs-vsctl',
moduleName='Open vSwitch (openvswitch.org)')
# This should no longer be needed, and it breaks
# with OVS 1.7 which has renamed the kernel module:
# moduleDeps( subtract=OF_KMOD, add=OVS_KMOD )
out, err, exitcode = errRun( 'ovs-vsctl -t 1 show' )
if exitcode:
error( out + err +
'ovs-vsctl exited with code %d\n' % exitcode +
'*** Error connecting to ovs-db with ovs-vsctl\n'
'Make sure that Open vSwitch is installed, '
'that ovsdb-server is running, and that\n'
'"ovs-vsctl show" works correctly.\n'
'You may wish to try '
'"service openvswitch-switch start".\n' )
exit( 1 )
version = quietRun( 'ovs-vsctl --version' )
cls.OVSVersion = findall( r'\d+\.\d+', version )[ 0 ]
@classmethod
def isOldOVS( cls ):
"Is OVS ersion < 1.10?"
return ( StrictVersion( cls.OVSVersion ) <
StrictVersion( '1.10' ) )
def dpctl( self, *args ):
"Run ovs-ofctl command"
return self.cmd( 'ovs-ofctl', args[ 0 ], self, *args[ 1: ] )
def vsctl( self, *args, **kwargs ):
"Run ovs-vsctl command (or queue for later execution)"
if self.batch:
cmd = ' '.join( str( arg ).strip() for arg in args )
self.commands.append( cmd )
else:
return self.cmd( 'ovs-vsctl', *args, **kwargs )
@staticmethod
def TCReapply( intf ):
"""Unfortunately OVS and Mininet are fighting
over tc queuing disciplines. As a quick hack/
workaround, we clear OVS's and reapply our own."""
if isinstance( intf, TCIntf ):
intf.config( **intf.params )
def attach( self, intf ):
"Connect a data port"
self.vsctl( 'add-port', self, intf )
self.cmd( 'ifconfig', intf, 'up' )
self.TCReapply( intf )
def detach( self, intf ):
"Disconnect a data port"
self.vsctl( 'del-port', self, intf )
def controllerUUIDs( self, update=False ):
"""Return ovsdb UUIDs for our controllers
update: update cached value"""
if not self._uuids or update:
controllers = self.cmd( 'ovs-vsctl -- get Bridge', self,
'Controller' ).strip()
if controllers.startswith( '[' ) and controllers.endswith( ']' ):
controllers = controllers[ 1 : -1 ]
if controllers:
self._uuids = [ c.strip()
for c in controllers.split( ',' ) ]
return self._uuids
def connected( self ):
"Are we connected to at least one of our controllers?"
for uuid in self.controllerUUIDs():
if 'true' in self.vsctl( '-- get Controller',
uuid, 'is_connected' ):
return True
return self.failMode == 'standalone'
def intfOpts( self, intf ):
"Return OVS interface options for intf"
opts = ''
if not self.isOldOVS():
# ofport_request is not supported on old OVS
opts += ' ofport_request=%s' % self.ports[ intf ]
# Patch ports don't work well with old OVS
if isinstance( intf, OVSIntf ):
intf1, intf2 = intf.link.intf1, intf.link.intf2
peer = intf1 if intf1 != intf else intf2
opts += ' type=patch options:peer=%s' % peer
return '' if not opts else ' -- set Interface %s' % intf + opts
def bridgeOpts( self ):
"Return OVS bridge options"
opts = ( ' other_config:datapath-id=%s' % self.dpid +
' fail_mode=%s' % self.failMode )
if not self.inband:
opts += ' other-config:disable-in-band=true'
if self.datapath == 'user':
opts += ' datapath_type=netdev'
if self.protocols and not self.isOldOVS():
opts += ' protocols=%s' % self.protocols
if self.stp and self.failMode == 'standalone':
opts += ' stp_enable=true'
opts += ' other-config:dp-desc=%s' % self.name
return opts
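    # Illustrative result (added; not part of the upstream source): for a
    # default kernel-datapath switch 's1' in secure fail mode this returns
    # roughly
    #   ' other_config:datapath-id=0000000000000001 fail_mode=secure'
    #   ' other-config:disable-in-band=true other-config:dp-desc=s1'
    # with datapath_type/protocols/stp_enable appended only when configured.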
def start( self, controllers ):
"Start up a new OVS OpenFlow switch using ovs-vsctl"
if self.inNamespace:
raise Exception(
'OVS kernel switch does not work in a namespace' )
int( self.dpid, 16 ) # DPID must be a hex string
# Command to add interfaces
intfs = ''.join( ' -- add-port %s %s' % ( self, intf ) +
self.intfOpts( intf )
for intf in self.intfList()
if self.ports[ intf ] and not intf.IP() )
# Command to create controller entries
clist = [ ( self.name + c.name, '%s:%s:%d' %
( c.protocol, c.IP(), c.port ) )
for c in controllers ]
if self.listenPort:
clist.append( ( self.name + '-listen',
'ptcp:%s' % self.listenPort ) )
ccmd = '-- --id=@%s create Controller target=\\"%s\\"'
if self.reconnectms:
ccmd += ' max_backoff=%d' % self.reconnectms
cargs = ' '.join( ccmd % ( name, target )
for name, target in clist )
# Controller ID list
cids = ','.join( '@%s' % name for name, _target in clist )
# Try to delete any existing bridges with the same name
if not self.isOldOVS():
cargs += ' -- --if-exists del-br %s' % self
# One ovs-vsctl command to rule them all!
self.vsctl( cargs +
' -- add-br %s' % self +
' -- set bridge %s controller=[%s]' % ( self, cids ) +
self.bridgeOpts() +
intfs )
# If necessary, restore TC config overwritten by OVS
if not self.batch:
for intf in self.intfList():
self.TCReapply( intf )
# This should be ~ int( quietRun( 'getconf ARG_MAX' ) ),
# but the real limit seems to be much lower
argmax = 128000
@classmethod
def batchStartup( cls, switches, run=errRun ):
"""Batch startup for OVS
switches: switches to start up
run: function to run commands (errRun)"""
info( '...' )
cmds = 'ovs-vsctl'
for switch in switches:
if switch.isOldOVS():
# Ideally we'd optimize this also
run( 'ovs-vsctl del-br %s' % switch )
for cmd in switch.commands:
cmd = cmd.strip()
# Don't exceed ARG_MAX
if len( cmds ) + len( cmd ) >= cls.argmax:
run( cmds, shell=True )
cmds = 'ovs-vsctl'
cmds += ' ' + cmd
switch.cmds = []
switch.batch = False
if cmds:
run( cmds, shell=True )
# Reapply link config if necessary...
for switch in switches:
for intf in switch.intfs.values():
if isinstance( intf, TCIntf ):
intf.config( **intf.params )
return switches
def stop( self, deleteIntfs=True ):
"""Terminate OVS switch.
deleteIntfs: delete interfaces? (True)"""
self.cmd( 'ovs-vsctl del-br', self )
if self.datapath == 'user':
self.cmd( 'ip link del', self )
super( OVSSwitch, self ).stop( deleteIntfs )
@classmethod
def batchShutdown( cls, switches, run=errRun ):
"Shut down a list of OVS switches"
delcmd = 'del-br %s'
if switches and not switches[ 0 ].isOldOVS():
delcmd = '--if-exists ' + delcmd
# First, delete them all from ovsdb
run( 'ovs-vsctl ' +
' -- '.join( delcmd % s for s in switches ) )
# Next, shut down all of the processes
pids = ' '.join( str( switch.pid ) for switch in switches )
run( 'kill -HUP ' + pids )
for switch in switches:
switch.terminate()
return switches
OVSKernelSwitch = OVSSwitch
class OVSBridge( OVSSwitch ):
"OVSBridge is an OVSSwitch in standalone/bridge mode"
def __init__( self, *args, **kwargs ):
"""stp: enable Spanning Tree Protocol (False)
see OVSSwitch for other options"""
kwargs.update( failMode='standalone' )
OVSSwitch.__init__( self, *args, **kwargs )
def start( self, controllers ):
"Start bridge, ignoring controllers argument"
OVSSwitch.start( self, controllers=[] )
def connected( self ):
"Are we forwarding yet?"
if self.stp:
status = self.dpctl( 'show' )
return 'STP_FORWARD' in status and 'STP_LEARN' not in status
else:
return True
class IVSSwitch( Switch ):
"Indigo Virtual Switch"
def __init__( self, name, verbose=False, **kwargs ):
Switch.__init__( self, name, **kwargs )
self.verbose = verbose
@classmethod
def setup( cls ):
"Make sure IVS is installed"
pathCheck( 'ivs-ctl', 'ivs',
moduleName="Indigo Virtual Switch (projectfloodlight.org)" )
out, err, exitcode = errRun( 'ivs-ctl show' )
if exitcode:
error( out + err +
'ivs-ctl exited with code %d\n' % exitcode +
'*** The openvswitch kernel module might '
'not be loaded. Try modprobe openvswitch.\n' )
exit( 1 )
@classmethod
def batchShutdown( cls, switches ):
"Kill each IVS switch, to be waited on later in stop()"
for switch in switches:
switch.cmd( 'kill %ivs' )
return switches
def start( self, controllers ):
"Start up a new IVS switch"
args = ['ivs']
args.extend( ['--name', self.name] )
args.extend( ['--dpid', self.dpid] )
if self.verbose:
args.extend( ['--verbose'] )
for intf in self.intfs.values():
if not intf.IP():
args.extend( ['-i', intf.name] )
for c in controllers:
args.extend( ['-c', '%s:%d' % (c.IP(), c.port)] )
if self.listenPort:
args.extend( ['--listen', '127.0.0.1:%i' % self.listenPort] )
args.append( self.opts )
logfile = '/tmp/ivs.%s.log' % self.name
self.cmd( ' '.join(args) + ' >' + logfile + ' 2>&1 </dev/null &' )
def stop( self, deleteIntfs=True ):
"""Terminate IVS switch.
deleteIntfs: delete interfaces? (True)"""
self.cmd( 'kill %ivs' )
self.cmd( 'wait' )
super( IVSSwitch, self ).stop( deleteIntfs )
def attach( self, intf ):
"Connect a data port"
self.cmd( 'ivs-ctl', 'add-port', '--datapath', self.name, intf )
def detach( self, intf ):
"Disconnect a data port"
self.cmd( 'ivs-ctl', 'del-port', '--datapath', self.name, intf )
def dpctl( self, *args ):
"Run dpctl command"
if not self.listenPort:
return "can't run dpctl without passive listening port"
return self.cmd( 'ovs-ofctl ' + ' '.join( args ) +
' tcp:127.0.0.1:%i' % self.listenPort )
class Controller( Node ):
"""A Controller is a Node that is running (or has execed?) an
OpenFlow controller."""
def __init__( self, name, inNamespace=False, command='controller',
cargs='-v ptcp:%d', cdir=None, ip="127.0.0.1",
port=6653, protocol='tcp', **params ):
self.command = command
self.cargs = cargs
self.cdir = cdir
# Accept 'ip:port' syntax as shorthand
if ':' in ip:
ip, port = ip.split( ':' )
port = int( port )
self.ip = ip
self.port = port
self.protocol = protocol
Node.__init__( self, name, inNamespace=inNamespace,
ip=ip, **params )
self.checkListening()
def checkListening( self ):
"Make sure no controllers are running on our port"
# Verify that Telnet is installed first:
out, _err, returnCode = errRun( "which telnet" )
if 'telnet' not in out or returnCode != 0:
raise Exception( "Error running telnet to check for listening "
"controllers; please check that it is "
"installed." )
listening = self.cmd( "echo A | telnet -e A %s %d" %
( self.ip, self.port ) )
if 'Connected' in listening:
servers = self.cmd( 'netstat -natp' ).split( '\n' )
pstr = ':%d ' % self.port
clist = servers[ 0:1 ] + [ s for s in servers if pstr in s ]
raise Exception( "Please shut down the controller which is"
" running on port %d:\n" % self.port +
'\n'.join( clist ) )
def start( self ):
"""Start <controller> <args> on controller.
Log to /tmp/cN.log"""
pathCheck( self.command )
cout = '/tmp/' + self.name + '.log'
if self.cdir is not None:
self.cmd( 'cd ' + self.cdir )
self.cmd( self.command + ' ' + self.cargs % self.port +
' 1>' + cout + ' 2>' + cout + ' &' )
self.execed = False
def stop( self, *args, **kwargs ):
"Stop controller."
self.cmd( 'kill %' + self.command )
self.cmd( 'wait %' + self.command )
super( Controller, self ).stop( *args, **kwargs )
def IP( self, intf=None ):
"Return IP address of the Controller"
if self.intfs:
ip = Node.IP( self, intf )
else:
ip = self.ip
return ip
def __repr__( self ):
"More informative string representation"
return '<%s %s: %s:%s pid=%s> ' % (
self.__class__.__name__, self.name,
self.IP(), self.port, self.pid )
@classmethod
def isAvailable( cls ):
"Is controller available?"
return which( 'controller' )
class OVSController( Controller ):
"Open vSwitch controller"
def __init__( self, name, **kwargs ):
kwargs.setdefault( 'command', self.isAvailable() or
'ovs-controller' )
Controller.__init__( self, name, **kwargs )
@classmethod
def isAvailable( cls ):
return (which( 'ovs-controller' ) or
which( 'test-controller' ) or
which( 'ovs-testcontroller' ))
class NOX( Controller ):
"Controller to run a NOX application."
def __init__( self, name, *noxArgs, **kwargs ):
"""Init.
name: name to give controller
noxArgs: arguments (strings) to pass to NOX"""
if not noxArgs:
warn( 'warning: no NOX modules specified; '
'running packetdump only\n' )
noxArgs = [ 'packetdump' ]
elif type( noxArgs ) not in ( list, tuple ):
noxArgs = [ noxArgs ]
if 'NOX_CORE_DIR' not in os.environ:
exit( 'exiting; please set missing NOX_CORE_DIR env var' )
noxCoreDir = os.environ[ 'NOX_CORE_DIR' ]
Controller.__init__( self, name,
command=noxCoreDir + '/nox_core',
cargs='--libdir=/usr/local/lib -v -i ptcp:%s ' +
' '.join( noxArgs ),
cdir=noxCoreDir,
**kwargs )
class Ryu( Controller ):
"Controller to run Ryu application"
def __init__( self, name, *ryuArgs, **kwargs ):
"""Init.
name: name to give controller.
ryuArgs: arguments and modules to pass to Ryu"""
homeDir = quietRun( 'printenv HOME' ).strip( '\r\n' )
ryuCoreDir = '%s/ryu/ryu/app/' % homeDir
if not ryuArgs:
warn( 'warning: no Ryu modules specified; '
'running simple_switch only\n' )
ryuArgs = [ ryuCoreDir + 'simple_switch.py' ]
elif type( ryuArgs ) not in ( list, tuple ):
ryuArgs = [ ryuArgs ]
Controller.__init__( self, name,
command='ryu-manager',
cargs='--ofp-tcp-listen-port %s ' +
' '.join( ryuArgs ),
cdir=ryuCoreDir,
**kwargs )
class RemoteController( Controller ):
"Controller running outside of Mininet's control."
def __init__( self, name, ip='127.0.0.1',
port=None, **kwargs):
"""Init.
name: name to give controller
ip: the IP address where the remote controller is
listening
port: the port where the remote controller is listening"""
Controller.__init__( self, name, ip=ip, port=port, **kwargs )
def start( self ):
"Overridden to do nothing."
return
def stop( self ):
"Overridden to do nothing."
return
def checkListening( self ):
"Warn if remote controller is not accessible"
if self.port is not None:
self.isListening( self.ip, self.port )
else:
for port in 6653, 6633:
if self.isListening( self.ip, port ):
self.port = port
info( "Connecting to remote controller"
" at %s:%d\n" % ( self.ip, self.port ))
break
if self.port is None:
self.port = 6653
warn( "Setting remote controller"
" to %s:%d\n" % ( self.ip, self.port ))
def isListening( self, ip, port ):
"Check if a remote controller is listening at a specific ip and port"
listening = self.cmd( "echo A | telnet -e A %s %d" % ( ip, port ) )
if 'Connected' not in listening:
warn( "Unable to contact the remote controller"
" at %s:%d\n" % ( ip, port ) )
return False
else:
return True
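    # Illustrative usage (added; not part of the upstream source): an external
    # controller is typically attached with something like
    #   c0 = RemoteController( 'c0', ip='192.168.56.1', port=6653 )
    # in which case start()/stop() are no-ops and checkListening() only warns
    # when nothing answers at that address.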
DefaultControllers = ( Controller, OVSController )
def findController( controllers=DefaultControllers ):
"Return first available controller from list, if any"
for controller in controllers:
if controller.isAvailable():
return controller
def DefaultController( name, controllers=DefaultControllers, **kwargs ):
"Find a controller that is available and instantiate it"
controller = findController( controllers )
if not controller:
raise Exception( 'Could not find a default OpenFlow controller' )
return controller( name, **kwargs )
def NullController( *_args, **_kwargs ):
"Nonexistent controller - simply returns None"
return None
| [
"[email protected]"
] | |
eba5b10fdb01d5e9de0a691c5d7012932098fcb9 | b8b0a29b6f5bac70c408e46e6df1d6583e9ad8c0 | /portdata/serializers.py | 83fafe1145f099424288819777404e25e9f5cc1e | [] | no_license | varunsak/sdgindia | 20c41575a6f0c638662f1df6bd7a121ce3da8cf8 | a7fe9f6770e7b6ba628c376e773b11a19f58ccf4 | refs/heads/master | 2020-04-08T02:33:04.252409 | 2019-01-19T19:56:43 | 2019-01-19T19:56:43 | 158,939,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | from rest_framework import serializers
from .models import PortData
class DataSerializer(serializers.ModelSerializer):
id = serializers.IntegerField(read_only=True)
class Meta:
model = PortData
fields = (
'id', 'product', 'quantity', 'unit', 'item_rate_inv', 'currency', 'total_amount', 'fob_inr', 'item_rate_inr', 'fob_usd', 'foreign_port', 'foreign_country', 'india_port', 'india_company',
'foreign_company', 'invoice_number', 'hs_code'
)
| [
"[email protected]"
] | |
104377ea2f080beb98e6dc6a42d72693b8f7e54c | 60b2156d7bc5bd42fa1d8e7684f4b8c2d8f12748 | /tests/test_utils.py | cc5027edc12f34ec0d6c005b5c9015294d5a63ae | [
"MIT"
] | permissive | coras-io/lint-review | 74d89b05611ba4d18ab4224a6af9e5b93b5f27c3 | 0df19429a265a79edecb53b4371bf63db7e61617 | refs/heads/master | 2020-12-25T22:28:52.698909 | 2019-11-28T15:56:53 | 2019-11-28T15:56:53 | 63,415,943 | 2 | 2 | MIT | 2019-11-28T15:56:54 | 2016-07-15T11:07:20 | Python | UTF-8 | Python | false | false | 513 | py | import lintreview.utils as utils
import os
from unittest import skipIf
js_hint_installed = os.path.exists(
os.path.join(os.getcwd(), 'node_modules', '.bin', 'jshint'))
def test_in_path():
assert utils.in_path('python'), 'No python in path'
assert not utils.in_path('bad_cmd_name')
@skipIf(not js_hint_installed, 'Missing local jshint. Skipping')
def test_npm_exists():
assert utils.npm_exists('jshint'), 'Should be there.'
assert not utils.npm_exists('not there'), 'Should not be there.'
| [
"[email protected]"
] | |
ee3452616d5ab280c04845cc2164cbdf6db586d2 | 9032e88ca0c90a15b96d2142d2629484cdf469b6 | /py_controls/MemoryManager.py | fd1bc79f0d91f58ce62c4bd3349152244c888609 | [
"MIT"
] | permissive | CyberCrunch/DU_AI_Gov | 856db1db4e67e37ac8c8f05fc096a9bbc50027a8 | a9fcf3b603c39bf0704df172a6745620d1d3c06b | refs/heads/master | 2021-06-20T12:46:35.360703 | 2017-08-08T19:18:14 | 2017-08-08T19:18:14 | 77,530,730 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,088 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 30 15:52:43 2016
@author: robin
"""
import json
from enum import Enum # testing possible enums for readability...(not implemented)
class NrH(Enum): # human data format for Json
name = 0
human = 1
job = 2
status = 3
position = 4
money = 5
class NrL(Enum): # location data format for Json
name = 0
location = 1
planet = 2
structure = 3
longitude = 4
latitude = 5
resource = 6
reward = 7
class SpH(Enum): # human string format for registration
name = 0
job = 1
class SpL(Enum): # location string format for registration
name = 0
planet = 1
structure = 2
longitude = 3
latitude = 4
def regHuman(msg):
splitStr = msg.split()
if(len(splitStr) != 2):
return "Invalid Parameters, please use Format: !reg YourName YourJob"
with open('memoryDB.json', 'r+') as json_file:
json_data = json.load(json_file)
json_data[splitStr[SpH.name.value]] = ['Human', splitStr[SpH.job.value],"idle", "unknownPos", 0]
json_file.seek(0, 0)
json_file.write(json.dumps(json_data, indent=4))
json_file.truncate()
return ("New human registered: " +msg)
def regLocation(msg):
splitStr = msg.split()
if(len(splitStr) != 5):
return ("Invalid Parameters, please use Format: !geodata name planet type longitude latitude")
with open('memoryDB.json', 'r+') as json_file:
json_data = json.load(json_file)
json_data[splitStr[SpL.name.value]] = ['Location', splitStr[SpL.planet.value], splitStr[SpL.structure.value], splitStr[SpL.longitude.value], splitStr[SpL.latitude.value], "default", 0]
json_file.seek(0, 0)
json_file.write(json.dumps(json_data, indent=4))
json_file.truncate()
return ("New location registered: " +msg)
def getDatabase():
with open('memoryDB.json', 'r') as json_file:
json_data = json.load(json_file)
return(json.dumps(json_data, indent=4, sort_keys=True)) | [
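# Illustrative usage (added for clarity; not part of the original module).
# Assuming a valid memoryDB.json exists in the working directory, a chat
# command handler might call:
#   regHuman( 'Alice miner' )                        # "!reg" handler
#   regLocation( 'Base1 Alioth outpost 12.5 -7.3' )  # "!geodata" handler
#   print( getDatabase() )
# where the argument strings follow the SpH/SpL token orders defined above.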
"[email protected]"
] | |
33136feebbdbb37d3c3f294a2ecda89a59ed0b98 | ef55b064f4352e2f97431851d5535d6b173fb504 | /build-gui.py | 3f63548df82b17cc7d1b61a8ad113d3398e7053a | [
"MIT"
] | permissive | cheuer/ALttPDoorRandomizer | 8e56e3594a2a180953d0b44a83b668321e4af167 | 44d7e6c15cca8dc613e8fe9cdca07eaa3c5f44a3 | refs/heads/DoorDev | 2021-03-02T22:59:24.950136 | 2020-03-26T23:04:54 | 2020-03-26T23:04:54 | 245,912,753 | 0 | 0 | MIT | 2020-03-09T05:22:22 | 2020-03-09T00:45:46 | null | UTF-8 | Python | false | false | 543 | py | import subprocess
import os
import shutil
DEST_DIRECTORY = '.'
if os.path.isdir("upx"):
upx_string = "--upx-dir=upx"
else:
upx_string = ""
if os.path.isdir("build"):
shutil.rmtree("build")
subprocess.run(" ".join(["pyinstaller Gui.spec ",
upx_string,
"-y ",
"--onefile ",
f"--distpath {DEST_DIRECTORY} ",
]),
shell=True)
| [
"[email protected]"
] | |
7f9a2d07182faa806f9337f02a6a0ce4035514fd | 0676f6e4d3510a0305d29aa0b1fe740d538d3b63 | /Python/SImplifyPline/CleanUpPolyline.py | 1ce7d7116eb272886ed20d4186ae8a3b571c98fb | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | pgolay/PG_Scripts | f70ffe7e5ca07acd6f4caedc9a9aec566542da7c | 796704a7daa6ac222a40bb02afdb599f74a6b0d4 | refs/heads/master | 2021-01-19T16:53:41.525879 | 2017-02-07T18:26:10 | 2017-02-07T18:26:10 | 2,730,362 | 9 | 1 | null | 2016-12-30T17:58:08 | 2011-11-08T00:04:33 | Python | UTF-8 | Python | false | false | 1,898 | py | import Rhino
import scriptcontext as sc
"""
Cleans up by collapsing tiny segments in a polyline.
"""
def CleanUpPolyline():
while True:
tol = sc.doc.ModelAbsoluteTolerance
if sc.sticky.has_key("PLineSimplifyTol"):
tol = sc.sticky["PLineSimplifyTol"]
go = Rhino.Input.Custom.GetObject()
go.AcceptNumber(True, False)
go.GeometryFilter = Rhino.DocObjects.ObjectType.Curve
opDblTol = Rhino.Input.Custom.OptionDouble(tol)
go.AddOptionDouble("SegmentTolerance",opDblTol)
result = go.Get()
if( go.CommandResult() != Rhino.Commands.Result.Success ):
return
if result == Rhino.Input.GetResult.Object:
if type(go.Object(0).Geometry()) == Rhino.Geometry.PolylineCurve:
curve = go.Object(0).Geometry()
rc, pLine = curve.TryGetPolyline()
pLineId = go.Object(0).ObjectId
else:
sc.doc.Objects.UnselectAll()
sc.doc.Views.Redraw()
print "Sorry, that was not a polyline."
continue
break
elif result == Rhino.Input.GetResult.Option:
tol = opDblTol.CurrentValue
sc.sticky["PLineSimplifyTol"] = tol
continue
elif result == Rhino.Input.GetResult.Number:
tol = go.Number()
sc.sticky["PLineSimplifyTol"] = tol
continue
break
count = pLine.CollapseShortSegments(tol)
if count !=0:
sc.doc.Objects.Replace(pLineId, pLine)
sc.doc.Views.Redraw()
print str(count) + " short segments were collapsed."
else:
print "No short segments were collapsed."
pass
if __name__ == "__main__":
CleanUpPolyline() | [
"[email protected]"
] | |
9abb3baada0faed6fe83d3c15b41aa7c7958cb80 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_27357.py | 1163c19de3fb005d7b6fa68a6a453f6f2e63147f | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46 | py | # pyplot.savefig with empty export
plt.show()
| [
"[email protected]"
] | |
77d8acda1bcff51018b3fe72fc9c8578176f31e9 | c9aa19a4d46b5c5357121e76e2e9784f2140ba41 | /cashonly/management/commands/debtreminder.py | 09a10f922fe66a1bb31ef740723ed9ab65469d2c | [] | no_license | klonfed/cashonly | 2e617094ad95b82be62808fbbb781e9a2250b8a6 | 514e1c9cd8814e38b518b0be382940d1cb229725 | refs/heads/master | 2021-01-19T18:30:35.317250 | 2015-11-20T22:20:00 | 2015-11-20T22:20:00 | 41,054,334 | 2 | 2 | null | 2022-08-23T10:21:31 | 2015-08-19T19:07:16 | Python | UTF-8 | Python | false | false | 1,027 | py |
from cashonly.models import *
from django.conf import settings
from django.core.mail import send_mass_mail
from django.core.management.base import NoArgsCommand
from django.template import Context
from django.template.loader import get_template
from django.utils import translation
from django.utils.translation import ugettext as _
class Command(NoArgsCommand):
    help = 'Sends a reminder mail to everyone with a negative credit'
def handle_noargs(self, **options):
translation.activate('de')
tpl = get_template('cashonly/debt_reminder.txt')
messages = []
for a in Account.objects.all():
if a.credit < 0:
name = '%s %s' % (a.user.first_name, a.user.last_name)
context = {'name': name, 'credit': a.credit}
rcpts = ['%s <%s>' % (name, a.user.email)]
messages.append(('%s%s' % (settings.EMAIL_SUBJECT_PREFIX,
_('Debt Reminder')),
tpl.render(Context(context)),
settings.DEFAULT_FROM_EMAIL, rcpts))
send_mass_mail(tuple(messages))
| [
"[email protected]"
] | |
3095ad9d0178728b8363be5fa150c0ea43e6ecea | 9c902c6bc6ea2cce71195acd5baa8f44ab928eb6 | /pythonapp/imgtxt/admin.py | 0124dec01736c26d6587dbe332000f3719f39cdc | [] | no_license | mogilivishal/Verzeo-OCR-Project | a383b56014e13dfef598a191012fc51dc9579624 | 8b34a6c8b323e0b55c7902f2c4f873a1e4ce04e7 | refs/heads/master | 2022-04-17T20:32:45.724447 | 2020-02-16T17:38:52 | 2020-02-16T17:38:52 | 240,932,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | from django.contrib import admin
from .models import Document
admin.site.register(Document) | [
"[email protected]"
] | |
c45e6ce9c846d77c6611d7c5fa1d641c22336a01 | 4b8c81f54cc52e096ad9ae751f00e88254aab0ca | /20-01-21 while홀.py | 631fadc6b7eb53e75d2df8df8fc563a8e1db0e4e | [] | no_license | dlatpdbs/python | 50305cfcc92bb6c9bae409ec31ebd9e4aa868075 | 2f740941fe1ef172d40cb10a63c1ed19c5925e68 | refs/heads/main | 2022-12-27T15:24:31.243739 | 2020-10-14T05:26:32 | 2020-10-14T05:26:32 | 301,933,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50 | py |
q=1
while q <=100:
print(q)
q=q+2
| [
"[email protected]"
] | |
5dbe47764578bd0bad972363605507b01fd8cdfa | 12cdef3d9de846ac1c430f606bf862ecda6e2345 | /attractions/__init__.py | 4be87a5da7791f1c059468e21ff1aacb5221f3c6 | [] | no_license | kirksudduth/petting_zoo | 45865109dbc9c40fb54fd92cd7fac7b3809cbcd0 | ce9fb52ca0aff0cb640a2041b3996156f8bb8ca1 | refs/heads/master | 2022-11-20T19:22:15.611061 | 2020-07-21T20:21:55 | 2020-07-21T20:21:55 | 279,920,616 | 0 | 0 | null | 2020-07-21T20:21:56 | 2020-07-15T16:30:02 | Python | UTF-8 | Python | false | false | 285 | py | from .attraction import Attraction
from .petting_zoo import Petting_zoo
from .snake_pit import Snake_pit
from .wetlands import Wetlands
from .attractions_instances import creature_culdesac
from .attractions_instances import no_feet_knoll
from .attractions_instances import swimmy_jazz
| [
"[email protected]"
] | |
530f4767b7bb69cd945bd97def72737f1ad66039 | 7da328d5365788bec00b62e3c3de8b5133fba092 | /impala/tests/test_impala.py | 8c58516171a9ff74ed847675759c70ca285b5840 | [
"Apache-2.0"
] | permissive | attilajeges/impyla | f7520677e426f42e60ecf9199d8dacd38eae1b99 | 35297fd573bd8d8984f89eec91f12dbb1837549a | refs/heads/master | 2023-07-15T17:15:48.683389 | 2020-10-01T23:10:16 | 2020-10-01T23:10:16 | 260,346,345 | 0 | 0 | Apache-2.0 | 2020-05-01T00:18:06 | 2020-05-01T00:18:06 | null | UTF-8 | Python | false | false | 2,025 | py | # Copyright 2019 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from pytest import yield_fixture
BIGGER_TABLE_NUM_ROWS = 100
@yield_fixture(scope='module')
def bigger_table(cur):
table_name = 'tmp_bigger_table'
ddl = """CREATE TABLE {0} (s string)
STORED AS PARQUET""".format(table_name)
cur.execute(ddl)
dml = """INSERT INTO {0}
VALUES {1}""".format(table_name,
",".join(["('row{0}')".format(i) for i in xrange(BIGGER_TABLE_NUM_ROWS)]))
# Disable codegen and expr rewrites so query runs faster.
cur.execute("set disable_codegen=1")
cur.execute("set enable_expr_rewrites=0")
cur.execute(dml)
try:
yield table_name
finally:
cur.execute("DROP TABLE {0}".format(table_name))
def test_has_more_rows(cur, bigger_table):
"""Test that impyla correctly handles empty row batches returned with the
hasMoreRows flag."""
# Set the fetch timeout very low and add sleeps so that Impala will return
# empty batches. Run on a single node with a single thread to make as predictable
# as possible.
cur.execute("set fetch_rows_timeout_ms=1")
cur.execute("set num_nodes=1")
cur.execute("set mt_dop=1")
cur.execute("""select *
from {0}
where s != cast(sleep(2) as string)""".format(bigger_table))
expected_rows = [("row{0}".format(i),) for i in xrange(BIGGER_TABLE_NUM_ROWS)]
assert sorted(cur.fetchall()) == sorted(expected_rows)
| [
"[email protected]"
] | |
130a1da7648c1cb9b3d0bdc2b94793d83b2e1729 | 999a7707806f941d334170e9909a268d102929b2 | /yelpCNN.py | 3057ac376eecfe679a7625817028c878379593e2 | [] | no_license | wanaaaa/yelpCNN1D | 7e089ab4ca60e3cf478a6d5b0a5a3b3e80253ba4 | 2f1f1ad9b8101d7a52f2f3c4d01d92e3f197b19b | refs/heads/main | 2023-02-12T20:54:31.046391 | 2021-01-10T18:12:19 | 2021-01-10T18:12:19 | 328,447,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,147 | py | # https://chriskhanhtran.github.io/posts/cnn-sentence-classification/
from functionClass import *
from gensim.models import Word2Vec
import torch
import torch.optim as optim
device = 'cuda'
rateReviewTrainList, rateReviewTestList, maxListCount = dataRead()
xyDataLoader = DataLoaderFun(rateReviewTrainList, maxListCount, batchSize=2500)
textCNNmodel = trainFun(xyDataLoader, maxListCount, epochs=20)
# textCNNmodel = TextCnn(maxListCount).cuda(device=device)
textCNNmodel = TextCnn(maxListCount).cpu()
textCNNmodel.load_state_dict(torch.load('traindTextCNNmodel.model'))
textCNNmodel.eval()
# ================================================
# ================================================
# ================================================
xyTestDataLoader = DataLoaderFun(rateReviewTestList, maxListCount, batchSize=1)
for epoch in range(1):
# print("num of epochs->", epoch)
for step, batch in enumerate(xyTestDataLoader):
x_test, y_test = tuple(t.to('cpu') for t in batch)
y_pridict = textCNNmodel(x_test)
print("y_pridict->", y_pridict, 'y_test->', y_test)
# break
torch.cuda.empty_cache() | [
"[email protected]"
] | |
087457541661af279dddac07823ebcb457b7ee3f | 1eacd671cf9c71f486bbaddabe7701caf0d5e1ff | /ironic_python_agent/config.py | 94a255f880b6927251a8a3b0bc097c8963a6368c | [
"Apache-2.0"
] | permissive | tyws/ipa-customize | 65e04be381b7c9b538c02603f4ceead0b25b0265 | 962c9e0b1f904fdc14c0ce542809b11b741d41fb | refs/heads/master | 2020-07-24T13:10:22.269466 | 2019-09-30T05:47:53 | 2019-09-30T05:47:53 | 207,939,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,954 | py | # Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from ironic_python_agent import inspector
from ironic_python_agent import netutils
from ironic_python_agent import utils
CONF = cfg.CONF
APARAMS = utils.get_agent_params()
cli_opts = [
cfg.StrOpt('api_url',
default=APARAMS.get('ipa-api-url'),
deprecated_name='api-url',
regex='^http(s?):\/\/.+',
help='URL of the Ironic API. '
'Can be supplied as "ipa-api-url" kernel parameter.'
'The value must start with either http:// or https://.'),
cfg.StrOpt('listen_host',
default=APARAMS.get('ipa-listen-host',
netutils.get_wildcard_address()),
sample_default='::',
deprecated_name='listen-host',
help='The IP address to listen on. '
'Can be supplied as "ipa-listen-host" kernel parameter.'),
cfg.IntOpt('listen_port',
default=int(APARAMS.get('ipa-listen-port', 9999)),
deprecated_name='listen-port',
help='The port to listen on. '
'Can be supplied as "ipa-listen-port" kernel parameter.'),
cfg.StrOpt('advertise_host',
default=APARAMS.get('ipa-advertise-host', None),
deprecated_name='advertise_host',
help='The host to tell Ironic to reply and send '
'commands to. '
'Can be supplied as "ipa-advertise-host" '
'kernel parameter.'),
cfg.IntOpt('advertise_port',
default=int(APARAMS.get('ipa-advertise-port', 9999)),
deprecated_name='advertise-port',
help='The port to tell Ironic to reply and send '
'commands to. '
'Can be supplied as "ipa-advertise-port" '
'kernel parameter.'),
cfg.IntOpt('ip_lookup_attempts',
default=int(APARAMS.get('ipa-ip-lookup-attempts', 3)),
deprecated_name='ip-lookup-attempts',
help='The number of times to try and automatically '
'determine the agent IPv4 address. '
'Can be supplied as "ipa-ip-lookup-attempts" '
'kernel parameter.'),
cfg.IntOpt('ip_lookup_sleep',
default=int(APARAMS.get('ipa-ip-lookup-timeout', 10)),
deprecated_name='ip-lookup-sleep',
help='The amount of time to sleep between attempts '
'to determine IP address. '
'Can be supplied as "ipa-ip-lookup-timeout" '
'kernel parameter.'),
cfg.StrOpt('network_interface',
default=APARAMS.get('ipa-network-interface', None),
deprecated_name='network-interface',
help='The interface to use when looking for an IP address. '
'Can be supplied as "ipa-network-interface" '
'kernel parameter.'),
cfg.IntOpt('lookup_timeout',
default=int(APARAMS.get('ipa-lookup-timeout', 300)),
deprecated_name='lookup-timeout',
help='The amount of time to retry the initial lookup '
'call to Ironic. After the timeout, the agent '
'will exit with a non-zero exit code. '
'Can be supplied as "ipa-lookup-timeout" '
'kernel parameter.'),
cfg.IntOpt('lookup_interval',
default=int(APARAMS.get('ipa-lookup-interval', 1)),
deprecated_name='lookup-interval',
help='The initial interval for retries on the initial '
'lookup call to Ironic. The interval will be '
'doubled after each failure until timeout is '
'exceeded. '
'Can be supplied as "ipa-lookup-interval" '
'kernel parameter.'),
cfg.FloatOpt('lldp_timeout',
default=APARAMS.get('ipa-lldp-timeout',
APARAMS.get('lldp-timeout', 30.0)),
help='The amount of seconds to wait for LLDP packets. '
'Can be supplied as "ipa-lldp-timeout" '
'kernel parameter.'),
cfg.BoolOpt('collect_lldp',
default=APARAMS.get('ipa-collect-lldp', False),
help='Whether IPA should attempt to receive LLDP packets for '
'each network interface it discovers in the inventory. '
'Can be supplied as "ipa-collect-lldp" '
'kernel parameter.'),
cfg.BoolOpt('standalone',
default=APARAMS.get('ipa-standalone', False),
help='Note: for debugging only. Start the Agent but suppress '
'any calls to Ironic API. '
'Can be supplied as "ipa-standalone" '
'kernel parameter.'),
cfg.StrOpt('inspection_callback_url',
default=APARAMS.get('ipa-inspection-callback-url'),
help='Endpoint of ironic-inspector. If set, hardware inventory '
'will be collected and sent to ironic-inspector '
'on start up. '
'Can be supplied as "ipa-inspection-callback-url" '
'kernel parameter.'),
cfg.StrOpt('inspection_collectors',
default=APARAMS.get('ipa-inspection-collectors',
inspector.DEFAULT_COLLECTOR),
help='Comma-separated list of plugins providing additional '
'hardware data for inspection, empty value gives '
'a minimum required set of plugins. '
'Can be supplied as "ipa-inspection-collectors" '
'kernel parameter.'),
cfg.IntOpt('inspection_dhcp_wait_timeout',
default=APARAMS.get('ipa-inspection-dhcp-wait-timeout',
inspector.DEFAULT_DHCP_WAIT_TIMEOUT),
help='Maximum time (in seconds) to wait for the PXE NIC '
'(or all NICs if inspection_dhcp_all_interfaces is True) '
'to get its IP address via DHCP before inspection. '
'Set to 0 to disable waiting completely. '
'Can be supplied as "ipa-inspection-dhcp-wait-timeout" '
'kernel parameter.'),
cfg.BoolOpt('inspection_dhcp_all_interfaces',
default=APARAMS.get('ipa-inspection-dhcp-all-interfaces',
False),
help='Whether to wait for all interfaces to get their IP '
'addresses before inspection. If set to false '
'(the default), only waits for the PXE interface. '
'Can be supplied as '
'"ipa-inspection-dhcp-all-interfaces" '
'kernel parameter.'),
cfg.IntOpt('hardware_initialization_delay',
default=APARAMS.get('ipa-hardware-initialization-delay', 0),
help='How much time (in seconds) to wait for hardware to '
'initialize before proceeding with any actions. '
'Can be supplied as "ipa-hardware-initialization-delay" '
'kernel parameter.'),
cfg.IntOpt('disk_wait_attempts',
default=APARAMS.get('ipa-disk-wait-attempts', 10),
help='The number of times to try and check to see if '
'at least one suitable disk has appeared in inventory '
'before proceeding with any actions. '
'Can be supplied as "ipa-disk-wait-attempts" '
'kernel parameter.'),
cfg.IntOpt('disk_wait_delay',
default=APARAMS.get('ipa-disk-wait-delay', 3),
help='How much time (in seconds) to wait between attempts '
'to check if at least one suitable disk has appeared '
'in inventory. Set to zero to disable. '
'Can be supplied as "ipa-disk-wait-delay" '
'kernel parameter.'),
cfg.BoolOpt('insecure',
default=APARAMS.get('ipa-insecure', False),
help='Verify HTTPS connections. Can be supplied as '
'"ipa-insecure" kernel parameter.'),
cfg.StrOpt('cafile',
help='Path to PEM encoded Certificate Authority file '
'to use when verifying HTTPS connections. '
'Default is to use available system-wide configured CAs.'),
cfg.StrOpt('certfile',
help='Path to PEM encoded client certificate cert file. '
'Must be provided together with "keyfile" option. '
'Default is to not present any client certificates to '
'the server.'),
cfg.StrOpt('keyfile',
help='Path to PEM encoded client certificate key file. '
'Must be provided together with "certfile" option. '
'Default is to not present any client certificates to '
'the server.'),
]
CONF.register_cli_opts(cli_opts)
def list_opts():
return [('DEFAULT', cli_opts)]
| [
"[email protected]"
] | |
25f94ab6830074c80784e5bce87d5041838da2af | 96890d754bd943510ad2b5e3a0cba336fab24d44 | /Week7/After14.py | f051a0b47f89f4fb9463f9bece77e23caaf0f586 | [] | no_license | Chudvan/Python_osnovy_programmirovaniya-Coursera- | 304925397d3e7f4b49bc3f62dc89f782d36a1f76 | 19117cb198ed50bb90ff8082efc0dad4e80bce13 | refs/heads/master | 2020-07-07T13:49:14.504232 | 2019-08-21T02:00:01 | 2019-08-21T02:00:01 | 203,366,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | from sys import stdin
numberWordsDict = dict()
for line in stdin:
lineList = line.split()
for word in lineList:
if word not in numberWordsDict:
numberWordsDict[word] = 0
numberWordsDict[word] += 1
tupleList = []
for word in numberWordsDict:
tupleList.append((numberWordsDict[word], word))
tupleList.sort(key=lambda curTuple: (-curTuple[0], curTuple[1]))
for curTuple in tupleList:
print(curTuple[1])
| [
"[email protected]"
] | |
744b2b5f9edcfd6d59f3a65ebfda69a83917795e | 8c4ef53ec6c7df2eeeb633a53d1d931558596366 | /propertyestimator/properties/solvation.py | 846f77dd90fa87534dec104a50d994e4dbc33f4f | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | MSchauperl/propertyestimator | ff7bf2d3b6bc441141258483ec991f8806b09469 | 9a67cb61498024c511f9bbe55536ac8e1a3c93be | refs/heads/master | 2020-09-08T07:04:39.660322 | 2019-11-08T21:15:23 | 2019-11-08T21:15:23 | 221,055,340 | 0 | 0 | NOASSERTION | 2019-11-14T21:47:11 | 2019-11-11T19:34:28 | null | UTF-8 | Python | false | false | 8,120 | py | """
A collection of physical property definitions relating to
solvation free energies.
"""
from propertyestimator import unit
from propertyestimator.properties import PhysicalProperty
from propertyestimator.properties.plugins import register_estimable_property
from propertyestimator.protocols import coordinates, forcefield, miscellaneous, yank, simulation, groups
from propertyestimator.substances import Substance
from propertyestimator.thermodynamics import Ensemble
from propertyestimator.workflow import WorkflowOptions
from propertyestimator.workflow.schemas import WorkflowSchema
from propertyestimator.workflow.utils import ProtocolPath
@register_estimable_property()
class SolvationFreeEnergy(PhysicalProperty):
"""A class representation of a solvation free energy property."""
@staticmethod
def get_default_workflow_schema(calculation_layer, options=None):
if calculation_layer == 'SimulationLayer':
# Currently reweighting is not supported.
return SolvationFreeEnergy.get_default_simulation_workflow_schema(options)
return None
@staticmethod
def get_default_simulation_workflow_schema(options=None):
"""Returns the default workflow to use when estimating this property
from direct simulations.
Parameters
----------
options: WorkflowOptions
The default options to use when setting up the estimation workflow.
Returns
-------
WorkflowSchema
The schema to follow when estimating this property.
"""
# Setup the fully solvated systems.
build_full_coordinates = coordinates.BuildCoordinatesPackmol('build_solvated_coordinates')
build_full_coordinates.substance = ProtocolPath('substance', 'global')
build_full_coordinates.max_molecules = 2000
assign_full_parameters = forcefield.BuildSmirnoffSystem(f'assign_solvated_parameters')
assign_full_parameters.force_field_path = ProtocolPath('force_field_path', 'global')
assign_full_parameters.substance = ProtocolPath('substance', 'global')
assign_full_parameters.coordinate_file_path = ProtocolPath('coordinate_file_path',
build_full_coordinates.id)
# Perform a quick minimisation of the full system to give
# YANK a better starting point for its minimisation.
energy_minimisation = simulation.RunEnergyMinimisation('energy_minimisation')
energy_minimisation.system_path = ProtocolPath('system_path', assign_full_parameters.id)
energy_minimisation.input_coordinate_file = ProtocolPath('coordinate_file_path',
build_full_coordinates.id)
equilibration_simulation = simulation.RunOpenMMSimulation('equilibration_simulation')
equilibration_simulation.ensemble = Ensemble.NPT
equilibration_simulation.steps_per_iteration = 100000
equilibration_simulation.output_frequency = 10000
equilibration_simulation.timestep = 2.0 * unit.femtosecond
equilibration_simulation.thermodynamic_state = ProtocolPath('thermodynamic_state', 'global')
equilibration_simulation.system_path = ProtocolPath('system_path', assign_full_parameters.id)
equilibration_simulation.input_coordinate_file = ProtocolPath('output_coordinate_file',
energy_minimisation.id)
# Create a substance which only contains the solute (e.g. for the
# vacuum phase simulations).
filter_solvent = miscellaneous.FilterSubstanceByRole('filter_solvent')
filter_solvent.input_substance = ProtocolPath('substance', 'global')
filter_solvent.component_role = Substance.ComponentRole.Solvent
filter_solute = miscellaneous.FilterSubstanceByRole('filter_solute')
filter_solute.input_substance = ProtocolPath('substance', 'global')
filter_solute.component_role = Substance.ComponentRole.Solute
# Setup the solute in vacuum system.
build_vacuum_coordinates = coordinates.BuildCoordinatesPackmol('build_vacuum_coordinates')
build_vacuum_coordinates.substance = ProtocolPath('filtered_substance', filter_solute.id)
build_vacuum_coordinates.max_molecules = 1
assign_vacuum_parameters = forcefield.BuildSmirnoffSystem(f'assign_parameters')
assign_vacuum_parameters.force_field_path = ProtocolPath('force_field_path', 'global')
assign_vacuum_parameters.substance = ProtocolPath('filtered_substance', filter_solute.id)
assign_vacuum_parameters.coordinate_file_path = ProtocolPath('coordinate_file_path',
build_vacuum_coordinates.id)
# Set up the protocol to run yank.
run_yank = yank.SolvationYankProtocol('run_solvation_yank')
run_yank.solute = ProtocolPath('filtered_substance', filter_solute.id)
run_yank.solvent_1 = ProtocolPath('filtered_substance', filter_solvent.id)
run_yank.solvent_2 = Substance()
run_yank.thermodynamic_state = ProtocolPath('thermodynamic_state', 'global')
run_yank.steps_per_iteration = 500
run_yank.checkpoint_interval = 50
run_yank.solvent_1_coordinates = ProtocolPath('output_coordinate_file', equilibration_simulation.id)
run_yank.solvent_1_system = ProtocolPath('system_path', assign_full_parameters.id)
run_yank.solvent_2_coordinates = ProtocolPath('coordinate_file_path', build_vacuum_coordinates.id)
run_yank.solvent_2_system = ProtocolPath('system_path', assign_vacuum_parameters.id)
# Set up the group which will run yank until the free energy has been determined to within
# a given uncertainty
conditional_group = groups.ConditionalGroup(f'conditional_group')
conditional_group.max_iterations = 20
if options.convergence_mode != WorkflowOptions.ConvergenceMode.NoChecks:
condition = groups.ConditionalGroup.Condition()
condition.condition_type = groups.ConditionalGroup.ConditionType.LessThan
condition.right_hand_value = ProtocolPath('target_uncertainty', 'global')
condition.left_hand_value = ProtocolPath('estimated_free_energy.uncertainty',
conditional_group.id,
run_yank.id)
conditional_group.add_condition(condition)
# Define the total number of iterations that yank should run for.
total_iterations = miscellaneous.MultiplyValue('total_iterations')
total_iterations.value = 2000
total_iterations.multiplier = ProtocolPath('current_iteration', conditional_group.id)
# Make sure the simulations gets extended after each iteration.
run_yank.number_of_iterations = ProtocolPath('result',
total_iterations.id)
conditional_group.add_protocols(total_iterations, run_yank)
# Define the full workflow schema.
schema = WorkflowSchema(property_type=SolvationFreeEnergy.__name__)
schema.id = '{}{}'.format(SolvationFreeEnergy.__name__, 'Schema')
schema.protocols = {
build_full_coordinates.id: build_full_coordinates.schema,
assign_full_parameters.id: assign_full_parameters.schema,
energy_minimisation.id: energy_minimisation.schema,
equilibration_simulation.id: equilibration_simulation.schema,
filter_solvent.id: filter_solvent.schema,
filter_solute.id: filter_solute.schema,
build_vacuum_coordinates.id: build_vacuum_coordinates.schema,
assign_vacuum_parameters.id: assign_vacuum_parameters.schema,
conditional_group.id: conditional_group.schema
}
schema.final_value_source = ProtocolPath('estimated_free_energy', conditional_group.id, run_yank.id)
return schema
| [
"[email protected]"
] | |
2549b51f9b74bd83a48077d8573f285fddd9ebc2 | 70054615f56be28373b00c9df96544ec822be683 | /res/scripts/common/offers.py | d85a601ecaff58e94484a30537cc4c8545a98445 | [] | no_license | wanyancan/WOTDecompiled | c646ad700f5ec3fb81fb4e87862639ce0bdf0000 | 9ffb09007a61d723cdb28549e15db39c34c0ea1e | refs/heads/master | 2020-04-17T23:13:15.649069 | 2013-11-15T16:37:10 | 2013-11-15T16:37:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,339 | py | import time
from collections import namedtuple
import BigWorld
from constants import IS_BASEAPP
from debug_utils import *
ENTITY_TYPE_ACCOUNT = 0
ENTITY_TYPE_CLAN = 1
ENTITY_TYPE_NAMES_BY_IDS = ('Account', 'Clan')
ENTITY_TYPE_IDS_BY_NAMES = {'Account': ENTITY_TYPE_ACCOUNT,
'Clan': ENTITY_TYPE_CLAN}
ENTITY_TYPE_IDS = (ENTITY_TYPE_ACCOUNT, ENTITY_TYPE_CLAN)
OFFER_SELL = 0
_OFFER_KIND_MASK = 192
SRC_WARE_GOLD = 0
SRC_WARE_CREDITS = 256
SRC_WARE_ITEMS = 512
SRC_WARE_VEHICLE = 768
SRC_WARE_TANKMAN = 1024
SRC_WARE_KINDS = (SRC_WARE_GOLD,
SRC_WARE_CREDITS,
SRC_WARE_ITEMS,
SRC_WARE_VEHICLE,
SRC_WARE_TANKMAN)
SRC_WARE_MONEY_KINDS = (SRC_WARE_GOLD, SRC_WARE_CREDITS)
_SRC_WARE_KIND_MASK = 3840
DST_WARE_GOLD = 0
DST_WARE_CREDITS = 4096
DST_WARE_KINDS = (DST_WARE_GOLD, DST_WARE_CREDITS)
_DST_WARE_KIND_MASK = 61440
def makeOfferFlags(offerKind, srcWareKind, dstWareKind, srcEntityType, dstEntityType):
return offerKind | srcWareKind | dstWareKind | srcEntityType | dstEntityType << 3
ParsedOfferFlags = namedtuple('ParsedOfferFlags', 'offerKind srcWareKind dstWareKind srcEntityType dstEntityType')
def parseOfferFlags(flags):
raw = (flags & _OFFER_KIND_MASK,
flags & _SRC_WARE_KIND_MASK,
flags & _DST_WARE_KIND_MASK,
flags & 7,
flags >> 3 & 7)
return ParsedOfferFlags._make(raw)
def parseSrcEntityTypeFromFlags(flags):
return flags & 7
def parseDstEntityTypeFromFlags(flags):
return flags >> 3 & 7
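# Editorial sketch (not part of the original module): the helpers above pack and
# unpack an offer description into a single bit field. For example, with the
# constants defined above (argument choices here are illustrative only):
#   flags = makeOfferFlags(OFFER_SELL, SRC_WARE_GOLD, DST_WARE_CREDITS,
#                          ENTITY_TYPE_ACCOUNT, ENTITY_TYPE_CLAN)   # == 4104
#   parseOfferFlags(flags)
#   # -> ParsedOfferFlags(offerKind=0, srcWareKind=0, dstWareKind=4096,
#   #                     srcEntityType=0, dstEntityType=1)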
class OutOffers(object):
Offer = namedtuple('Offer', 'flags dstDBID dstName srcWares dstWares validTill fee')
def __init__(self, offersDict, outWriterGetter = None):
offersDict.setdefault('nextID', 0)
offersDict.setdefault('done', {})
offersDict.setdefault('out', {})
self.__data = offersDict
self.__outWriter = outWriterGetter if outWriterGetter is not None else _WriterGetter(offersDict['out'])
return
def __getitem__(self, offerID):
return _makeOutOffer(self.__data['out'][offerID])
def get(self, offerID):
offer = self.__data['out'].get(offerID)
if offer is not None:
return _makeOutOffer(offer)
else:
return
def getExt(self, offerID, default = None):
outExt = self.__data.get('outExt')
if outExt is None:
return default
else:
return outExt.get(offerID, default)
def items(self):
return [ (id, _makeOutOffer(data)) for id, data in self.__data['out'].iteritems() ]
def clear(self):
self.__data['out'].clear()
self.__data['done'].clear()
self.__data.pop('outExt', None)
self.__data['nextID'] += 1
return
def count(self):
return len(self.__data['out'])
def doneOffers(self):
return self.__data['done']
def timedOutOffers(self):
res = []
currTime = int(time.time())
for offerID, offer in self.__data['out'].iteritems():
if offer[5] <= currTime:
res.append(offerID)
return res
def inventorySlots(self):
vehs = []
numTmen = 0
for offer in self.__data['out'].itervalues():
srcWareKind = offer[0] & _SRC_WARE_KIND_MASK
if srcWareKind == SRC_WARE_VEHICLE:
vehs.append(offer[3][0])
elif srcWareKind == SRC_WARE_TANKMAN:
numTmen += 1
return (vehs, numTmen)
def moveToDone(self, offerID):
data = self.__data
data['done'][offerID] = self.__outWriter().pop(offerID)
outExt = data.get('outExt')
if outExt is not None:
outExt.pop(offerID, None)
data['nextID'] += 1
return len(data['done'])
def remove(self, offerID):
if self.__outWriter().pop(offerID, None) is not None:
self.__data['nextID'] += 1
outExt = self.__data.get('outExt')
if outExt is not None:
outExt.pop(offerID, None)
return
def removeDone(self, offerID):
self.__data['done'].pop(offerID, None)
return
def updateDestination(self, offerID, dstEntityType, dstEntityDBID, dstEntityName):
        assert self.__data['out'][offerID][1] == dstEntityDBID
def createOffer(self, flags, srcDBID, srcName, dstDBID, dstName, validSec, srcWares, srcFee, dstWares, dstFee, ext = None):
currTime = int(time.time())
validTill = currTime + int(validSec)
offer = (flags,
dstDBID,
dstName,
srcWares,
dstWares,
validTill,
srcFee)
data = self.__data
offerID = ((currTime & 1048575) << 12) + (data['nextID'] & 4095)
data['nextID'] += 1
if not (offerID not in data['out'] and offerID not in data['done']):
raise AssertionError
self.__outWriter()[offerID] = offer
data.setdefault('outExt', {})[offerID] = ext is not None and ext
return (offerID, (offerID,
flags,
srcDBID,
srcName,
srcWares,
dstWares,
validTill,
dstFee))
class InOffers(object):
Offer = namedtuple('Offer', 'srcOfferID flags srcDBID srcName srcWares dstWares validTill fee')
def __init__(self, offersDict, inWriterGetter = None):
offersDict.setdefault('nextID', 0)
offersDict.setdefault('in', {})
self.__data = offersDict
self.__inWriter = inWriterGetter if inWriterGetter is not None else _WriterGetter(offersDict['in'])
return
def __getitem__(self, offerID):
return _makeInOffer(self.__data['in'][offerID])
def get(self, offerID):
offer = self.__data['in'].get(offerID)
if offer is not None:
return _makeInOffer(offer)
else:
return
def items(self):
return [ (id, _makeOutOffer(data)) for id, data in self.__data['in'].iteritems() ]
def clear(self):
self.__data['in'].clear()
self.__data['nextID'] += 1
def count(self):
return len(self.__data['in'])
def timedOutOffers(self):
res = []
currTime = int(time.time())
for offerID, offer in self.__data['in'].iteritems():
if offer[6] <= currTime:
res.append(offerID)
return res
def findOfferBySource(self, srcEntityType, srcEntityDBID, srcOfferID):
for inOfferID, offer in self.__data['in'].iteritems():
if offer[0] == srcOfferID and offer[2] == srcEntityDBID and parseSrcEntityTypeFromFlags(offer[1]) == srcEntityType:
return inOfferID
return None
def add(self, offer):
data = self.__data
offerID = data['nextID']
data['nextID'] += 1
self.__inWriter()[offerID] = tuple(offer)
return offerID
def remove(self, offerID):
if self.__inWriter().pop(offerID, None) is not None:
self.__data['nextID'] += 1
return
def collectOutOfferResults(outOffer):
offerFlags = parseOfferFlags(outOffer.flags)
gold = 0
credits = 0
items = None
if offerFlags.srcWareKind == SRC_WARE_GOLD:
gold -= outOffer.srcWares + outOffer.fee
elif offerFlags.srcWareKind == SRC_WARE_CREDITS:
credits -= outOffer.srcWares + outOffer.fee
else:
items = outOffer.srcWares
if offerFlags.dstWareKind == DST_WARE_GOLD:
gold += outOffer.dstWares
else:
credits += outOffer.dstWares
return (offerFlags,
gold,
credits,
items)
def collectInOfferResults(inOffer):
offerFlags = parseOfferFlags(inOffer.flags)
gold = 0
credits = 0
items = None
if offerFlags.srcWareKind == SRC_WARE_GOLD:
gold += inOffer.srcWares
elif offerFlags.srcWareKind == SRC_WARE_CREDITS:
credits += inOffer.srcWares
else:
items = inOffer.srcWares
if offerFlags.dstWareKind == DST_WARE_GOLD:
gold -= inOffer.dstWares + inOffer.fee
else:
credits -= inOffer.dstWares + inOffer.fee
return (offerFlags,
gold,
credits,
items)
_makeOutOffer = OutOffers.Offer._make
_makeInOffer = InOffers.Offer._make
class _WriterGetter(object):
def __init__(self, dict):
self.__d = dict
def __call__(self):
return self.__d
| [
"[email protected]"
] | |
ede12f3384950d410a2e5b5c0bb5ba2b28076ac9 | 6c67e2ae195521910fd3d8180fc5a70b9f60db81 | /controllers/utils/rtsq_library/rtsq_level.py | bbe95532b2799521638fa5f25075270c273de949 | [
"MIT"
] | permissive | zeroday0619/Real-Time-Delivery-Query-API | be8b7f0cd74e6c8651fc034064f51e6ec20bac17 | fc2f973c205fe453f77ae27dcd99ce3c2e84528d | refs/heads/master | 2020-09-08T01:43:08.857874 | 2019-11-17T22:32:44 | 2019-11-17T22:32:44 | 220,975,056 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | def level(resp):
"""
    Args:
        resp: dict containing a 'level' field
    Returns:
        dict with 'code' and 'level', where 'level' is a Korean delivery-status
        label: 1: 배송 준비중 (preparing for delivery), 2: 집화 완료 (pickup complete),
        3: 배송중 (in transit), 4: 지점 도착 (arrived at branch),
        5: 배송 출발 (out for delivery), 6: 배송 완료 (delivered)
"""
if resp['level'] == 1:
return {
"code": 1,
"level": "배송 준비중"
}
elif resp['level'] == 2:
return {
"code": 2,
"level": "집화 완료"
}
elif resp['level'] == 3:
return {
"code": 3,
"level": "배송중"
}
elif resp['level'] == 4:
return {
"code": 4,
"level": "지점 도착"
}
elif resp['level'] == 5:
return {
"code": 5,
"level": "배송 출발"
}
elif resp['level'] == 6:
return {
"code": 6,
"level": "배송 완료"
}
else:
return {
"code": 0,
"level": "Internal System Error"
}
| [
"[email protected]"
] | |
3b5723e132a7e8f7a265ee90af5a94bd78032635 | cccabd5a16b9e230bbf8379b4f8d42a64f0f2608 | /pysweng/tests/test_oop.py | 8a4fc0b20b9e6804472a681a7c45f97ba0f8afaf | [
"MIT"
] | permissive | lopezpdvn/pysweng | 75bef93803c15cdf0859c6fefcee2693fb011364 | af28b5454385db5314876dde37f2c2bc18731734 | refs/heads/master | 2021-01-18T23:42:55.054505 | 2016-12-30T09:43:18 | 2016-12-30T09:43:18 | 55,115,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | import unittest
from pysweng.oop import (dummy_function, DUMMY_GLOBAL_CONSTANT_0,
DUMMY_GLOBAL_CONSTANT_1)
class TestDummies(unittest.TestCase):
def test_global_variables(self):
self.assertEqual(DUMMY_GLOBAL_CONSTANT_0, 'FOO')
self.assertEqual(DUMMY_GLOBAL_CONSTANT_1, 'BAR')
def test_dummy_funcion(self):
self.assertEqual(dummy_function('a'), 'a');
self.assertEqual(dummy_function(555), 555);
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
78c29f84ffce566ea51e7c3404a5822445abcd29 | 230159b8ec7f83369cd5fb04623f901908aaf73d | /src/hackerrank/algo/implementation/kangaroo.py | 33151a6d503e1f4f7182f49c698990759b49d8dd | [] | no_license | nikhilkuria/algo | e006c50c880df0fae882db9bb92d1a08eff36a97 | 1981d6101f345f6ea0bd0da002c6e4e45f6f4523 | refs/heads/master | 2021-01-17T20:16:16.612384 | 2018-06-27T07:36:56 | 2018-06-27T07:36:56 | 60,084,240 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the kangaroo function below.
def kangaroo(x1, v1, x2, v2):
kangaroo_one_pos = x1
kangaroo_two_pos = x2
while True:
if kangaroo_one_pos == kangaroo_two_pos:
return "YES"
if kangaroo_one_pos > kangaroo_two_pos and v1 >= v2:
break
if kangaroo_two_pos > kangaroo_one_pos and v2 >= v1:
break
kangaroo_one_pos = kangaroo_one_pos + v1
kangaroo_two_pos = kangaroo_two_pos + v2
return "NO"
print(kangaroo(0,2,5,3))
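# Editorial note (not part of the original solution): the simulation above is
# equivalent to a closed-form check -- with x1 < x2 the kangaroos can only meet
# when v1 > v2 and the gap is a whole number of catch-up steps, i.e.
# (x2 - x1) % (v1 - v2) == 0.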
| [
"[email protected]"
] | |
b10bd3e6fce28ba55ca234a9dcb7dd608cd4763a | 0de115b69243361e7926d0a5400c1fb475a642f5 | /4.5.4 CodingExercise2.py | 7769a572921fc132cf0a40d0db1879e526643fc9 | [] | no_license | Bill-Fujimoto/Intro-to-Python-Course | f475f1c578e33ac37a796038fdaa6ad247876c55 | afe365b0233c4fadb78b2818164ab5726ecd92bb | refs/heads/master | 2020-04-12T21:19:08.688112 | 2018-12-21T21:50:09 | 2018-12-21T21:50:09 | 162,759,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,977 | py | #Recall last exercise that you wrote a function, word_lengths,
#which took in a string and returned a dictionary where each
#word of the string was mapped to an integer value of how
#long it was.
#
#This time, write a new function called length_words so that
#the returned dictionary maps an integer, the length of a
#word, to a list of words from the sentence with that length.
#If a word occurs more than once, add it more than once. The
#words in the list should appear in the same order in which
#they appeared in the sentence.
#
#For example:
#
# length_words("I ate a bowl of cereal out of a dog bowl today.")
# -> {3: ['ate', 'dog', 'out'], 1: ['a', 'a', 'i'],
# 5: ['today'], 2: ['of', 'of'], 4: ['bowl'], 6: ['cereal']}
#
#As before, you should remove any punctuation and make the
#string lowercase.
#
#Hint: To create a new list as the value for a dictionary key,
#use empty brackets: lengths[wordLength] = []. Then, you would
#be able to call lengths[wordLength].append(word). Note that
#if you try to append to the list before creating it for that
#key, you'll receive a KeyError.
#Write your function here!
def length_words(string):
to_replace = ".,'!?"
for mark in to_replace:
string = string.replace(mark, "")
string=string.lower()
word_list=string.split()
len_words={}
for word in word_list:
if not len(word)in len_words:
len_words[len(word)] = []
len_words[len(word)].append(word)
return len_words
#Below are some lines of code that will test your function.
#You can change the value of the variable(s) to test your
#function with different inputs.
#
#If your function works correctly, this will originally
#print:
#{1: ['i', 'a', 'a'], 2: ['of', 'of'], 3: ['ate', 'out', 'dog'], 4: ['bowl', 'bowl'], 5: ['today'], 6: ['cereal']}
#
#The keys may appear in a different order, but within each
#list the words should appear in the order shown above.
print(length_words("I ate a bowl of cereal out of a dog bowl today."))
| [
"@vfr1200f1#"
] | @vfr1200f1# |
8015fef8dfd115d1d50b8421196c5d64d05910a8 | 1e88ef7359fc4a6bb4c8d0886971086e14124f15 | /models/CaptionModel.py | 19eb207e6466770b198a0f484cc6e30c9fc8e6be | [] | no_license | sunyuxi/RobustChangeCaptioning | 2e95e6b2e36adce0e2603be0003d28b3431a323d | c3ea1206a34cae8879a2accffc11c15b8fce0181 | refs/heads/master | 2023-08-17T16:02:22.527198 | 2021-08-19T20:55:44 | 2021-08-19T20:55:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,359 | py | # This file contains ShowAttendTell and AllImg model
# ShowAttendTell is from Show, Attend and Tell: Neural Image Caption Generation with Visual Attention
# https://arxiv.org/abs/1502.03044
# AllImg is a model where
# img feature is concatenated with word embedding at every time step as the input of lstm
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import reduce
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import *
class CaptionModel(nn.Module):
def __init__(self):
super(CaptionModel, self).__init__()
# implements beam search
# calls beam_step and returns the final set of beams
# augments log-probabilities with diversity terms when number of groups > 1
def forward(self, *args, **kwargs):
mode = kwargs.get('mode', 'forward')
if 'mode' in kwargs:
del kwargs['mode']
return getattr(self, '_'+mode)(*args, **kwargs)
def beam_search(self, init_state, init_logprobs, *args, **kwargs):
# function computes the similarity score to be augmented
def add_diversity(beam_seq_table, logprobsf, t, divm, diversity_lambda, bdash):
local_time = t - divm
unaug_logprobsf = logprobsf.clone()
for prev_choice in range(divm):
prev_decisions = beam_seq_table[prev_choice][local_time]
for sub_beam in range(bdash):
for prev_labels in range(bdash):
logprobsf[sub_beam][prev_decisions[prev_labels]] = logprobsf[sub_beam][prev_decisions[prev_labels]] - diversity_lambda
return unaug_logprobsf
# does one step of classical beam search
def beam_step(logprobsf, unaug_logprobsf, beam_size, t, beam_seq, beam_seq_logprobs, beam_logprobs_sum, state):
#INPUTS:
#logprobsf: probabilities augmented after diversity
#beam_size: obvious
#t : time instant
        #beam_seq : tensor containing the beams
        #beam_seq_logprobs: tensor containing the beam logprobs
        #beam_logprobs_sum: tensor containing joint logprobs
        #OUTPUTS:
#beam_seq : tensor containing the word indices of the decoded captions
#beam_seq_logprobs : log-probability of each decision made, same size as beam_seq
#beam_logprobs_sum : joint log-probability of each beam
ys,ix = torch.sort(logprobsf,1,True)
candidates = []
cols = min(beam_size, ys.size(1))
rows = beam_size
if t == 0:
rows = 1
for c in range(cols): # for each column (word, essentially)
for q in range(rows): # for each beam expansion
#compute logprob of expanding beam q with word in (sorted) position c
local_logprob = ys[q,c].item()
candidate_logprob = beam_logprobs_sum[q] + local_logprob
local_unaug_logprob = unaug_logprobsf[q,ix[q,c]]
candidates.append({'c':ix[q,c], 'q':q, 'p':candidate_logprob, 'r':local_unaug_logprob})
candidates = sorted(candidates, key=lambda x: -x['p'])
new_state = [_.clone() for _ in state]
#beam_seq_prev, beam_seq_logprobs_prev
if t >= 1:
                #we'll need these as reference when we fork beams around
beam_seq_prev = beam_seq[:t].clone()
beam_seq_logprobs_prev = beam_seq_logprobs[:t].clone()
for vix in range(beam_size):
v = candidates[vix]
#fork beam index q into index vix
if t >= 1:
beam_seq[:t, vix] = beam_seq_prev[:, v['q']]
beam_seq_logprobs[:t, vix] = beam_seq_logprobs_prev[:, v['q']]
#rearrange recurrent states
for state_ix in range(len(new_state)):
# copy over state in previous beam q to new beam at vix
new_state[state_ix][:, vix] = state[state_ix][:, v['q']] # dimension one is time step
#append new end terminal at the end of this beam
beam_seq[t, vix] = v['c'] # c'th word is the continuation
beam_seq_logprobs[t, vix] = v['r'] # the raw logprob here
beam_logprobs_sum[vix] = v['p'] # the new (sum) logprob along this beam
state = new_state
return beam_seq,beam_seq_logprobs,beam_logprobs_sum,state,candidates
# Start diverse_beam_search
cfg = kwargs['cfg']
gpu_ids = cfg.gpu_id
device = torch.device("cuda:%d" % gpu_ids[0])
beam_size = cfg.model.speaker.get('beam_size', 10)
group_size = cfg.model.speaker.get('group_size', 1)
diversity_lambda = cfg.model.speaker.get('diversity_lambda', 0.5)
decoding_constraint = cfg.model.speaker.get('decoding_constraint', 0)
max_ppl = cfg.model.speaker.get('max_ppl', 0)
bdash = beam_size // group_size # beam per group
# INITIALIZATIONS
beam_seq_table = [torch.LongTensor(self.seq_length, bdash).zero_() for _ in range(group_size)]
beam_seq_logprobs_table = [torch.FloatTensor(self.seq_length, bdash).zero_() for _ in range(group_size)]
beam_logprobs_sum_table = [torch.zeros(bdash) for _ in range(group_size)]
# logprobs # logprobs predicted in last time step, shape (beam_size, vocab_size)
done_beams_table = [[] for _ in range(group_size)]
state_table = [list(torch.unbind(_)) for _ in torch.stack(init_state).chunk(group_size, 2)]
logprobs_table = list(init_logprobs.chunk(group_size, 0))
# END INIT
# Chunk elements in the args
args = list(args)
args = [_.chunk(group_size) if _ is not None else [None]*group_size for _ in args]
args = [[args[i][j] for i in range(len(args))] for j in range(group_size)]
for t in range(self.seq_length + group_size - 1):
for divm in range(group_size):
if t >= divm and t <= self.seq_length + divm - 1:
# add diversity
logprobsf = logprobs_table[divm].data.float()
# suppress previous word
if decoding_constraint and t-divm > 0:
logprobsf.scatter_(1, beam_seq_table[divm][t-divm-1].unsqueeze(1).to(device), float('-inf'))
# suppress UNK tokens in the decoding (here <UNK> has an index of 1)
logprobsf[:, 1] = logprobsf[:, 1] - 1000
# diversity is added here
# the function directly modifies the logprobsf values and hence, we need to return
# the unaugmented ones for sorting the candidates in the end. # for historical
# reasons :-)
unaug_logprobsf = add_diversity(beam_seq_table,logprobsf,t,divm,diversity_lambda,bdash)
# infer new beams
beam_seq_table[divm],\
beam_seq_logprobs_table[divm],\
beam_logprobs_sum_table[divm],\
state_table[divm],\
candidates_divm = beam_step(logprobsf,
unaug_logprobsf,
bdash,
t-divm,
beam_seq_table[divm],
beam_seq_logprobs_table[divm],
beam_logprobs_sum_table[divm],
state_table[divm])
# if time's up... or if end token is reached then copy beams
for vix in range(bdash):
if beam_seq_table[divm][t-divm,vix] == 0 or t == self.seq_length + divm - 1:
final_beam = {
'seq': beam_seq_table[divm][:, vix].clone(),
'logps': beam_seq_logprobs_table[divm][:, vix].clone(),
'unaug_p': beam_seq_logprobs_table[divm][:, vix].sum().item(),
'p': beam_logprobs_sum_table[divm][vix].item()
}
if max_ppl:
final_beam['p'] = final_beam['p'] / (t-divm+1)
done_beams_table[divm].append(final_beam)
# don't continue beams from finished sequences
beam_logprobs_sum_table[divm][vix] = -1000
# move the current group one step forward in time
it = beam_seq_table[divm][t-divm]
logprobs_table[divm], state_table[divm] = self.get_logprobs_state(it.to(device), *(args[divm] + [state_table[divm]]))
# all beams are sorted by their log-probabilities
done_beams_table = [sorted(done_beams_table[i], key=lambda x: -x['p'])[:bdash] for i in range(group_size)]
done_beams = reduce(lambda a,b:a+b, done_beams_table)
return done_beams
| [
"[email protected]"
] | |
7782690aa20fda3d04ab9b554b0255a2f03efd70 | 1e1a5b0e7c939ef9b2aafb3a7c86a1b78d1d014e | /GUI/viewRecord.py | c5d4ef1aab6674c4511dc52fe5d499529a1c34f9 | [] | no_license | SAR2652/MedRec | deac530534974e64d1e204620a58cde02c27804b | b0c0bdf34d67bb115f6bb7379cc6c8ade51e1117 | refs/heads/master | 2020-04-24T11:37:21.954485 | 2019-07-25T07:26:35 | 2019-07-25T07:26:35 | 171,931,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | import sys
from PyQt5.QtWidgets import QWidget, QListWidget, QLabel, QComboBox
from PyQt5.QtGui import QFont
from PyQt5.QtCore import QUrl
path = 'C:/MedRec'
sys.path.append(path + '/GUI/')
from autocompletecombo import Autocomplete
class ViewRecord(QWidget):
def __init__(self, parent = None):
super(ViewRecord, self).__init__(parent)
self.initViewRecordUI()
def initViewRecordUI(self):
self.setGeometry(525, 225, 1080, 720)
#initialize labels
self.patient_name_label = QLabel('Patient Name : ', self)
self.case_name_label = QLabel('Case Name : ', self)
#initialize fields
self.patient_name_entry = Autocomplete(self)
self.case_name_entry = Autocomplete(self)
#initi
| [
"[email protected]"
] | |
38cdbaf54cca763167f5f61a21d94d207591b3a2 | 5485c26474df9c7f68d94976fae45da5f0091c3c | /auto_feature.py | de319e49c20d33cfabc61c32af47395ae90da9f0 | [] | no_license | CheneyYin/Motor | ecaab18e084ed4083c9ccb980a2d9b4310bf0637 | f3009e0335a9a70d5299b3814f7df4f43b03eff4 | refs/heads/master | 2020-05-07T12:40:15.447944 | 2019-08-12T03:28:22 | 2019-08-12T03:28:22 | 180,515,434 | 1 | 2 | null | 2019-04-17T07:00:08 | 2019-04-10T06:21:22 | Python | UTF-8 | Python | false | false | 627 | py | import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import tsfresh.feature_extraction.feature_calculators as fc
import matplotlib.pyplot as plt
import warnings
train_path1 = '../Motor-Data/Motor_tain/N/00aab5a5-e096-4e4e-803f-a8525506cbd8_F.csv'
train_path2 = '../Motor-Data/Motor_tain/N/00aab5a5-e096-4e4e-803f-a8525506cbd8_B.csv'
df1 = pd.read_csv(train_path1, header = 0)
df2 = pd.read_csv(train_path2, header = 0)
df = pd.DataFrame(data = np.column_stack([df1['ai1'],df1['ai2'], df2['ai1'], df2['ai2'], range(79999), '1']), columns = ['F_ai1','F_ai2', 'B_ai1', 'B_ai2', 'time', 'id'])
| [
"[email protected]"
] | |
ac0bc0f07ccc5cf690d123d9225d15656bbe59e7 | 4c7aac98eff82b6dc82334755096df5ad00237e6 | /Python/menu.py | 66e3ba4c5b15a961c7e3ea0fd84e0ebe95f018a3 | [] | no_license | HolbertonSchoolTun/HackDay_mastermind | 05fe07993f322384a1c2c644c7ad80441161ef8e | 92c5bbb0d01bae8dfaae3015195db6f33942c5a5 | refs/heads/master | 2022-12-24T04:42:43.966128 | 2020-09-19T02:35:39 | 2020-09-19T02:35:39 | 296,698,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 895 | py | #!/usr/bin/python3
"""
"""
import pygame
import pygame_menu
from main import start_game
class Game():
pygame.init()
surface = pygame.display.set_mode((450, 600))
def set_difficulty(value, difficulty):
if value == 1:
return(1)
else:
return (2)
def start_the_game():
# Do the job here !
start_game()
def Play_Mode(mode, value):
pass
pygame.display.set_caption("Mastermind")
menu = pygame_menu.Menu(600, 450, 'MasterMind',
theme=pygame_menu.themes.THEME_DARK)
menu.add_selector('Difficulty : ', [('Hard', 1), ('Easy', 2)], onchange=set_difficulty)
menu.add_selector('Play Mode : ', [('Single Player', 1), ('Two Players', 2)], onchange=Play_Mode)
menu.add_button('Play', start_the_game)
menu.add_button('Quit', pygame_menu.events.EXIT)
menu.mainloop(surface) | [
"achrefbs"
] | achrefbs |
6e1066a32d3b678c93a683c91c32ca9925549774 | 72d010d00355fc977a291c29eb18aeb385b8a9b0 | /MPK261/__init__.py | 1878e1129184af07da8510e9e370e01adae46916 | [] | no_license | maratbakirov/AbletonLive10_MIDIRemoteScripts | bf0749c5c4cce8e83b23f14f671e52752702539d | ed1174d9959b20ed05fb099f0461bbc006bfbb79 | refs/heads/master | 2021-06-16T19:58:34.038163 | 2021-05-09T11:46:46 | 2021-05-09T11:46:46 | 203,174,328 | 0 | 0 | null | 2019-08-19T13:04:23 | 2019-08-19T13:04:22 | null | UTF-8 | Python | false | false | 741 | py | # Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/MPK261/__init__.py
# Compiled at: 2018-04-23 20:27:04
from __future__ import absolute_import, print_function, unicode_literals
from .MPK261 import MPK261
from _Framework.Capabilities import controller_id, inport, outport, CONTROLLER_ID_KEY, PORTS_KEY, NOTES_CC, SCRIPT, REMOTE
def get_capabilities():
return {CONTROLLER_ID_KEY: controller_id(vendor_id=2536, product_ids=[
37], model_name='MPK261'),
PORTS_KEY: [
inport(props=[NOTES_CC, SCRIPT, REMOTE]),
outport(props=[SCRIPT, REMOTE])]}
def create_instance(c_instance):
return MPK261(c_instance)
| [
"[email protected]"
] | |
8021537043bd81114de1c88e600fa679c9929fbe | db9dd14e4f5acc3f8ab1e2d6abc296489a896a23 | /factor_catalog.py | 84e055cc211f31175e83a306e32f02b5f901ebfd | [] | no_license | IVRL/GANLocalEditing | 78696cbe052b1060bd3a5ccda3556d53ff0ddf9e | 4c87c1fb332113f38fc4e5ff7424b9655ca0e187 | refs/heads/master | 2021-04-24T12:42:04.789011 | 2020-05-02T17:43:17 | 2020-05-02T20:39:56 | 250,119,837 | 155 | 18 | null | null | null | null | UTF-8 | Python | false | false | 1,869 | py | '''
To download pickled instances for FFHQ and LSUN-Bedrooms, visit: https://drive.google.com/open?id=1GYzEzOCaI8FUS6JHdt6g9UfNTmpO08Tt
'''
import torch
import ptutils
from spherical_kmeans import MiniBatchSphericalKMeans
def one_hot(a, n):
import numpy as np
b = np.zeros((a.size, n))
b[np.arange(a.size), a] = 1
return b
class FactorCatalog:
def __init__(self, k, random_state=0, factorization=None, **kwargs):
if factorization is None:
factorization = MiniBatchSphericalKMeans
self._factorization = factorization(n_clusters=k, random_state=random_state, **kwargs)
self.annotations = {}
def _preprocess(self, X):
X_flat = ptutils.partial_flat(X)
return X_flat
def _postprocess(self, labels, X, raw):
heatmaps = torch.from_numpy(one_hot(labels, self._factorization.cluster_centers_.shape[0])).float()
heatmaps = ptutils.partial_unflat(heatmaps, N=X.shape[0], H=X.shape[-1])
if raw:
heatmaps = ptutils.MultiResolutionStore(heatmaps, 'nearest')
return heatmaps
else:
heatmaps = ptutils.MultiResolutionStore(torch.cat([(heatmaps[:, v].sum(1, keepdim=True)) for v in
self.annotations.values()], 1), 'nearest')
labels = list(self.annotations.keys())
return heatmaps, labels
def fit_predict(self, X, raw=False):
self._factorization.fit(self._preprocess(X))
labels = self._factorization.labels_
return self._postprocess(labels, X, raw)
def predict(self, X, raw=False):
labels = self._factorization.predict(self._preprocess(X))
return self._postprocess(labels, X, raw)
def __repr__(self):
header = '{} catalog:'.format(type(self._factorization))
return '{}\n\t{}'.format(header, self.annotations)
| [
"[email protected]"
] | |
2695f532057b561bf9fbf9c8c1505f68f8c04fb4 | 5d03ee41677bbe4c37a873932f4e2ca63cb50df1 | /gae.sonstige.d/gae.mariahilferstrasse.d/gdata_samples.py | db7a574db198ef71ff3d35ffe6a27715b837f2a3 | [] | no_license | wolfhesse/saikogallery | 159acc1bab431070e8156da8d355e9e51ec0d4ac | f719f29be54d1e2190f3c841ddeeb58997aa555a | refs/heads/master | 2016-09-06T00:41:58.012920 | 2013-05-23T22:11:13 | 2013-05-23T22:11:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,390 | py | import pickle
import gdata.spreadsheet.text_db
client = gdata.spreadsheet.text_db.DatabaseClient()
client.SetCredentials('wolfgang.schuessel','iybnrxaseld')
#client.SetCredentials('ohramweltgeschehen','kidman')
databases=client.GetDatabases(name='imported-from-query')
tables=databases[0].GetTables(name='mhs')
target=tables[0]
source=tables[1]
print 'target table is ' + target.name
print 'source table is ' + source.name
databases=client.GetDatabases(name='geo20080813')
db=databases[0]
tables=db.GetTables(name='')
table=tables[0]
records=table.GetRecords(1,100)
print [r.content for r in records]
print [r.content for r in records if r.content['pickled']!=None]
ap=[r.content['pickled'] for r in records]
print len(ap)
print ap
au=[pickle.loads(i) for i in ap]
print au
#['', '', {'test': 'true', 'name': 'show'}, '', {'hausnummer': 5, 'has_content': False}, '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', {'items': {'lokal': 'Asia Cooking'}, 'wifi': True}, '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '']
print len(au)
#50
for i in range(0,len(au)):
print i,au[i]
print records[30].content
#{'fundstelle': 'TRUE', 'hausnummer': '31', 'pickled': "(dp0\nS'items'\np1\n(dp2\nS'lokal'\np3\nS'Asia Cooking'\np4\nssS'wifi'\np5\nI01\ns.", 'address': 'mariahilferstrasse 31 wien', 'name': 'mhs:31'}
| [
"[email protected]"
] | |
f7d9aea052dd03a9baf3a059a9a907746703c781 | a4d4e2c99b1ee5e8045bfbf55949ea5ae34ae371 | /Jobb/Jobbtider.spec | fab167111d3db9b7dc2170e30bf9f2712feb6021 | [] | no_license | NBerlin/LearningPython | 87ee01633a69d719ce79df0177b3740305569621 | 8d59f9dee34beb712160a13b19c6a882e9b8755d | refs/heads/master | 2022-11-05T03:49:44.159119 | 2019-05-09T17:55:04 | 2019-05-09T17:55:04 | 124,292,605 | 0 | 0 | null | 2022-10-26T17:06:30 | 2018-03-07T20:47:59 | Python | UTF-8 | Python | false | false | 888 | spec | # -*- mode: python -*-
block_cipher = None
a = Analysis(['Jobbtider.py'],
pathex=['C:\\Users\\Nicki\\Documents\\Programmering\\LearnPython\\Jobb'],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
exclude_binaries=True,
name='Jobbtider',
debug=False,
strip=False,
upx=True,
console=True )
coll = COLLECT(exe,
a.binaries,
a.zipfiles,
a.datas,
strip=False,
upx=True,
name='Jobbtider')
| [
"[email protected]"
] | |
5310941c8e4e3eab87b903780fb19e7edf078c70 | f5d1e8b54ddbc51a9ef1b868eee93096d9b0fbeb | /weapp/wapi/mall/__init__.py | 79511ca63e741640f660e8b960872f86ac13619a | [] | no_license | chengdg/weizoom | 97740c121724fae582b10cdbe0ce227a1f065ece | 8b2f7befe92841bcc35e0e60cac5958ef3f3af54 | refs/heads/master | 2021-01-22T20:29:30.297059 | 2017-03-30T08:39:25 | 2017-03-30T08:39:25 | 85,268,003 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | # -*- coding: utf-8 -*-
import product
import promotion
| [
"[email protected]"
] | |
bb452e72141b555c7dd30f34a66fc3fe30f86fbd | 220a2a22f7ecbb960e6a09b1153ec5094aef15f5 | /Log-Parsers/Recognition_Long_Talks/general_classes.py | a374a5df875af86c516cbe3be40426c999673ee0 | [] | no_license | jrweis01/Rubidium | 89b27b8376891b42eb6b8bf952f70d92dd81768c | 6050241aa19401bd5196939aadfc4a095f771d0a | refs/heads/master | 2020-05-30T05:29:11.649283 | 2019-06-02T07:03:19 | 2019-06-02T07:03:19 | 189,561,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,156 | py | from templates_data import *
import openpyxl
import os
import sys
import shutil
import datetime
class Utils(object):
def fetch_files_from_folder(self, pathToFolder):
_pathToFiles = []
_fileNames = []
for dirPath, dirNames, fileNames in os.walk(pathToFolder):
selected_path = [os.path.join(dirPath, item) for item in fileNames]
_pathToFiles.extend(selected_path)
selectedFile = [item for item in fileNames]
_fileNames.extend(selectedFile)
# Try to remove empty entries if none of the required files are in directory
try:
_pathToFiles.remove('')
            _fileNames.remove('')  # was _imageFiles, which is not defined in this method
except ValueError:
pass
# Warn if nothing was found in the given path
        if selectedFile == []:
            print('No files with given parameters were found in:\n' + dirPath + '\n')
        print('%d files were found in searched folder(s)' % len(_fileNames))
return _pathToFiles, _fileNames
def get_excel_worksheet(self):
pass
@staticmethod
def insertion_sort(items):
for i in range(1, len(items)):
j = i
while j > 0 and items[j] > items[j - 1]:
items[j - 1], items[j] = items[j], items[j - 1]
j = j - 1
return items
def sort_order_dict(self,order_dict):
for key in order_dict:
items = order_dict[key]
items = self.insertion_sort(items)
def sorting_headers(self,sorting_dict,order_dict):
sorted_list = []
for m in order_dict["noise_file_name"]:
for i in order_dict["trig_to_ASR_delay"]:
for j in order_dict["signal_dB"]:
for k in order_dict["noise_dB"]:
for key in sorting_dict:
if (sorting_dict[key]["noise_file_name"] == str(m) and
sorting_dict[key]["trig_to_ASR_delay"] == str(int(i)) and
sorting_dict[key]["signal_dB"] == str(int(j)) and
sorting_dict[key]["noise_dB"] == str(int(k))):
sorted_list.append(key)
return sorted_list
def clear_dict_values(self,dict):
for key in dict:
dict[key].clear()
def get_folder_location_path(self,folder):
program_path = os.path.dirname(sys.argv[0])
template_path = program_path + '\\' + folder
return template_path
class ExcelHandler(object):
def __init__(self, workbook_name):
self.wb_name = workbook_name
self.wb_name_with_dt = self._creat_new_excel_from_template_with_name_and_datetime(workbook_name)
self.wb = openpyxl.load_workbook(str(self.wb_name_with_dt))
self.template_info = {}
self.template_indexes = {'TRIG_ONLY': 4, 'MP_mTRIG_sASR': 4 ,'LJ_sTRIG_mASR' : 4}
self.sheet_MP = None
self.sheet_trig_only = None
self.sheet_LJ_sTRIG_mASR = None
def run_log_printing_LJ_sTRIG_mASR(self,log_dict):
''' for 'LJ_sTRIG_mASR' SHEET TEMPLATE'''
asr_section = log_dict['asr_results_dict']
trig_section = log_dict['trig_results_dict_format']
if self.sheet_LJ_sTRIG_mASR is None:
self.sheet_LJ_sTRIG_mASR = self._open_sheet('LJ_sTRIG_mASR')
ROW = self.template_indexes['LJ_sTRIG_mASR']
''' printing header section'''
self._write_line_to_excel_sheet(self.sheet_LJ_sTRIG_mASR, ROW, 1, log_dict,EXCEL_LJ_sTRIG_mASR_TEMPLATE_HEADER_SECTION)
''' printing trig section'''
self._write_line_to_excel_sheet(self.sheet_LJ_sTRIG_mASR,ROW,27,trig_section,EXCEL_LJ_sTRIG_mASR_TEMPLATE_TRIG_SECTION)
''' printing asr section'''
cmd_template_order = ['volume_down' , 'volume_up' , 'next_song', 'pause' , 'resume', 'what_distance_have_i_done']
cmd_template_dict = {'volume_down': 'empty1.wav' , 'volume_up' : 'empty2.wav' , 'next_song' : 'empty3.wav', 'pause' : 'empty4.wav',
'resume' : 'empty5.wav' , 'what_distance_have_i_done' : 'empty6.wav'}
for command in cmd_template_order:
curr_key = cmd_template_dict[command]
if curr_key in asr_section.keys():
curr_cmd_dict = asr_section[curr_key]
self._write_line_to_excel_sheet(self.sheet_LJ_sTRIG_mASR, ROW, 10, curr_cmd_dict,
EXCEL_LJ_sTRIG_mASR_TEMPLATE_ASR_SECTION)
else:
pass
ROW += 1
self.template_indexes['LJ_sTRIG_mASR']+=6
def run_log_printing_TRIG_ONLY(self,log_dict,exl_tab_name):
''' for 'TRIG_ONLY' SHEET TEMPLATE'''
if self.sheet_trig_only is None:
self.sheet_trig_only = self._open_sheet(exl_tab_name)
ROW = self.template_indexes[exl_tab_name]
self._write_line_to_excel_sheet(self.sheet_trig_only,ROW,1,log_dict,EXCEL_TRIG_TEMPLATE_TUPLE)
self.template_indexes[exl_tab_name] += 1
def run_log_printing_TRIG_ASR_MP(self,log_dict):
''' for 'MP_mTrig_sASR' SHEET TEMPLATE'''
if self.sheet_MP is None:
self.sheet_MP = self._open_sheet("MP_mTRIG_sASR")
ROW = self.template_indexes["MP_mTRIG_sASR"]
self._write_line_to_excel_sheet(self.sheet_MP,ROW,1,log_dict,EXCEL_MP_CMD_TEMPLATE)
self.template_indexes['MP_mTRIG_sASR']+=1
def get_new_wb_name(self):
return self.wb_name_with_dt
def _creat_new_excel_from_template_with_name_and_datetime(self,project_name):
program_path = os.path.dirname(sys.argv[0])
template_path = program_path + '\\template\exl.xlsx'
shutil.copy2(str(template_path), str(program_path))
date_time = datetime.datetime.strftime(datetime.datetime.now(), '_%Y-%m-%d__%H_%M_%S')
exl_file_name = str(project_name) + str(date_time) + ".xlsx"
os.rename("exl.xlsx", str(exl_file_name))
return str(exl_file_name)
def _write_line_to_excel_sheet(self,sheet,row,column,val_dict,template_list):
row = str(row)
start_col = column
for i, key in enumerate(template_list):
col = self._num_to_excel_alphabeit_colms(i+start_col)
try:
# sheet[col + row] = str(val_dict[key])
sheet[col + row] = val_dict[key]
            except Exception:
                print(key)
def _open_sheet(self,sheet_name):
sheet = self.wb.get_sheet_by_name(sheet_name)
return sheet
def _num_to_excel_alphabeit_colms(self,index_num):
cal1 = index_num % 27
cal2 = index_num // 26
new = index_num - cal2 * 26
if new == 0:
new = 26
cal2 -= 1
if cal2:
mychar = chr(cal2 + 64) + chr(new + 64)
else:
mychar = chr(index_num + 64)
return mychar
def save_workbook(self):
self.wb.save(str(self.wb_name_with_dt))
| [
"[email protected]"
] | |
010ca186f50f28bb57286f398c214119a7c6dfd3 | 4a439662a39631095c75c6a76b88ca3d18f3fad5 | /logisticRegression.py | 9dab37ff36c2176f59125b8e6cc7a8c824057c80 | [] | no_license | allen9408/ICDM_Features | 9cdee93526f776954b5d2610cb8ba4e3bb8ea52c | 293d49f106bb18d93b6a894a10ddd4f3b0fdd27f | refs/heads/master | 2020-12-03T00:15:04.507551 | 2017-07-16T20:06:43 | 2017-07-16T20:06:43 | 96,002,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,627 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tempfile
import os
from six.moves import urllib
import pandas as pd
import tensorflow as tf
from featureloader import featureloader
# load training features
train_data = featureloader('TRAIN', 'ECG5000')
df_train, feature_column = train_data.featureloader_UCR()
# df_train.to_csv('tmp_1.csv')
# load test training
test_data = featureloader('TEST', 'ECG5000')
df_test, feature_column = test_data.featureloader_UCR()
# df_test.to_csv('tmp_2.csv')
# remove \n in feature_column
feature_column[-1] = feature_column[-1].strip()
print(feature_column)
def input_fn(df, feature_column):
feature_cols = {k: tf.constant(df[k].values, shape=[df[k].size, 1]) for k in feature_column}
label = tf.constant(df["label"].values)
print(df["label"])
return feature_cols, label
def train_input_fn():
return input_fn(df_train, feature_column)
def eval_input_fn():
return input_fn(df_test, feature_column)
# crossed_columns = tf.contrib.layers.crossed_columns(feature_column)
index = 0
layer=[]
for feature in feature_column:
layer.append(tf.contrib.layers.real_valued_column(feature))
index+= 1
model_dir = tempfile.mkdtemp()
m = tf.contrib.learn.LinearClassifier(feature_columns=layer,
model_dir=model_dir)
# m = tf.contrib.learn.DNNClassifier(feature_columns=layer,
# model_dir=model_dir,
# hidden_units=[100,50])
m.fit(input_fn = train_input_fn, steps=200)
results = m.evaluate(input_fn=eval_input_fn, steps=1)
for key in sorted(results):
print("%s: %s" % (key, results[key])) | [
"[email protected]"
] | |
f11868c799a295320f785d89daea2d28092944a7 | 05824a52e2ca67db8b8d2bd21ece1a53dc5d23de | /code/configuration.py | 7e48afa3edbec5a7b1f8e4dc19656ad3e4e002e4 | [] | no_license | HankTsai/Sales_Forecast_Retailer | 65c19f77fdb3ac573abf9846dee46695e45c91ac | 07d7a37c4b3cc482765ae747fd1cfd9b96096dc1 | refs/heads/main | 2023-07-18T06:13:38.393562 | 2021-08-31T03:40:59 | 2021-08-31T03:40:59 | 378,896,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,577 | py |
import os
import logging
from pymssql import connect
from datetime import datetime
from configparser import ConfigParser
config = ConfigParser()
config.read('setting.ini')
class CodeLogger:
"""log儲存設定模組"""
def __init__(self):
self.logger = logging.getLogger(os.path.basename(__file__))
self.formatter = logging.Formatter(
'["%(asctime)s - %(levelname)s - %(name)s - %(message)s" - function:%(funcName)s - line:%(lineno)d]')
self.log_name = config['filepath']['log_path'] + datetime.now().strftime("forecast_%Y-%m-%d_%H-%M-%S.log")
logging.basicConfig(level=logging.INFO, datefmt='%Y%m%d_%H:%M:%S',)
def store_logger(self):
"""設定log儲存"""
handler = logging.FileHandler(self.log_name, "w", encoding = "UTF-8")
handler.setFormatter(self.formatter)
self.logger.addHandler(handler)
self.logger.propagate = False
def show_logger(self):
"""設定log在終端機顯示"""
console = logging.StreamHandler()
console.setLevel(logging.FATAL)
console.setFormatter(self.formatter)
self.logger.addHandler(console)
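
# A minimal usage sketch for CodeLogger (illustrative only; it assumes setting.ini
# provides a [filepath] log_path entry). The helper below is not called anywhere.
def _demo_code_logger():
    demo_logger = CodeLogger()
    demo_logger.store_logger()
    demo_logger.show_logger()
    demo_logger.logger.info('logger configured')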
class DBConnect:
"""繼承並設計DB連線處理"""
def __init__(self):
self.host = config['connect']['server']
self.user = config['connect']['username']
self.password = config['connect']['password']
self.database = config['connect']['database']
self.conn = connect(host=self.host, user=self.user, password=self.password, database=self.database, autocommit=True)
def query(self, sql, as_dict=False, para=()):
"""查詢DB數據"""
# as_dict 是讓數據呈現key/value型態
try:
cursor = self.conn.cursor(as_dict)
if para:
cursor.execute(sql,para)
return cursor
else:
cursor.execute(sql)
return cursor
except Exception as me:
CodeLogger().logger.error(me)
def insert(self, sql, para=()):
"""新增DB數據"""
try:
cursor = self.conn.cursor()
cursor.execute(sql,para)
except Exception as me:
CodeLogger().logger.error(me)
def delete(self, sql, para=()):
"""刪除DB數據"""
try:
cursor = self.conn.cursor()
cursor.execute(sql,para)
except Exception as me:
CodeLogger().logger.error(me)
def commit(self):
self.conn.commit()
def close(self):
self.conn.close() | [
"[email protected]"
] | |
4d75a2fa3fbfcd227da641b06f2ce1f1a779e02e | 6a07912090214567f77e9cd941fb92f1f3137ae6 | /cs212/Unit 4/28.py | ae381957925468dc57906a2813b0cfd324dea8d0 | [] | no_license | rrampage/udacity-code | 4ab042b591fa3e9adab0183d669a8df80265ed81 | bbe968cd27da7cc453eada5b2aa29176b0121c13 | refs/heads/master | 2020-04-18T08:46:00.580903 | 2012-08-25T08:44:24 | 2012-08-25T08:44:24 | 5,352,942 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,983 | py | # cs212 ; Unit 4 ; 28
# -----------------
# User Instructions
#
# In this problem, you will generalize the bridge problem
# by writing a function bridge_problem3, that makes a call
# to lowest_cost_search.
def bridge_problem3(here):
"""Find the fastest (least elapsed time) path to
the goal in the bridge problem."""
    start = (frozenset(here) | frozenset(['light']), frozenset())
    return lowest_cost_search(start, bsuccessors2, all_over, bcost)

def all_over(state):
    "Goal test: nobody (except possibly the light) is left on the near side."
    here, there = state
    return not here or here == frozenset(['light'])
def lowest_cost_search(start, successors, is_goal, action_cost):
"""Return the lowest cost path, starting from start state,
and considering successors(state) => {state:action,...},
that ends in a state for which is_goal(state) is true,
where the cost of a path is the sum of action costs,
which are given by action_cost(action)."""
explored = set() # set of states we have visited
frontier = [ [start] ] # ordered list of paths we have blazed
while frontier:
path = frontier.pop(0)
state1 = final_state(path)
if is_goal(state1):
return path
explored.add(state1)
pcost = path_cost(path)
for (state, action) in successors(state1).items():
if state not in explored:
total_cost = pcost + action_cost(action)
path2 = path + [(action, total_cost), state]
add_to_frontier(frontier, path2)
return Fail
def final_state(path): return path[-1]
def path_cost(path):
"The total cost of a path (which is stored in a tuple with the final action)."
if len(path) < 3:
return 0
else:
action, total_cost = path[-2]
return total_cost
def add_to_frontier(frontier, path):
"Add path to frontier, replacing costlier path if there is one."
# (This could be done more efficiently.)
# Find if there is an old path to the final state of this path.
old = None
for i,p in enumerate(frontier):
if final_state(p) == final_state(path):
old = i
break
if old is not None and path_cost(frontier[old]) < path_cost(path):
return # Old path was better; do nothing
elif old is not None:
del frontier[old] # Old path was worse; delete it
## Now add the new path and re-sort
frontier.append(path)
frontier.sort(key=path_cost)
def bsuccessors2(state):
"""Return a dict of {state:action} pairs. A state is a (here, there) tuple,
where here and there are frozensets of people (indicated by their times) and/or
the light."""
here, there = state
if 'light' in here:
return dict(((here - frozenset([a, b, 'light']),
there | frozenset([a, b, 'light'])),
(a, b, '->'))
                    for a in here if a != 'light'
                    for b in here if b != 'light')
else:
return dict(((here | frozenset([a, b, 'light']),
there - frozenset([a, b, 'light'])),
(a, b, '<-'))
                    for a in there if a != 'light'
                    for b in there if b != 'light')
def bcost(action):
"Returns the cost (a number) of an action in the bridge problem."
# An action is an (a, b, arrow) tuple; a and b are times; arrow is a string
a, b, arrow = action
return max(a, b)
def test():
here = [1, 2, 5, 10]
assert bridge_problem3(here) == [
(frozenset([1, 2, 'light', 10, 5]), frozenset([])),
((2, 1, '->'), 2),
(frozenset([10, 5]), frozenset([1, 2, 'light'])),
((2, 2, '<-'), 4),
(frozenset(['light', 10, 2, 5]), frozenset([1])),
((5, 10, '->'), 14),
(frozenset([2]), frozenset([1, 10, 5, 'light'])),
((1, 1, '<-'), 15),
(frozenset([1, 2, 'light']), frozenset([10, 5])),
((2, 1, '->'), 17),
(frozenset([]), frozenset([1, 10, 2, 5, 'light']))]
return 'test passes'
print test()
| [
"[email protected]"
] | |
92929d241384233660875a5731e7b8bdb4618600 | 75b1f503e695dd5251e00b6bd66207b99c9c83ff | /caesar_cipher.py | f4a48db54a62b7b6068e748444f02a88f468a015 | [] | no_license | rongoodbin/secret_messages | 2d3a4881b4f06a88ba777832eb1ae59202fb3725 | ff91786d4ef4f467e9a95c36df66b22641033424 | refs/heads/master | 2021-05-04T15:09:51.264542 | 2018-03-05T01:09:45 | 2018-03-05T01:09:45 | 120,221,827 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,324 | py | import string
from ciphers import Cipher
class Caesar(Cipher):
FORWARD = string.ascii_uppercase * 3
def __init__(self, keyword=None, offset=3):
self.offset = offset
self.FORWARD = string.ascii_uppercase + string.ascii_uppercase[:self.offset+1]
self.BACKWARD = string.ascii_uppercase[:self.offset+1] + string.ascii_uppercase
def encrypt(self, text):
output = []
text = text.upper()
for char in text:
try:
index = self.FORWARD.index(char)
except ValueError:
output.append(char)
else:
output.append(self.FORWARD[index+self.offset])
return ''.join(output)
def decrypt(self, text):
output = []
text = text.upper()
for char in text:
try:
index = self.BACKWARD.index(char)
except ValueError:
output.append(char)
else:
output.append(self.BACKWARD[index-self.offset])
return ''.join(output)
if __name__ == "__main__":
atbash = Caesar()
encrypted_text = atbash.encrypt("testing this code! 2pm")
print(encrypted_text)
decrypted_text = atbash.decrypt(encrypted_text)
print(decrypted_text)
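    # Round-trip sanity check (note: the cipher uppercases its input, so the
    # expected value below is the uppercased form of the demo string).
    assert decrypted_text == "TESTING THIS CODE! 2PM"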
| [
"[email protected]"
] | |
579153317b369ad77af1c66c5cb43036e863cc19 | 5be8b0f2ee392abeee6970e7a6364ac9a5b8ceaa | /xiaojian/second_phase/day12/http_sever2.0.py | 12ccde8198046391e24f9698efd843eacb0c011c | [] | no_license | Wellsjian/20180826 | 424b65f828f0174e4d568131da01dafc2a36050a | 0156ad4db891a2c4b06711748d2624080578620c | refs/heads/master | 2021-06-18T12:16:08.466177 | 2019-09-01T10:06:44 | 2019-09-01T10:06:44 | 204,462,572 | 0 | 1 | null | 2021-04-20T18:26:03 | 2019-08-26T11:38:09 | JavaScript | UTF-8 | Python | false | false | 3,467 | py | """
HTTP server 2.0

Interface design:
1. Provide a handle; callers reach attributes and methods through it,
   e.g. obj = open(), lock = Lock()
2. Instantiate an object, configure it through the object, then start the service,
   e.g. t = Thread(), p = Process()
3. Whatever the class cannot decide for the user is passed in as parameters.
4. Problems the class can solve should not be pushed onto the user; behaviour the
   user must customise can be supplied by overriding methods.

Technical notes:
the HTTP protocol

Approach:
1. Encapsulate the server in a class.
2. Write the code from the user's point of view.
"""
# Concrete HTTP server implementation.
from socket import *
from select import *
class HTTPSever:
def __init__(self, host, port, dir):
self.addrss = (host, port)
self.host = host
self.port = port
self.dir = dir
self.rlist = []
self.wlist = []
self.xlist = []
self.create_socket()
self.bind()
    # Create the listening socket
def create_socket(self):
self.sockfd = socket()
self.sockfd.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    # Bind the address
def bind(self):
self.sockfd.bind(self.addrss)
    # Start the service
    def server_forver(self):
        self.sockfd.listen(5)
        print("Listening on port %d" % self.port)
self.rlist.append(self.sockfd)
while True:
rs, ws, xs = select(self.rlist, self.wlist, self.xlist)
self.do_rlist(rs)
    # Handle a single client request
def handle(self, connfd):
request = connfd.recv(1024)
if not request:
connfd.close()
self.rlist.remove(connfd)
return
        # Extract the requested path from the request line
request_line = request.splitlines()[0]
info = request_line.decode().split(" ")[1]
print(connfd.getpeername(), ":", info)
if info == "/" or info[-5:] == ".html":
self.get_html(connfd, info)
else:
self.get_data(connfd,info)
def get_data(self,connfd,info):
response = "HTTP/1.1 200 ok\r\n"
response += "\r\n"
response += "<h1>Waiting for the HTTPSEVER 3.0<h1>"
connfd.send(response.encode())
def get_html(self,connfd,info):
if info == "/":
html_name = self.dir + "/index.html"
else:
html_name = self.dir + info
try:
obj = open(html_name)
except Exception:
response = "HTTP/1.1 404 not found\r\n"
response += "Content_Type:text/html\r\n"
response += "\r\n"
response += "<h1>sorry.....<h1>"
else:
response = "HTTP/1.1 200 OK\r\n"
response += "Content_Type:text/html\r\n"
response += "\r\n"
response += obj.read()
finally:
connfd.send(response.encode())
    # Dispatch the sockets that select() reported as ready in rlist
def do_rlist(self, rs):
for r in rs:
if r is self.sockfd:
connfd, addr = self.sockfd.accept()
print("Connect from ", addr)
self.rlist.append(connfd)
else:
self.handle(r)
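
# A minimal sketch of the "customise by overriding" idea from the module docstring:
# subclassing HTTPSever and overriding get_data() changes what non-HTML paths return.
# The class below is illustrative only and is not used by the __main__ block.
class PlainTextHTTPSever(HTTPSever):
    def get_data(self, connfd, info):
        # Echo the requested path back in a simple response
        response = "HTTP/1.1 200 ok\r\n"
        response += "\r\n"
        response += "<h1>custom handler for %s</h1>" % info
        connfd.send(response.encode())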
if __name__ == "__main__":
# 希望通过HTTPSever类快速搭建http服务,用以展示自己的网页
# HOST = "0.0.0.0"
# PORT = 22222
# ADDR = (HOST, PORT)
# DIR = "./static"
HOST = "172.40.74.151"
PORT = 8888
DIR ="./hfklswn"
# 实例化对象
httpfd = HTTPSever(HOST, PORT, DIR)
# 启动HTTP服务
httpfd.server_forver()
| [
"[email protected]"
] | |
bc11e7f5741884920b6652765910f0a404da24b5 | 05d11b9cda35371669195e7c07e476dfb95ccaef | /triple_net_tensorboard_random_multiGpus/multi_gpu_demo.py | 16c2d8afe76dc5e507503e631d42c7c1f7fcbd33 | [] | no_license | Continue7777/DSSM- | d32a105c033f4a8074d67c3fee56543d65622669 | af018562123cb3c81fde9b27becf0bc042bafd79 | refs/heads/master | 2021-04-15T09:33:14.255692 | 2018-04-14T16:05:19 | 2018-04-14T16:05:19 | 126,166,329 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,507 | py | # -*- coding: utf-8 -*-
from datetime import datetime
import os
import time
import tensorflow as tf
import mnist_inference
# Settings used when training the network. They are similar to the configuration
# defined in Section 5.5.
BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.001
LEARNING_RATE_DECAY = 0.99
REGULARAZTION_RATE = 0.0001
TRAINING_STEPS = 1000
MOVING_AVERAGE_DECAY = 0.99
N_GPU = 4
# Paths for the logs and the saved model.
MODEL_SAVE_PATH = "/path/to/logs_and_models/"
MODEL_NAME = "model.ckpt"
# Path of the training data. Because every GPU has to be fed its own training data,
# the placeholder approach would require preparing several copies by hand. To make
# fetching the training data easier we use the input-queue approach introduced in
# Chapter 7 and read from TFRecord files, so the path below points to the MNIST
# training data after conversion to TFRecords (the conversion is covered in Chapter 7).
DATA_PATH = "/path/to/data.tfrecords"
# Define the input queue that supplies the training data; see Chapter 7 for details.
def get_input():
filename_queue = tf.train.string_input_producer([DATA_PATH])
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
    # Define the parsing format for a serialized example.
features = tf.parse_single_example(
serialized_example,
features={
'image_raw': tf.FixedLenFeature([], tf.string),
'pixels': tf.FixedLenFeature([], tf.int64),
'label': tf.FixedLenFeature([], tf.int64),
})
    # Decode the image and the label.
decoded_image = tf.decode_raw(features['image_raw'], tf.uint8)
reshaped_image = tf.reshape(decoded_image, [784])
retyped_image = tf.cast(reshaped_image, tf.float32)
label = tf.cast(features['label'], tf.int32)
    # Build the shuffling input queue and return a batch.
min_after_dequeue = 10000
capacity = min_after_dequeue + 3 * BATCH_SIZE
return tf.train.shuffle_batch(
[retyped_image, label],
batch_size=BATCH_SIZE,
capacity=capacity,
min_after_dequeue=min_after_dequeue)
# Loss function. For the given training data, regularization rule and name scope,
# compute the total loss within that name scope. The name scope is needed because the
# regularization losses computed on the different GPUs are all added to a collection
# named 'losses'; without the scope we would also sum the losses of the other GPUs.
def get_loss(x, y_, regularizer, scope):
    # Reuse the function defined in Section 5.5 to compute the forward pass.
    y = mnist_inference.inference(x, regularizer)
    # Cross-entropy loss.
    cross_entropy = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(y, y_))
    # Regularization loss computed on the current GPU.
    regularization_loss = tf.add_n(tf.get_collection('losses', scope))
    # Final total loss.
    loss = cross_entropy + regularization_loss
return loss
# Compute the average of the gradients of every variable.
def average_gradients(tower_grads):
    average_grads = []
    # Iterate over all variables together with the gradients computed for them on
    # the different GPUs.
    for grad_and_vars in zip(*tower_grads):
        # Average the gradients over all GPUs.
        grads = []
        for g, _ in grad_and_vars:
            expanded_g = tf.expand_dims(g, 0)
            grads.append(expanded_g)
        grad = tf.concat(0, grads)
        grad = tf.reduce_mean(grad, 0)
        v = grad_and_vars[0][1]
        grad_and_var = (grad, v)
        # Pair the variable with its averaged gradient.
        average_grads.append(grad_and_var)
    # Return the averaged gradients of all variables; they are used for the update.
    return average_grads
# Main training procedure.
def main(argv=None):
    # Keep the simple operations on the CPU; only the network training runs on the GPUs.
    with tf.Graph().as_default(), tf.device('/cpu:0'):
        # Get the training batch.
        x, y_ = get_input()
        regularizer = tf.contrib.layers.l2_regularizer(REGULARAZTION_RATE)
        # Define the number of training steps and the exponentially decaying learning rate.
        global_step = tf.get_variable(
            'global_step', [], initializer=tf.constant_initializer(0),
            trainable=False)
        learning_rate = tf.train.exponential_decay(
            LEARNING_RATE_BASE, global_step, 60000 / BATCH_SIZE,
            LEARNING_RATE_DECAY)
        # Define the optimization method.
        opt = tf.train.GradientDescentOptimizer(learning_rate)
tower_grads = []
        # Run the optimization process on the different GPUs.
        for i in range(N_GPU):
            # Pin this part of the optimization to one GPU.
            with tf.device('/gpu:%d' % i):
                with tf.name_scope('GPU_%d' % i) as scope:
                    cur_loss = get_loss(x, y_, regularizer, scope)
                    # After the variables have been declared for the first time, set the
                    # reuse flag to True so that the different GPUs update the same set
                    # of parameters. Note that tf.name_scope does not affect the
                    # namespace used by tf.get_variable.
                    tf.get_variable_scope().reuse_variables()
                    # Compute the gradients of all variables on the current GPU.
                    grads = opt.compute_gradients(cur_loss)
                    tower_grads.append(grads)
        # Average the gradients across the GPUs and write them to the TensorBoard log.
grads = average_gradients(tower_grads)
for grad, var in grads:
if grad is not None:
tf.histogram_summary(
'gradients_on_average/%s' % var.op.name, grad)
        # Update the parameters with the averaged gradients.
        apply_gradient_op = opt.apply_gradients(
            grads, global_step=global_step)
for var in tf.trainable_variables():
tf.histogram_summary(var.op.name, var)
        # Maintain moving averages of the variables.
        variable_averages = tf.train.ExponentialMovingAverage(
            MOVING_AVERAGE_DECAY, global_step)
        variables_averages_op = variable_averages.apply(
            tf.trainable_variables())
        # Every iteration updates both the variable values and their moving averages.
        train_op = tf.group(apply_gradient_op, variables_averages_op)
saver = tf.train.Saver(tf.all_variables())
summary_op = tf.merge_all_summaries()
init = tf.initialize_all_variables()
        # Training loop.
with tf.Session(config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=True)) as sess:
            # Initialize all variables and start the queue runners.
init.run()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
summary_writer = tf.train.SummaryWriter(
MODEL_SAVE_PATH, sess.graph)
for step in range(TRAINING_STEPS):
                # Run the training op and record how long it takes.
start_time = time.time()
_, loss_value = sess.run([train_op, cur_loss])
duration = time.time() - start_time
                # Every so often report the training progress and measure the speed.
if step != 0 and step % 10 == 0:
                    # Number of training examples consumed in this step. Every GPU uses
                    # one batch per training op, so the total number of examples is the
                    # batch size times the number of GPUs.
                    num_examples_per_step = BATCH_SIZE * N_GPU
                    # num_examples_per_step is the number of examples used in this step
                    # and duration is the time the step took, so the number of examples
                    # processed per second is num_examples_per_step / duration.
                    examples_per_sec = num_examples_per_step / duration
                    # duration is the time of the whole step; every GPU processes one
                    # batch during the step, so the time spent on a single batch is
                    # duration / number of GPUs.
                    sec_per_batch = duration / N_GPU
                    # Print the training information.
                    format_str = ('step %d, loss = %.2f (%.1f examples/'
                                  'sec; %.3f sec/batch)')
print(format_str % (step, loss_value,
examples_per_sec, sec_per_batch))
                    # Visualize the training process with TensorBoard.
summary = sess.run(summary_op)
summary_writer.add_summary(summary, step)
                    # Save the current model from time to time.
                    if step % 1000 == 0 or (step + 1) == TRAINING_STEPS:
                        checkpoint_path = os.path.join(
                            MODEL_SAVE_PATH, MODEL_NAME)
saver.save(sess, checkpoint_path, global_step=step)
coord.request_stop()
coord.join(threads)
if __name__ == '__main__':
tf.app.run() | [
"[email protected]"
] | |
2be33a204326b77eed20224274574b433213be6a | 73501b9e3623c3a9338306dbe52d1d89700f3d91 | /upload_this_on_arduino/pyduino.py | 2e4bf4eb623b2c987b4a395798e2605767cf5739 | [] | no_license | rouanro/PS | 72af2d8f5f3d1c628b8ad599c244235781b04c61 | a474d5ac9d23d50388c1811ddf256efa408b33d6 | refs/heads/master | 2020-03-18T21:57:12.402332 | 2018-05-29T15:19:15 | 2018-05-29T15:19:15 | 135,315,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,357 | py | """
A library to interface Arduino through serial connection
"""
import serial
import smtplib
from email.message import EmailMessage
class Arduino():
"""
Models an Arduino connection
"""
def __init__(self, serial_port='/dev/ttyACM0', baud_rate=9600,
read_timeout=5):
"""
Initializes the serial connection to the Arduino board
"""
self.conn = serial.Serial(serial_port, baud_rate)
self.conn.timeout = read_timeout # Timeout for readline()
def set_pin_mode(self, pin_number, mode):
"""
Performs a pinMode() operation on pin_number
Internally sends b'M{mode}{pin_number} where mode could be:
- I for INPUT
- O for OUTPUT
- P for INPUT_PULLUP MO13
"""
# command = (''.join(('M',mode,str(pin_number)))).encode()
#print 'set_pin_mode =',command,(''.join(('M',mode,str(pin_number))))
# self.conn.write(command)
def digital_read(self, pin_number):
"""
Performs a digital read on pin_number and returns the value (1 or 0)
Internally sends b'RD{pin_number}' over the serial connection
"""
command = (''.join(('RD', str(pin_number)))).encode()
#self.conn.write(command)
line_received = self.conn.readline().decode().strip()
header, value = line_received.split(':') # e.g. D13:1
if header == ('D'+ str(pin_number)):
# If header matches
return int(value)
def digital_write(self, pin_number, digital_value):
"""
Writes the digital_value on pin_number
Internally sends b'WD{pin_number}:{digital_value}' over the serial
connection
"""
command = (''.join(('WD', str(pin_number), ':',
str(digital_value)))).encode()
#self.conn.write(command)
def analog_read(self, pin_number):
"""
Performs an analog read on pin_number and returns the value (0 to 1023)
Internally sends b'RA{pin_number}' over the serial connection
"""
command = (''.join(('RA', str(pin_number)))).encode()
self.conn.write(command)
print(command)
line_received = self.conn.readline().decode().strip()
#header, value = line_received.split(':') # e.g. A4:1
if line_received[0:2] == ("A0"):
value = line_received[3:]
# If header matches
return int(value)
if line_received[0:2] == ("A4"):
value = line_received[3:]
return value
# me == the sender's email address
# you == the recipient's email address
# msg = EmailMessage()
# msg['Subject'] = 'Teeeeeeeeeeest'
# msg['From'] = '[email protected]'
# msg['To'] = '[email protected]'
# Send the message via our own SMTP server.
# s = smtplib.SMTP('localhost')
# s.send_message(msg)
# s.quit()
def analog_write(self, pin_number, analog_value):
"""
Writes the analog value (0 to 255) on pin_number
Internally sends b'WA{pin_number}:{analog_value}' over the serial
connection
"""
command = (''.join(('WA', str(pin_number), ':',
str(analog_value)))).encode()
#self.conn.write(command)
def send_message(self, message):
command = message.encode()
self.conn.write(command)
def send_email(self, user, pwd, recipient, subject, body):
FROM = user
TO = recipient if isinstance(recipient, list) else [recipient]
SUBJECT = subject
TEXT = body
# Prepare actual message
message = """From: %s\nTo: %s\nSubject: %s\n\n%s
""" % (FROM, ", ".join(TO), SUBJECT, TEXT)
try:
server = smtplib.SMTP("smtp.gmail.com", 587)
server.ehlo()
server.starttls()
server.login(user, pwd)
server.sendmail(FROM, TO, message)
server.close()
print('successfully sent the mail')
except:
print("failed to send mail")
def close(self):
"""
To ensure we are properly closing our connection to the
Arduino device.
"""
self.conn.close()
print ('Connection to Arduino closed')
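
# A minimal usage sketch (assumptions: an Arduino running the matching firmware is
# attached at /dev/ttyACM0; adjust serial_port for your machine).
if __name__ == "__main__":
    board = Arduino(serial_port='/dev/ttyACM0')
    board.send_message('hello')   # send a raw command string
    print(board.analog_read(0))   # read analog pin A0 using the protocol above
    board.close()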
| [
"[email protected]"
] | |
ef6cd9dcf7e940c06b7afc339d98f4454ab71b4f | 421c9b869f4391a08a216df784a7c36b8b666557 | /spy.py | a3094bcf5d05106f9d3b7990cef14725c29a9f04 | [] | no_license | Pramod37/spychatcode | 96d6de810010f4f9c9ac2690577442b2e8c7aec9 | 2bdd688e3e0736e229824111a9c8aa4d1bd41f71 | refs/heads/master | 2021-01-11T05:28:34.360188 | 2017-06-22T07:31:11 | 2017-06-22T07:31:11 | 95,031,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,650 | py | from details import spy, friends,ChatMessage,Spy
from steganography.steganography import Steganography
from datetime import datetime
status_message = ['on work','updating....','on mood to learn']
print 'Hello, let\'s get started'
existing = raw_input(" Do You Want continue as " + spy.salutation + " " + spy.name + " (Y/N)? ").upper()
def add_status(current_status_message) :
updated_status_message = None
if current_status_message != None :
print 'your current status message is %s \n' % (current_status_message)
else :
print 'you don\'t have any status message..\n'
default = raw_input("do you want to select from the older status message(y/n)? Or want to write new?(n)")
if default.upper() == "N" :
new_status_message = raw_input("what stauts do you want to set?")
if len(new_status_message) > 0:
status_message.append(new_status_message)
updated_status_message = new_status_message
if updated_status_message.isspace():
print 'you don\'t have any status..'
else:
updated_status_message = updated_status_message.strip()
print updated_status_message
elif default.upper() == 'Y' :
item_position = 1
for message in status_message :
print '%d. %s' % (item_position, message)
item_position = item_position + 1
message_selection = int(raw_input("\n choose from the above message"))
if len(status_message) >= message_selection :
updated_status_message = status_message[message_selection - 1]
else:
print 'the option you choose not available'
if updated_status_message:
print 'Your updated status message is: %s' % (updated_status_message)
else:
updated_status_message.startswith(" ")
print 'You current don\'t have a status update'
return updated_status_message
def add_friend() :
    present_friend = Spy('', '', 0, 0.0)
present_friend.name = raw_input("please add your friend's name")
present_friend.salutation = raw_input("are they mr. or miss.?")
present_friend.name = present_friend.salutation + " " + present_friend.name
present_friend.age = raw_input("age?")
present_friend.age = int(present_friend.age)
present_friend.rating = raw_input("rating?")
present_friend.rating = float(present_friend.rating)
if len(present_friend.name) > 0 and present_friend.age >= 20 and present_friend.rating >= 2.0:
friends.append(present_friend)
print 'Friend Added!'
else:
print 'sorry! unable to add..invalid entry!'
return len(friends)
def select_friend():
item_number = 0
for friend in friends:
print '%d %s with age %d with rating %.2f is online' % (item_number + 1, friend.name,
friend.age,
friend.rating)
item_number = item_number + 1
friend_choice = raw_input("Choose from your friends")
friend_choice_position = int(friend_choice) - 1
return friend_choice_position
def send_message():
friend_choice = select_friend()
original_image = raw_input("What is the name of image?")
output_path = "output.jpg "
text = raw_input("what do you want to say? ")
Steganography.encode(original_image , output_path, text)
new_chat = ChatMessage(text,True)
friends[friend_choice].chats.append(new_chat)
print "Your secret message image is ready!"
def read_message():
sender = select_friend()
output_path = raw_input("What is the name of the file?")
secret_text = Steganography.decode(output_path)
new_chat = ChatMessage(secret_text,False)
friends[sender].chats.append(new_chat)
print "Your secret message has been saved!"
def read_chat_history():
read_for = select_friend()
print '\n5'
for chat in friends[read_for].chats:
if chat.sent_by_me:
print '[%s] %s: %s' % (chat.time.strftime("%d %B %Y"), 'You said:', chat.message)
else:
print '[%s] %s said: %s' % (chat.time.strftime("%d %B %Y"), friends[read_for].name, chat.message)
def start_chat(spy) :
current_status_message = None
spy.name = spy.salutation + " " + spy.name
if spy.age >=20 and spy.age <=50 :
print "Authentication Complete. Welcome " + spy.name + " age: " + str(spy.age) + " and rating of spy:" + str(
spy.rating) \
+ " Proud to Have You onboard.."
show_menu = True
while show_menu :
menu_choices = "What do you want to do?\n 1. Add a Status\n 2. Add a Friend\n 3. Send a Secret Message\n 4. Read a Secret Message\n" \
" 5. Read chat history\n 6. show status \n 7. show friends list\n 8. exit apllication\n"
menu_choice = raw_input(menu_choices)
if len(menu_choice) > 0 :
menu_choice = int(menu_choice)
if menu_choice == 1 :
print 'you choose to Status Update'
current_status_message = add_status(current_status_message)
elif menu_choice == 2 :
print 'you can add a friend now!'
number_of_friends = add_friend()
print 'You have %d friends' % (number_of_friends)
elif menu_choice == 3 :
print 'you can send a secret message here!'
send_message()
elif menu_choice == 4 :
print 'you can read a secret message here!'
read_message()
elif menu_choice == 5 :
print 'Your chat history'
read_chat_history()
elif menu_choice == 6:
                    print 'your status message here!\n'
if current_status_message.startswith(" "):
print 'you don\'t have status.. '
elif current_status_message.isspace():
print'you don\'t have any status..'
else:
current_status_message = add_status(current_status_message)
elif menu_choice == 7 :
print 'your friends are..\n'
for i in friends:
print i.name
elif menu_choice == 8 :
exit()
else :
show_menu = False
else:
print 'sorry You are not eligible to be a spy'
if existing == "Y":
start_chat(spy)
else:
spy = Spy('','',0,0.0)
spy.name = raw_input("welcome to spy chat,tou need to tell your name first:")
if len (spy.name) > 0:
spy.salutation = raw_input("Should I call you Mr. or Ms.?: ")
spy.age = int(raw_input("What is your Age?"))
spy.age = int(spy.age)
spy.rating = float(raw_input("what is your rating:"))
if spy.rating >= 4.5:
print "wow! Great Ace."
elif spy.rating >= 4.0 and spy.rating < 4.5 :
print "you are good."
elif spy.rating >= 3.0 and spy.rating < 4.0 :
print "you can do better."
else:
        print 'We can always use some help in the office..'
spy_rating = float(spy.rating)
spy_is_online = True
start_chat(spy)
else :
print "A Spy needs a valid Name!"
| [
"[email protected]"
] | |
c20a34f0a583217bc2954583f5023db885908a21 | 6dd08ec6b4f6351de8450a3d7e592fd6b4994119 | /cbase/server/cbase-1.8.1/testrunner/lib/cli_interface.py | e6a6f9806a3859205b951f3f754ca879f82d6278 | [
"Apache-2.0"
] | permissive | zhgwenming/appstack | d015e96b911fe318f9fba1bdeeea9d888d57dfba | 8fe6c1dfc2f5ed4a36c335e86ae28c17b3769276 | refs/heads/master | 2021-01-23T13:30:19.507537 | 2015-11-09T06:48:35 | 2015-11-09T06:48:35 | 7,576,644 | 1 | 2 | null | 2016-01-05T09:16:22 | 2013-01-12T15:13:21 | C | UTF-8 | Python | false | false | 6,194 | py | #!/usr/bin/env python
#
# Copyright 2010 Membase, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# PYTHONPATH needs to be set up to point to mc_bin_client
import os
import subprocess
DEF_USERNAME = "Administrator"
DEF_PASSWORD = "password"
DEF_KIND = "json"
DEF_MOXI_PORT = 11211
DEF_HTTP_PORT = 8091
DEF_RAMSIZE = 256
DEF_REPLICA = 1
CLI_EXE_LOC = "../membase-cli/membase"
SSH_EXE_LOC = "/opt/membase/bin/cli/membase"
class CLIInterface(object):
def __init__(self, server, http_port=DEF_HTTP_PORT, username=DEF_USERNAME, password=DEF_PASSWORD, kind=DEF_KIND, debug=False, ssh=False, sshkey=None):
self.server = server
self.http_port = http_port
self.username = username
self.password = password
self.kind = kind
self.debug = debug
self.ssh = ssh
self.sshkey = sshkey
if (debug):
self.acting_server_args = "-c %s:%d -u %s -p %s -o %s -d" % (self.server, self.http_port, self.username, self.password, self.kind)
else:
self.acting_server_args = "-c %s:%d -u %s -p %s -o %s" % (self.server, self.http_port, self.username, self.password, self.kind)
def server_list(self):
cmd = " server-list " + self.acting_server_args
return self.execute_command(cmd)
def server_info(self):
cmd = " server-info " + self.acting_server_args
return self.execute_command(cmd)
def server_add(self, server_to_add, rebalance=False):
if (rebalance):
cmd = " rebalance " + self.acting_server_args + " --server-add=%s:%d --server-add-username=%s --server-add-password=%s"\
% (server_to_add, self.http_port, self.username, self.password)
else:
cmd = " server-add " + self.acting_server_args + " --server-add=%s:%d --server-add-username=%s --server-add-password=%s"\
% (server_to_add, self.http_port, self.username, self.password)
return self.execute_command(cmd)
def server_readd(self, server_to_readd):
cmd = " server-readd " + self.acting_server_args + " --server-add=%s:%d --server-add-username=%s --server-add-password=%s"\
% (server_to_readd, self.http_port, self.username, self.password)
return self.execute_command(cmd)
def rebalance(self):
cmd = " rebalance " + self.acting_server_args
return self.execute_command(cmd)
def rebalance_stop(self):
cmd = " reblance-stop " + self.acting_server_args
return self.execute_command(cmd)
def rebalance_status(self):
cmd = " rebalance-status " + self.acting_server_args
return self.execute_command(cmd)
def failover(self, server_to_failover):
cmd = " failover " + self.acting_server_args + " --server-failover %s" % (server_to_failover)
return self.execute_command(cmd)
def cluster_init(self, c_username=DEF_USERNAME, c_password=DEF_PASSWORD, c_port=DEF_HTTP_PORT, c_ramsize=DEF_RAMSIZE):
cmd = " cluster-init " + self.acting_server_args\
+ " --cluster-init-username=%s --cluster-init-password=%s --cluster-init-port=%d --cluster-init-ramsize=%d"\
% (c_username, c_password, c_port, c_ramsize)
return self.execute_command(cmd)
def node_init(self, path):
cmd = " node-init " + self.acting_server_args + " --node-init-data-path=%s" % (path)
return self.execute_command(cmd)
def bucket_list(self):
cmd = " bucket-list " + self.acting_server_args
return self.execute_command(cmd)
def bucket_create(self, bucket_name, bucket_type, bucket_port, bucket_password="", bucket_ramsize=DEF_RAMSIZE, replica_count=DEF_REPLICA):
cmd = " bucket-create " + self.acting_server_args\
+ " --bucket=%s --bucket-type=%s --bucket-port=%d --bucket-password=%s --bucket-ramsize=%d --bucket-replica=%d"\
% (bucket_name, bucket_type, bucket_port, bucket_password, bucket_ramsize, replica_count)
return self.execute_command(cmd)
def bucket_edit(self, bucket_name, bucket_type, bucket_port, bucket_password, bucket_ramsize, replica_count):
cmd = " bucket-edit " + self.acting_server_args\
+ " --bucket=%s --bucket-type=%s --bucket-port=%d --bucket-password=%s --bucket-ramsize=%d --bucket-replica=%d"\
% (bucket_name, bucket_type, bucket_port, bucket_password, bucket_ramsize, replica_count)
return self.execute_command(cmd)
def bucket_delete(self, bucket_name):
cmd = " bucket-delete " + self.acting_server_args + " --bucket=%s" % (bucket_name)
return self.execute_command(cmd)
def bucket_flush(self):
return "I don't work yet :-("
def execute_command(self, cmd):
if (self.ssh):
return self.execute_ssh(SSH_EXE_LOC + cmd)
else:
return self.execute_local(CLI_EXE_LOC + cmd)
def execute_local(self, cmd):
rtn = ""
process = subprocess.Popen(cmd ,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
stdoutdata,stderrdata=process.communicate()
rtn += stdoutdata
return rtn
def execute_ssh(self, cmd):
rtn=""
if (self.sshkey == None):
process = subprocess.Popen("ssh root@%s \"%s\"" % (self.server,cmd),shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
else:
process = subprocess.Popen("ssh -i %s root@%s \"%s\"" % (self.sshkey, self.server, cmd),shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
stdoutdata,stderrdata=process.communicate()
rtn += stdoutdata
return rtn
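
# A minimal usage sketch (assumptions: a Membase/Couchbase node is reachable at the
# given address and the CLI binary exists at CLI_EXE_LOC; values are illustrative).
if __name__ == "__main__":
    cli = CLIInterface("127.0.0.1")
    print(cli.server_list())
    print(cli.bucket_list())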
| [
"[email protected]"
] | |
c37ff8cfcff227220d098069e2f3040dce7f56e8 | 9145d24e2517d7f3cea6e89158806b95919449b8 | /doc/conf.py | 37c50aca46644bd4ce262e466fa2696daa55957c | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | pombredanne/coveragepy | b6de846694156581ee0b9a3348f4cfd48719855f | 2364947d7814a065cf2c05d930eda94203b20f1c | refs/heads/master | 2021-01-22T23:43:21.800229 | 2017-03-18T11:14:13 | 2017-03-18T11:14:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,618 | py | # -*- coding: utf-8 -*-
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
#
# coverage.py documentation build configuration file, created by
# sphinx-quickstart on Wed May 13 22:18:33 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.ifconfig',
'sphinxcontrib.spelling',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Coverage.py'
copyright = u'2009\N{EN DASH}2017, Ned Batchelder' # CHANGEME
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4.3.4' # CHANGEME
# The full version, including alpha/beta/rc tags.
release = '4.3.4' # CHANGEME
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
#html_theme = 'default'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
#html_style = "neds.css"
#html_add_permalinks = ""
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_templates']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = '.htm'
# Output file base name for HTML help builder.
htmlhelp_basename = 'coveragepydoc'
# -- Spelling ---
spelling_word_list_filename = 'dict.txt'
spelling_show_suggestions = False
# When auto-doc'ing a class, write the class' docstring and the __init__ docstring
# into the class docs.
autoclass_content = "class"
prerelease = bool(max(release).isalpha())
def setup(app):
app.add_stylesheet('coverage.css')
app.add_config_value('prerelease', False, 'env')
app.info("** Prerelease = %r" % prerelease)
| [
"[email protected]"
] | |
c7a6bbfb9e4f4606a0720e7f9c0efa56e7d90f30 | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/DataQuality/DataQualityConfigurations/python/TCTDisplay.py | 6fa11e45427f043ea1f2b19da409200372d1fc14 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,330 | py | # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
from DataQualityUtils.DQWebDisplayConfig import DQWebDisplayConfig
dqconfig = DQWebDisplayConfig()
dqconfig.config = "TCT"
dqconfig.hcfg = "/afs/cern.ch/user/a/atlasdqm/dqmdisk/tier0/han_config/Collisions/collisions_run.1.41.hcfg"
dqconfig.hcfg_min10 = "/afs/cern.ch/user/a/atlasdqm/dqmdisk/tier0/han_config/Collisions/collisions_minutes10.1.9.hcfg"
dqconfig.hcfg_min30 = "/afs/cern.ch/user/a/atlasdqm/dqmdisk/tier0/han_config/Collisions/collisions_minutes30.1.5.hcfg"
dqconfig.hanResultsDir = "/afs/cern.ch/atlas/offline/external/FullChainTest/tier0/dqm/han_results"
dqconfig.htmlDir = "/afs/cern.ch/atlas/offline/external/FullChainTest/tier0/dqm/www"
dqconfig.htmlWeb = "http://atlas-project-fullchaintest.web.cern.ch/atlas-project-FullChainTest/tier0/dqm/www"
dqconfig.runlist = "runlist_TCT.xml"
dqconfig.indexFile = "results_TCT.html"
dqconfig.lockFile = "DQWebDisplay_TCT.lock"
dqconfig.dbConnection = "sqlite://;schema=MyCOOL_histo.db;dbname=OFLP200"
dqconfig.dqmfOfl = "/GLOBAL/DETSTATUS/DQMFOFL"
dqconfig.dbConnectionHisto = "sqlite://;schema=MyCOOL_histo.db;dbname=OFLP200"
dqconfig.dqmfOflHisto = "/GLOBAL/DETSTATUS/DQMFOFLH"
dqconfig.dbTagName = "DetStatusDQMFOFL-TCT"
| [
"[email protected]"
] | |
b7955588bac5a73a7f7b1064c773845400d52ab2 | fca01c1f424e8554841fcc221a613fb0bd0a0114 | /zespol/admin.py | 5faf86ccc026a8468d45728f491d6fa65c2630f0 | [] | no_license | Bartoszmleczko/GigTicketsApp | 3bae86cb4cb8d17b90ebed2afa7dd5645b117f51 | 9fa013da7ec8a73aebca7ec00658470b067dee4a | refs/heads/master | 2021-01-26T08:20:47.629696 | 2020-02-26T22:54:46 | 2020-02-26T22:54:46 | 243,381,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | from django.contrib import admin
from .models import *
# Register your models here.
class ClubAdmin(admin.ModelAdmin):
list_display = ('name','address')
admin.site.register(Band)
admin.site.register(Club,ClubAdmin)
admin.site.register(Concert)
admin.site.register(Ticket)
admin.site.register(Profile)
admin.site.register(Genre)
| [
"[email protected]"
] | |
93ec5f04c17f0e8560d908d5e69d8182511e13bd | 443043c276f5c467db3c1af544f5c0aae53aea8b | /tests/test_helpers.py | a43f15f329ebcb5bd6e8fb32f08d40ee79cb2e09 | [
"MIT"
] | permissive | kurtjd/chesscorpy | ac6dda5bd4e3eb6901d525ea1d9411d6352b9903 | 127c4b1f4983f08c824970c04841071e7533fad9 | refs/heads/master | 2023-07-17T18:25:52.933288 | 2021-09-02T15:29:18 | 2021-09-02T15:29:18 | 388,607,838 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | from chesscorpy.helpers import get_player_colors, determine_player_colors
def test_get_player_colors():
assert get_player_colors(5, 5) == ('White', 'black')
assert get_player_colors(5, 2) == ('Black', 'white')
def test_determine_player_colors():
# TODO: Test 'random' color
assert determine_player_colors('white', 1, 2) == (1, 2)
assert determine_player_colors('black', 1, 2) == (2, 1)
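
def test_determine_player_colors_random():
    # Sketch for the TODO above (assumption: the 'random' option assigns both players
    # in one of the two possible orders).
    assert determine_player_colors('random', 1, 2) in ((1, 2), (2, 1))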
| [
"[email protected]"
] | |
8c9f901a8df6c0267bbdc70f47a911e544131ccb | 9a59d7b8a23e848ba08941f293e1a9c97107e8f1 | /models/basic.py | 4a9078a823b08463d1926b548feba56025521290 | [] | no_license | KellerJordan/CapsNet-Adversarial | 23a6c965d0955e4686af5579183dc9fe2df553cc | 9f7b090c367f62249c23d7d2f378d558ad777052 | refs/heads/master | 2021-03-30T21:45:15.874058 | 2018-05-24T17:06:59 | 2018-05-24T17:06:59 | 124,352,303 | 19 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,183 | py | import torch
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
# extremely simple network to do basic science with training methods
class BasicNetwork(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(784, 100)
self.fc2 = nn.Linear(100, 10)
def forward(self, x):
x = x.view(-1, 784)
x = F.relu(self.fc1(x))
out = self.fc2(x)
return out
# simple CNN for experiments on CIFAR10
class KrizhevskyNet(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 64, 5)
self.pool1 = nn.MaxPool2d(3, 2)
self.conv2 = nn.Conv2d(64, 64, 5)
self.pool2 = nn.MaxPool2d(3, 2)
        self.fc1 = nn.Linear(64*4*4, 384)  # a 32x32 input leaves 4x4 feature maps after the two conv/pool stages
self.fc2 = nn.Linear(384, 192)
self.fc3 = nn.Linear(192, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = self.pool1(x)
x = F.relu(self.conv2(x))
x = self.pool2(x)
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
logits = self.fc3(x)
return logits
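
# Quick, illustrative shape check: push a random CIFAR-sized batch through the CNN.
if __name__ == '__main__':
    net = KrizhevskyNet()
    dummy = torch.randn(2, 3, 32, 32)   # batch of 2 RGB 32x32 images
    print(net(dummy).shape)             # expected: torch.Size([2, 10])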
| [
"[email protected]"
] | |
3b91d9f42ee1ecda8632567b35ac5caa51d497c7 | 35053a371d85c2d45a4f52239d8a70b38194ef48 | /Count of Matches in Tournament.py | 96c8b115113e1096f964d3dcc4f40e3f4b7f16a1 | [] | no_license | Kuehar/LeetCode | 51d169c81a2e572ea854399fc78e1130220388f9 | 4555c20455f181f9dd7b3aba2a8779dea795edfb | refs/heads/master | 2023-04-16T10:13:03.584541 | 2023-04-06T11:47:21 | 2023-04-06T11:47:21 | 243,361,421 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | class Solution:
def numberOfMatches(self, n: int) -> int:
return n-1
# O(1) Solution.
# The answer is always n - 1: every match eliminates exactly one team, and n - 1 teams must be eliminated to leave a single champion.
# Runtime: 28 ms, faster than 82.44% of Python3 online submissions for Count of Matches in Tournament.
# Memory Usage: 14.3 MB, less than 40.04% of Python3 online submissions for Count of Matches in Tournament.
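
# For comparison only: a direct simulation of the tournament rules, illustrating why
# the O(1) answer above is correct (each round plays n // 2 matches and advances the
# winners plus any team with a bye).
class SimulationSolution:
    def numberOfMatches(self, n: int) -> int:
        matches = 0
        while n > 1:
            matches += n // 2
            n = n // 2 + n % 2
        return matches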
| [
"[email protected]"
] | |
affbdc260006818519072805edce1e7247140a64 | 12db36eaad77c99b97878e96f2c4924dcf2ed83f | /exception/__init__.py | 1847580c2e336851ee2594b864bd64590bf076c2 | [] | no_license | sevenler/orange | 0c442bc09dda1c811fd5e996bf240a1e98e788b7 | 370c04317a4f538f679deb7cab8f6d7a9c9b1d02 | refs/heads/master | 2021-01-11T22:41:25.748658 | 2017-01-17T18:13:40 | 2017-01-17T18:13:40 | 79,017,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | from error_status import ErrorStatusException
from authority import AuthorityException
| [
"[email protected]"
] | |
c8435b9d400c6a96e9ff08e7bc2c5cf72e3f2732 | 0625bb1df6c031f46e7dfd51f060092c9fa3416f | /water_battle_game.py | 5b1edbb25df3b0e9fb81b6f2d0b8b6cd1f56395c | [] | no_license | Sviatoslav-Lobanov/Python_learning | 393ad9e2f1ba79d8ad204aba51906c88f7d18043 | 87278f0b3b8e3d468cd5d8d22e9a966b8c1821c9 | refs/heads/main | 2023-05-04T07:57:08.746663 | 2021-05-19T22:46:04 | 2021-05-19T22:46:04 | 356,970,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,758 | py | from random import randint
# The internal game logic: the ships, the game board and everything connected with them.
# The external game logic: the user interface, the artificial intelligence, and the game controller that counts the destroyed ships.
# It makes sense to start by writing the exception classes the program will use. For example, when a player tries to shoot at a cell outside the board, the internal logic should raise a BoardOutException, which the external logic then catches so the error message can be shown to the user.
class BoardException(Exception):
pass
class BoardOutException(BoardException):
def __str__(self):
return "Вы пытаетесь выстрелить за доску!"
class BoardUsedException(BoardException):
def __str__(self):
return "Вы уже стреляли в эту клетку"
class BoardWrongShipException(BoardException):
def __str__(self):
return "Корабль вышел за границы поля"
pass
# Next comes the Dot class — a point on the board. Every point is described by:
#
# Its x coordinate.
# Its y coordinate.
# The program passes information about points on the board around all the time, so it is worth having a separate data type for them.
# It is very convenient to implement __eq__ in this class so that points can be compared for equality.
# Then, to check whether a point is in a list, it is enough to use the `in` operator, just as we do with numbers.
class Dot:
def __init__(self,x,y):
self.x=x
self.y=y
def __eq__(self, other):
return self.x == other.x and self.y == other.y
def __repr__(self):
return f"Dot({self.x},{self.y})"
# Next comes the Ship class, a ship on the game board, described by:
#
# Its length.
# The point where the ship's bow is placed.
# The ship's orientation (vertical/horizontal).
# The number of lives (how many of the ship's cells have not been hit yet).
# And it has the methods:
#
# The dots property, which returns the list of all points occupied by the ship.
class Ship:
def __init__(self, bow, long, orientation):
self.bow = bow
self.long = long
self.orientation = orientation
self.lives = long
@property
def dots(self):
ship_dots = []
for i in range(self.long):
cur_x = self.bow.x
cur_y = self.bow.y
if self.orientation == 0:
cur_x += i
elif self.orientation == 1:
cur_y += i
ship_dots.append(Dot(cur_x, cur_y))
return ship_dots
def shooten(self, shot):
return shot in self.dots
# The most important class of the internal logic is Board, the game board. A board is described by:
#
# A two-dimensional list that stores the state of every cell.
# The list of the board's ships.
# A boolean parameter hid: whether the ships should be hidden (when printing the enemy board) or shown (for your own board).
# The number of live ships on the board.
class Board:
def __init__(self, hid=False, size=6):
self.size = size
self.hid = hid
self.count = 0
self.field = [["O"] * size for _ in range(size)]
self.busy = []
self.ships = []
# And it has the methods:
#
# add_ship, which places a ship on the board (and raises an exception if the placement is impossible).
def add_ship(self, ship):
for d in ship.dots:
if self.out(d) or d in self.busy:
raise BoardWrongShipException()
for d in ship.dots:
self.field[d.x][d.y] = "■"
self.busy.append(d)
self.ships.append(ship)
self.contour(ship)
# contour, which marks the outline around a ship. It is useful both during the game itself and while placing ships (it marks the neighbouring cells
# where, by the rules, no other ship may be located).
def contour(self, ship, verb=False):
near = [
(-1, -1), (-1, 0), (-1, 1),
(0, -1), (0, 0), (0, 1),
(1, -1), (1, 0), (1, 1)
]
for d in ship.dots:
for dx, dy in near:
cur = Dot(d.x + dx, d.y + dy)
if not (self.out(cur)) and cur not in self.busy:
if verb:
self.field[cur.x][cur.y] = "."
self.busy.append(cur)
# A method that prints the board to the console, depending on the hid parameter.
def __str__(self):
res = ""
res += " | 1 | 2 | 3 | 4 | 5 | 6 |"
for i, row in enumerate(self.field):
res += f"\n{i + 1} | " + " | ".join(row) + " |"
if self.hid:
res = res.replace("■", "O")
return res
# out, which for a point (a Dot object) returns True if the point lies outside the board and False otherwise.
def out(self, d):
return not ((0 <= d.x < self.size) and (0 <= d.y < self.size))
# shot, which fires a shot at the board (and raises exceptions on attempts to shoot outside the board or at an already used cell).
def shot(self, d):
if self.out(d):
raise BoardOutException()
if d in self.busy:
raise BoardUsedException()
self.busy.append(d)
for ship in self.ships:
if d in ship.dots:
ship.lives -= 1
self.field[d.x][d.y] = "X"
if ship.lives == 0:
self.count += 1
self.contour(ship, verb=True)
print("Корабль уничтожен!")
return False
else:
print("Корабль ранен!")
return True
self.field[d.x][d.y] = "."
print("Мимо!")
return False
def begin(self):
self.busy = []
class All_board():
def __init__(self, board_1=None, board_2=None):
self.board_1 = board_1
self.board_2 = board_2
def __str__(self):
res = ""
res2 = ""
res += " Доска пользователя Доска компьютера "
res += f"\n | 1 | 2 | 3 | 4 | 5 | 6 | ... | 1 | 2 | 3 | 4 | 5 | 6 |"
for i, row in enumerate(self.board_1.field):
for j, row2 in enumerate(self.board_2.field):
if i == j:
res2 = " | ".join(row2).replace("■", "O")
res += f"\n{i + 1} | " + " | ".join(row) + " | " +"..."+ f"{i + 1} | " + res2 + " | "
return res
# Now for the external logic. The Player class represents a game participant (both the AI and the human user) and is the parent of the AI and User classes.
# A player is described by:
# Their own board (a Board object).
# The enemy board.
# And has the following methods:
#
# ask, a method that "asks" the player which cell they are shooting at.
# While we are writing the class shared by the AI and the user, we cannot implement this method yet.
# We leave it empty, which signals that subclasses must implement it.
# move, a method that makes one move in the game.
# Here we call ask, fire at the enemy board (Board.shot), catch any exceptions and, if there are any, retry the move.
# The method must return True if this player gets another turn (for example, when the shot hit a ship).
class Player:
def __init__(self, board, enemy):
self.board = board
self.enemy = enemy
self.last_shoot = None
def ask(self):
raise NotImplementedError()
def move(self,shoot_near):
while True:
try:
target = self.ask(shoot_near)
repeat = self.enemy.shot(target)
self.last_shoot = target
                # if repeat: print("second attempt after a hit", last_shoot)
return repeat
except BoardException as e:
print(e)
# Now it only remains to inherit the AI and User classes from Player and override the ask method in them.
# For the AI this is picking a random point, while for User the method asks for the point's coordinates from the console.
class AI(Player):
def ask(self, shoot_near):
if self.last_shoot is not None:
print("Последний выстрел компьютера ",self.last_shoot.x+1,self.last_shoot.y+1)
        # After a hit, aim at the orthogonal neighbours of the last shot ("shoot nearby" mode).
        if shoot_near:
            # pick the first neighbouring cell that is still on the board and has not been shot at yet;
            # fall back to a random cell if all four neighbours are already used
            neighbours = [Dot(self.last_shoot.x, self.last_shoot.y + 1),
                          Dot(self.last_shoot.x, self.last_shoot.y - 1),
                          Dot(self.last_shoot.x + 1, self.last_shoot.y),
                          Dot(self.last_shoot.x - 1, self.last_shoot.y)]
            free = [dot for dot in neighbours if not self.enemy.out(dot) and dot not in self.enemy.busy]
            d = free[0] if free else Dot(randint(0, 5), randint(0, 5))
        else:
            d = Dot(randint(0, 5), randint(0, 5))
print(f"Ход компьютера: {d.x + 1} {d.y + 1}")
return d
class User(Player):
def ask(self,shoot_near):
if self.last_shoot is not None:
print("Последний выстрел игрока ", self.last_shoot.x+1,self.last_shoot.y+1)
while True:
cords = input("Ваш ход: ").split()
if len(cords) != 2:
print(" Введите 2 координаты! ")
continue
x, y = cords
if not (x.isdigit()) or not (y.isdigit()):
print(" Введите числа! ")
continue
x, y = int(x), int(y)
return Dot(x - 1, y - 1)
# Afterwards we create the main class, Game. The game is described by:
#
# The human player, a User object.
# The user's board.
# The computer player, an AI object.
# The computer's board.
# And it has the methods:
#
# random_board generates a random board. We simply keep trying to place ships at random cells of an initially empty board (retrying in an endless loop until the attempt succeeds). It is better to place the long ships first and the short ones after. If many (several thousand) attempts to place a ship have failed, the board is a bad one and no more ships can be added to it, so we start generating a new board.
# greet greets the user in the console and explains the input format.
# loop is the game loop itself: we simply call the players' move methods in turn and check how many live ships remain on each board to determine the winner.
# start launches the game: first we call greet, then loop.
class Game:
def __init__(self, size=6):
self.size = size
choice = None
pl = None
        while choice is None: # let the player choose how the ships are placed
choice = int(input("0 - случайная расстановка кораблей, 1 - раставить самостоятельно :"))
if choice == 0:
pl = self.random_board()
break
elif choice == 1:
pl = self.self_board()
break
else:
choice = None
print("Неверно выбрано значение")
co = self.random_board()
co.hid = True
self.ai = AI(co, pl)
self.us = User(pl, co)
self.all = All_board(self.us.board, self.ai.board)
def random_board(self):
board = None
while board is None:
board = self.random_place()
return board
def random_place(self):
lens = [3, 2, 2, 1, 1, 1, 1]
board = Board(size=self.size)
attempts = 0
for l in lens:
while True:
attempts += 1
if attempts > 2000:
return None
ship = Ship(Dot(randint(0, self.size), randint(0, self.size)), l, randint(0, 1))
try:
board.add_ship(ship)
break
except BoardWrongShipException:
pass
board.begin()
return board
    # Let the player place the ships manually
def self_board(self):
lens = [3, 2, 2, 1, 1, 1, 1]
board = Board(size=self.size)
print("--------------------")
print("-Установите корабли-")
print(" формат ввода: x y z")
print(" x - номер строки ")
print(" y - номер столбца ")
print(" z - направление корабля (1-горизонтально, 0-вертикально)")
for l in lens:
while True:
print("-" * 20)
print("Доска пользователя:")
print(board)
bows = input(f"Введите координаты и направление для корабля длинной {l}: ").split()
if len(bows) != 3:
print(" Введите 3 значения! координтаы носа и направление ")
continue
x, y, z = bows
if not (x.isdigit()) or not (y.isdigit()) or not (z.isdigit()):
print(" Введите числа! ")
continue
x, y, z = int(x), int(y), int(z)
ship = Ship(Dot(x-1, y-1), l, z)
try:
board.add_ship(ship)
break
except BoardWrongShipException:
pass
board.begin()
return board
def greet(self):
print("-------------------")
print(" Приветсвуем вас ")
print(" в игре ")
print(" морской бой ")
print("-------------------")
print(" формат ввода: x y ")
print(" x - номер строки ")
print(" y - номер столбца ")
def loop(self):
num = 0
shoot_near = False
while True:
print("-" * 20)
# print("Доска пользователя:")
# print(self.us.board)
# print("-" * 20)
# print("Доска компьютера:")
# print(self.ai.board)
print(self.all)
if num % 2 == 0:
print("-" * 20)
print("Ходит пользователь!")
repeat = self.us.move(shoot_near)
else:
print("-" * 20)
print("Ходит компьютер!")
repeat = self.ai.move(shoot_near)
if repeat:
num -= 1
shoot_near = True
else:
shoot_near = False
if self.ai.board.count == 7:
print("-" * 20)
print("Пользователь выиграл!")
break
if self.us.board.count == 7:
print("-" * 20)
print("Компьютер выиграл!")
break
num += 1
def start(self):
self.greet()
self.loop()
# Finally, we just create a Game instance and call its start method.
g = Game()
g.start() | [
"[email protected]"
] | |
acc15361e8370b7ba0ae6a4582e8d0fc9c912c4d | f30f6672702591c2d0adad5a2f57af8afd493117 | /todo/migrations/0004_auto_20190612_1151.py | cdf7922646dffb5a2af9d82cfc9a58c456b4640d | [] | no_license | MedMekss/Listed | 0f294ecc16d2db4a9ee37f408b1a7a11229409f4 | 06ac0bb5140b11aaa704a6cd0f60bb2c15eb6449 | refs/heads/master | 2020-05-20T03:25:51.047936 | 2019-06-18T09:20:49 | 2019-06-18T09:20:49 | 185,356,172 | 3 | 1 | null | 2019-06-17T13:30:48 | 2019-05-07T08:25:24 | Python | UTF-8 | Python | false | false | 380 | py | # Generated by Django 2.2.1 on 2019-06-12 09:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('todo', '0003_auto_20190606_1243'),
]
operations = [
migrations.AlterField(
model_name='item',
name='title',
field=models.CharField(max_length=32),
),
]
| [
"[email protected]"
] | |
494c1e3a8da4af904b0d96a5540e85b475400cc2 | 0e4860fecfdd34a3255003cc8c8df086c14083dd | /python/practise/带你学Django资料及源码/课堂与博客代码/peace_blog/blog/admin.py | 9c1fb6228842fe4ec5d8931dc4a0aad2aa044aa9 | [] | no_license | anzhihe/learning | 503ab9a58f280227011da5eaa4b14b46c678e6f3 | 66f7f801e1395207778484e1543ea26309d4b354 | refs/heads/master | 2023-08-08T11:42:11.983677 | 2023-07-29T09:19:47 | 2023-07-29T09:19:47 | 188,768,643 | 1,443 | 617 | null | 2023-08-24T02:10:34 | 2019-05-27T04:04:10 | Python | UTF-8 | Python | false | false | 289 | py | from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(Banner)
admin.site.register(Category)
admin.site.register(Tag)
admin.site.register(Article)
admin.site.register(FriendLink)
admin.site.register(Comment)
admin.site.register(BlogUser)
| [
"[email protected]"
] | |
875a564377d75822b6c87a33792ad8d32b40b7b6 | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/datacatalog/outputs.py | 26d9e4bddb4ce2d56c83f67f19a73cd325ca56ef | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,362 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from ._enums import *
__all__ = [
'PrincipalsResponse',
]
@pulumi.output_type
class PrincipalsResponse(dict):
"""
User principals.
"""
def __init__(__self__, *,
object_id: Optional[str] = None,
upn: Optional[str] = None):
"""
User principals.
:param str object_id: Object Id for the user
:param str upn: UPN of the user.
"""
if object_id is not None:
pulumi.set(__self__, "object_id", object_id)
if upn is not None:
pulumi.set(__self__, "upn", upn)
@property
@pulumi.getter(name="objectId")
def object_id(self) -> Optional[str]:
"""
Object Id for the user
"""
return pulumi.get(self, "object_id")
@property
@pulumi.getter
def upn(self) -> Optional[str]:
"""
UPN of the user.
"""
return pulumi.get(self, "upn")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
| [
"[email protected]"
] | |
3d97346bdf439f2d34bb79bcaaf9889159184176 | 46128392d3fc39d4fb75f07ac0b37234c9628644 | /models/vgg.py | 122ca008f5022af7313548698e8c80a8aa89a742 | [
"MIT"
] | permissive | Light-Alex/Multi-Style-Transfer | 494f1ac8c17a0cbd89eb73658ae9af0c663141a0 | 7d151108cc90a0abeffd2812c3950f516f39d932 | refs/heads/master | 2022-11-23T20:54:23.277987 | 2020-07-28T07:09:55 | 2020-07-28T07:09:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,307 | py | import mxnet as mx
from mxnet.gluon import nn, HybridBlock, Parameter
from mxnet.initializer import Xavier
import os  # needed below to check whether pretrain_path points to an existing file
class Vgg16(HybridBlock):
def __init__(self):
super(Vgg16, self).__init__()
self.conv1_1 = nn.Conv2D(in_channels=3, channels=64, kernel_size=3, strides=1, padding=1)
self.conv1_2 = nn.Conv2D(in_channels=64, channels=64, kernel_size=3, strides=1, padding=1)
self.conv2_1 = nn.Conv2D(in_channels=64, channels=128, kernel_size=3, strides=1, padding=1)
self.conv2_2 = nn.Conv2D(in_channels=128, channels=128, kernel_size=3, strides=1, padding=1)
self.conv3_1 = nn.Conv2D(in_channels=128, channels=256, kernel_size=3, strides=1, padding=1)
self.conv3_2 = nn.Conv2D(in_channels=256, channels=256, kernel_size=3, strides=1, padding=1)
self.conv3_3 = nn.Conv2D(in_channels=256, channels=256, kernel_size=3, strides=1, padding=1)
self.conv4_1 = nn.Conv2D(in_channels=256, channels=512, kernel_size=3, strides=1, padding=1)
self.conv4_2 = nn.Conv2D(in_channels=512, channels=512, kernel_size=3, strides=1, padding=1)
self.conv4_3 = nn.Conv2D(in_channels=512, channels=512, kernel_size=3, strides=1, padding=1)
self.conv5_1 = nn.Conv2D(in_channels=512, channels=512, kernel_size=3, strides=1, padding=1)
self.conv5_2 = nn.Conv2D(in_channels=512, channels=512, kernel_size=3, strides=1, padding=1)
self.conv5_3 = nn.Conv2D(in_channels=512, channels=512, kernel_size=3, strides=1, padding=1)
def hybrid_forward(self,F, X):
h = F.Activation(self.conv1_1(X), act_type='relu')
h = F.Activation(self.conv1_2(h), act_type='relu')
relu1_2 = h
h = F.Pooling(h, pool_type='max', kernel=(2, 2), stride=(2, 2))
h = F.Activation(self.conv2_1(h), act_type='relu')
h = F.Activation(self.conv2_2(h), act_type='relu')
relu2_2 = h
h = F.Pooling(h, pool_type='max', kernel=(2, 2), stride=(2, 2))
h = F.Activation(self.conv3_1(h), act_type='relu')
h = F.Activation(self.conv3_2(h), act_type='relu')
h = F.Activation(self.conv3_3(h), act_type='relu')
relu3_3 = h
h = F.Pooling(h, pool_type='max', kernel=(2, 2), stride=(2, 2))
h = F.Activation(self.conv4_1(h), act_type='relu')
h = F.Activation(self.conv4_2(h), act_type='relu')
h = F.Activation(self.conv4_3(h), act_type='relu')
relu4_3 = h
return [relu1_2, relu2_2, relu3_3, relu4_3]
def _init_weights(self, fixed=False, pretrain_path=None, ctx=None):
if pretrain_path is not None:
print('Loading parameters from {} ...'.format(pretrain_path))
self.collect_params().load(pretrain_path, ctx=ctx)
if fixed:
print('Setting parameters of VGG16 to fixed ...')
for param in self.collect_params().values():
param.grad_req = 'null'
else:
self.initialize(mx.initializer.Xavier(), ctx=ctx)
return_layers_id = {
11: [6, 13, 20, 27],
16: [5, 12, 22, 42]
}
vgg_spec = {11: ([1, 1, 2, 2, 2], [64, 128, 256, 512, 512]),
13: ([2, 2, 2, 2, 2], [64, 128, 256, 512, 512]),
16: ([2, 2, 3, 3, 3], [64, 128, 256, 512, 512]),
19: ([2, 2, 4, 4, 4], [64, 128, 256, 512, 512])}
class VGG(HybridBlock):
r"""VGG model from the `"Very Deep Convolutional Networks for Large-Scale Image Recognition"
<https://arxiv.org/abs/1409.1556>`_ paper.
Parameters
----------
layers : list of int
Numbers of layers in each feature block.
filters : list of int
Numbers of filters in each feature block. List length should match the layers.
classes : int, default 1000
Number of classification classes.
batch_norm : bool, default False
Use batch normalization.
"""
def __init__(self, num_layers, batch_norm=True, pretrain_path=None, ctx=None, **kwargs):
super(VGG, self).__init__(**kwargs)
layers, filters = vgg_spec[num_layers]
self.features = self._make_features(layers, filters, batch_norm)
self.features.add(nn.Dense(4096, activation='relu',
weight_initializer='normal',
bias_initializer='zeros'))
self.features.add(nn.Dropout(rate=0.5))
self.features.add(nn.Dense(4096, activation='relu',
weight_initializer='normal',
bias_initializer='zeros'))
self.features.add(nn.Dropout(rate=0.5))
self.output = nn.Dense(1000,
weight_initializer='normal',
bias_initializer='zeros')
self.return_id_list = return_layers_id[num_layers]
if pretrain_path is not None and os.path.isfile(pretrain_path):
self.pretrained = True
self.load_pretrained_param(pretrain_path, ctx)
def _make_features(self, layers, filters, batch_norm):
featurizer = nn.HybridSequential()
for i, num in enumerate(layers):
for _ in range(num):
featurizer.add(nn.Conv2D(filters[i], kernel_size=3, padding=1,
weight_initializer=Xavier(rnd_type='gaussian',
factor_type='out',
magnitude=2),
bias_initializer='zeros'))
if batch_norm:
featurizer.add(nn.BatchNorm())
featurizer.add(nn.Activation('relu'))
featurizer.add(nn.MaxPool2D(strides=2))
return featurizer
def hybrid_forward(self, F, x):
return_ = []
for id, layer in enumerate(self.features):
if isinstance(layer, nn.basic_layers.Dense):
break
x = layer(x)
if id in self.return_id_list:
return_.append(x)
#x = self.features(x)
#x = self.output(x)
return return_
def load_pretrained_param(self, pretrain_path, ctx):
print('Loading Parameters from {}'.format(pretrain_path))
self.load_parameters(pretrain_path, ctx=ctx) | [
"[email protected]"
] | |
cfd644d146385683734341f86b5e62a3ee4cd227 | d5a196acb7531c89d930ba51e33e2319fab0972d | /150/A.py | 220217dd59ad3170a30a2c1ee380094618c0dce1 | [] | no_license | mido1003/atcorder | f1a073a850557c6f18176ad9ff3dfcfe5414afdf | 92639b15d982f29042883621c2fb874e1813a447 | refs/heads/master | 2020-09-20T16:12:53.708315 | 2020-05-25T09:48:16 | 2020-05-25T09:48:16 | 224,533,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | k,x = (int(x) for x in input().split())
if k * 500 >= x:
print("Yes")
else:
print("No") | [
"[email protected]"
] | |
1533905896294b79dff04e1b69b2cda7c0496874 | fa1dc1d0d2a169326c97dab863e15403bbd6bdbd | /CS486-686_A2Q2ANN.py | c52223b2857731732b02c8b7a75ccd93868316f2 | [
"MIT"
] | permissive | mojivalipour/nnscratch | f07b893f7ac9792f5c9bb8e8ca5c664e392b6786 | 5e0b7f100d1057fab2c166df5696163634acd726 | refs/heads/master | 2022-11-18T11:43:15.553593 | 2020-07-17T05:19:10 | 2020-07-17T05:19:10 | 271,581,705 | 3 | 8 | null | null | null | null | UTF-8 | Python | false | false | 21,331 | py | #!/usr/bin/env python
# coding: utf-8
# Design and Programming by Lead TA: Mojtaba Valipour @ Data Analytics Lab - UWaterloo.ca
# COURSE: CS 486/686 - Artificial Intelligence - University of Waterloo - Spring 2020 - Alice Gao
# Please let me know if you find any bugs in the code: [email protected]
# The code will be available at https://github.com/mojivalipour/nnscratch
# Version: 0.9.0
# Implement a neural network from scratch
''' Sources:
- http://neuralnetworksanddeeplearning.com/chap2.html
'''
print('Life is easy, you just need to do your best to find your place!')
# Libraries
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from sklearn import datasets
from sklearn.manifold import TSNE # visualization for data with more than two features
from os import path
import pandas as pd
import csv
import copy
import random
# Helper functions
def fixSeed(seed=1010):
np.random.seed(seed)
random.seed(seed)
# The hyper-parameters for the neural network
nSamples = None # use None if you want to use full sample size
# frogsSmall is the same dataset as in Q1 that you have to use for comparison
dataset = '2moons' # 2moons/frogsSmall/frogs
noise = 0.05 # Noise in artificial datasets
visNumSamples = 500 # number of samples to visualize
# for regression, we use mean squared error.
# for classification, we use cross entropy.
# for now only mse is supported!
lossFunction = 'mse'
gdMethod = 'batch' # batch gradient descent method
batchSize = 64 # only for minibatch gradient descent
numEpochs = 200 # number of epochs
learningRate = [0.5,0.05,0.005] # learning rates
# for now only relu and sigmoid is supported
lastActivationFunc = 'sigmoid' # relu/sigmoid/softmax
# last layer activation function, this one is important
# because we need to use it for classification later
crossValidationFlag = True # if you like to run cross validation, set this flag to True
kFold = 3 # k-fold cross validation, at least need to be 2
seed = 6565 # Do not change the seed for Assignment
fixSeed(seed=seed) # fix the seed of the random generator to make sure the comparison is reproducible
# Some Useful Notes for those students who are interested to know more:
'''
- Neural networks are prone to overfitting. Increasing the number of parameters
could lead to models that have complexity bigger than data.
- Regularization, Normalization and Dropout are popular solutions to overfitting!
- In a neural network, we usually use the softmax function as last layer
activation for multi-class classification and sigmoid for single class
classification.
- For regression problems, we usually use Relu as last layer activation function
and MSE as the loss function that we want to minimize.
- Cross-entropy is the most useful loss function for multi-class classification.
- Sometimes we need to use multiple neurons in the output layer, which means
that we consider a neuron for each class. In this case, we need to use
one-hot vectors to encode the labels.
- Weight initialization is important! Gradient descent is not robust to
weight initialization! Xavier initialization is the most popular method
to initialize weights in neural networks.
'''
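# Illustrative sketches of two of the notes above (added for clarity; these helper names are ours
# and the assignment code below does not call them).
def xavierInit(fanIn, fanOut):
    # Xavier/Glorot-style initialization: scale random weights by 1/sqrt(fanIn)
    return np.random.randn(fanIn, fanOut) / np.sqrt(fanIn)
def oneHot(labels, numClasses):
    # encode integer class labels as one-hot row vectors, e.g. 2 -> [0, 0, 1, ...]
    encoded = np.zeros((len(labels), numClasses))
    encoded[np.arange(len(labels)), labels] = 1
    return encoded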
# Load data
colorBox = ['#377eb8','#FA0000','#344AA7', '#1EFA39','#00FBFF','#C500FF','#000000','#FFB600']
if dataset == '2moons':
nSamples = 1000 if nSamples is None else nSamples
X,y = datasets.make_moons(n_samples=nSamples, noise=noise, random_state=seed)
numSamples, numFeatures, numClasses = X.shape[0], X.shape[1], 2
# shuffle X,y
idxList = list(range(nSamples))
random.shuffle(idxList) # inplace
X, y = X[idxList,:], y[idxList]
elif dataset == 'frogsSmall' or dataset == 'frogs':
if dataset == 'frogs':
# original dataset
name = 'Frogs_MFCCs.csv'
else:
# a small subset of frogs original dataset, same as A2Q1
name = 'frogs-small.csv'
# check if we already have the file in the directory
if not path.isfile(name):
# otherwise ask user to upload it
print("Please put this {} file in the current directory using choose files ...".format(name))
# just load the csv file
X = pd.read_csv(name, sep=',')
X["Family"] = X["Family"].astype('category')
X["FamilyCat"] = X["Family"].cat.codes # added to the last column
X, y = X.iloc[:,0:22].to_numpy(), X.iloc[:,-1].to_numpy()
nSamples = X.shape[0] if nSamples is None else nSamples
X, y = X[:nSamples,:], y[:nSamples] # filter number of samples
numSamples, numFeatures, numClasses = X.shape[0], X.shape[1], len(np.unique(y))
print('#INFO: N (Number of Samples): {}, D (Number of Features): {}, C (Number of Classes): {}'.format(numSamples, numFeatures, numClasses))
plt.figure()
# if y min is not zero, make it zero
y = y - y.min()
assert y.min() == 0
# sample required sample for visualization
indices = list(range(numSamples))
selectedIndices = np.random.choice(indices, visNumSamples)
colors = [colorBox[y[idx]] for idx in selectedIndices]
if numFeatures == 2:
XR = X[selectedIndices, :]
else:
# use tsne to reduce dimensionality for visualization
XR = TSNE(n_components=2).fit_transform(X[selectedIndices,:])
plt.scatter(XR[:, 0], XR[:, 1], s=10, color=colors)
plt.savefig('dataset.png')
if len(y.shape) < 2:
y = np.expand_dims(y,-1) # shape of y should be N x 1
# Define the network structure
# # 2-Layer Network
# config = {
# # Layer Name: [Number of Nodes (in and out), Bias, Activation Function]
# 'Hidden Layer 0': [[numFeatures, 30], True, 'relu'], # w1
# 'Fully Connected': [[30, 1], True, lastActivationFunc] # w2
# }
# overfit network example
config = {
# Layer Name: [Number of Nodes (in and out), Bias, Activation Function]
'Hidden Layer 0': [[numFeatures, 1000], True, 'sigmoid'], # w1
'Fully Connected': [[1000, 1], True, lastActivationFunc] # w2
}
# 3-Layer Network
# config = {
# # Layer Name: [Number of Nodes (in and out), Bias, Activation Function]
# 'Hidden Layer 0': [[numFeatures, 3], True, 'sigmoid'], # w1
# 'Hidden Layer 1': [[3, 5], True, 'sigmoid'], # w2
# 'Fully Connected': [[5, 1], True, lastActivationFunc] # w2
# }
# 4-layer Network
# config = {
# # Layer Name: [Number of Nodes (in and out), Bias, Activation Function]
# 'Hidden Layer 0': [[numFeatures, 100], True, 'relu'], # w1
# 'Hidden Layer 1': [[100, 50], True, 'relu'], # w2
# 'Hidden Layer 2': [[50, 5], True, 'relu'], # w3
# 'Fully Connected': [[5, 1], True, lastActivationFunc] # w4
# }
# Fully Connected Neural Network Class
class neuralNetwork():
# initializing network
def __init__(self, config=None, numClass=2, learningRate=0.005,
numEpochs=10, batchSize= 64, lossFunction='mse'):
self.config = config
self.configKeyList = list(self.config.keys())
self.lossFunction = lossFunction
self.numLayers = len(self.config)
self.layers = {}
self.layerShapes = {}
self.learningRate = learningRate
self.numEpochs = numEpochs
self.loss = []
self.lossT = []
self.acc = []
self.accT = []
self.batchSize = batchSize
self.numClass = numClass
self.initWeights()
# random init
def initWeights(self):
self.loss = []
self.lossT = []
self.acc = []
self.accT = []
if self.config != None:
for key in config:
# w is parameters, b is bias, a is activation function
self.layers[key] = {'W':np.random.randn(self.config[key][0][0],
self.config[key][0][1])/np.sqrt(self.config[key][0][1]),
'b':np.random.randn(self.config[key][0][1],
) if self.config[key][1]==True else [], 'a':self.config[key][2]}
# keep track of shape only for better understanding
self.layerShapes[key] = {'IS':self.config[key][0][0],'OS':self.config[key][0][1],
'NP':np.prod(self.layers[key]['W'].shape)+len(self.layers[key]['b'])}
else:
raise '#Err: Make sure you set a configuration correctly!'
# activation functions
def relu(self, X):
return np.maximum(0, X)
def sigmoid(self, X):
#TODO: fix the overflow problem in Numpy exp function
return 1./(1. + np.exp(-X))
def activationFunc(self, X, type='sigmoid'):
if type == 'sigmoid':
return self.sigmoid(X)
elif type == 'relu':
return self.relu(X)
elif type == 'None':
return X # do nothing
else:
raise '#Err: Not implemented activation function!'
# objective/loss/cost functions
def mse(self, y, yPred): # mean square error
return np.mean(np.power(y-yPred,2))
def lossFunc(self, y, yPred, type='mse'):
if type == 'mse':
return self.mse(y, yPred)
else:
raise '#Err: Not implemented objective function!'
# back-propagation learning
# forward pass
def forward(self, X):
# apply a(W.T x X + b) for each layer
for key in config:
#print(X.shape, self.layers[key]['W'].shape)
# save input of each layer for backward pass
self.layers[key]['i'] = X
z = np.dot(X, self.layers[key]['W'])
z = z + self.layers[key]['b'] if len(self.layers[key]['b'])!=0 else z
# save middle calculation for backward pass
self.layers[key]['z'] = z
X = self.activationFunc(z, type=self.layers[key]['a'])
# save middle calculation for backward pass
self.layers[key]['o'] = X
return X # yPred
# backward pass
def backward(self, y, yPred):
# derivative of sigmoid
def sigmoidPrime(x):
return self.sigmoid(x) * (1-self.sigmoid(x))
# derivative of relu
def reluPrime(x):
return np.where(x <= 0, 0, 1)
def identity(x):
return x
#TODO: It's not necessary to use double for,
# it is possible to implement faster and more efficient version
# for each parameter (weights and bias) in each layer
for idx, key in enumerate(config):
# calculate derivatives
if self.layers[key]['a'] == 'sigmoid':
fPrime = sigmoidPrime
elif self.layers[key]['a'] == 'relu':
fPrime = reluPrime
elif self.layers[key]['a'] == 'softmax':
fPrime = softmaxPrime
else: # None
fPrime = identity
deWRTdyPred = -(y-yPred) if self.lossFunction == 'mse' else 1 # de/dyPred
# print('de/dy')
# dyPred/dyPredBeforeActivation # in case of sigmoid g(x) x (1-g(x))
dyPredWRTdyPredPre = fPrime(self.layers[self.configKeyList[-1]]['o'])
# print('dy/dz')
# element wise multiplication/ hadamard product
delta = np.multiply(deWRTdyPred, dyPredWRTdyPredPre)
for idxW in range(len(config),idx,-1): # reverse
if idxW-1 == idx:
# calculating the derivative for the last one is different
# because it is respected to that specific weight
#print('\nWeights of layer',idx)
deltaB = delta
dxWRTdW = self.layers[key]['i'].T # dxWRTdW
delta = np.dot(dxWRTdW,delta)
#print('dz/dw')
else:
# this loop is depended to the number of layers in the configuration
# print('\nWeights of layer',idxW-1)
# the weights of current layer
# how fast the cost is changing as a function of the output activation
dxWRTdh = self.layers[self.configKeyList[idxW-1]]['W'].T # dxPreWRTdx-1
# print('dz/da')
# print('output of layer',idxW-1-1)
# the output of previous layer
# how fast the activation function is changing
dhWRTdhPre = fPrime(self.layers[self.configKeyList[idxW-1-1]]['o']) # dx-1WRTdx-1Pre
# print('da/dz')
delta = np.dot(delta, dxWRTdh) * dhWRTdhPre
# sanity check: Numerical Gradient Checking
# f'(x) = lim (f(x+deltax)-f(x))/deltax when deltax -> 0
# update parameters
# W = W - Gamma * dL/dW
self.layers[key]['djWRTdw'] = delta
self.layers[key]['W'] = self.layers[key]['W'] - self.learningRate/y.shape[0] * delta
# b = b - Gamma * dL/db
self.layers[key]['djWRTdb'] = deltaB
if len(self.layers[key]['b'])!=0:
self.layers[key]['b'] = self.layers[key]['b'] - self.learningRate/y.shape[0] * np.sum(deltaB, axis=0)
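    # --- numerical gradient check sketch (an added illustration, not part of the original hand-out) ---
    # Central differences approximate d(loss)/dW[i, j]; compare its sign/ratio with the analytic delta
    # stored in layers[key]['djWRTdw'] by backward() (this implementation folds constant factors of the
    # MSE derivative into the learning rate, so the raw magnitudes are not expected to match exactly).
    def numericalGradient(self, X, y, key, i, j, epsilon=1e-5):
        W = self.layers[key]['W']
        old = W[i, j]
        W[i, j] = old + epsilon
        lossPlus = self.lossFunc(y, self.forward(X), type=self.lossFunction)
        W[i, j] = old - epsilon
        lossMinus = self.lossFunc(y, self.forward(X), type=self.lossFunction)
        W[i, j] = old  # restore the original weight
        return (lossPlus - lossMinus) / (2.0 * epsilon)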
# Utility Functions
def summary(self, space=20):
print('{: <{}} | {: <{}} | {: <{}} | {: <{}}'.format("Layer Name", space,
"Input Shape", space,
"Output Shape", space,
"Number of Parameters",space))
for key in config:
print('{: <{}} | {: <{}} | {: <{}} | {: <{}}'.format(key, space,
self.layerShapes[key]['IS'], space,
self.layerShapes[key]['OS'], space,
self.layerShapes[key]['NP'], space))
def fit(self, X, y, XT=None, yT=None, method='batch', batchSize=None, numEpochs=None,
learningRate=None, initialState=None):
if numEpochs is None: # overwrite
numEpochs = self.numEpochs
if learningRate is not None:
self.learningRate = learningRate
if batchSize is not None:
self.batchSize = batchSize
# if initialState is not None:
# # use the given initial parameters (weights and bias)
# self.layers = initialState
if method == 'batch':
# this is infact mini-batch gradient descent, just for consistency in course material
# same as batched gradient descent in class to make it easier for you
pBar = tqdm(range(numEpochs))
for edx in pBar:
for idx in range(0, X.shape[0], self.batchSize):
start = idx
end = start + self.batchSize
end = end if end < X.shape[0] else X.shape[0]
#TODO: Support variable batchsize
if end-start != self.batchSize:
continue
x_, y_ = X[start:end, :], y[start:end, :]
yPred = self.forward(x_)
loss = self.lossFunc(y_, yPred, type=self.lossFunction)
self.backward(y_, yPred)
yPred,yPredOrig = self.predict(X)
loss = self.lossFunc(y, yPredOrig, type=self.lossFunction)
self.loss.append(loss)
acc = self.accuracy(y, yPred)
self.acc.append(acc)
if XT is not None:
yPred, yPredOrig = self.predict(XT)
loss = self.lossFunc(yT, yPredOrig, type=self.lossFunction)
self.lossT.append(loss)
acc = self.accuracy(yT, yPred)
self.accT.append(acc)
else:
raise '#Err: {} Gradient Descent Method is Not implemented!'.format(method)
def predict(self, X):
yPred = self.forward(X)
yPredOrigin = copy.deepcopy(yPred)
# last layer activation function, class prediction should be single
# and the output is between zero and one
if self.config[self.configKeyList[-1]][-1] == 'sigmoid':
yPred[yPred < 0.5] = 0
yPred[yPred >= 0.5] = 1
# multi-class problem
elif self.config[self.configKeyList[-1]][-1] == 'softmax':
raise '#Err: Prediction is not supported for softmax yet!'
# single/multi class problem, single node and it can be anything greater than 0
elif self.config[self.configKeyList[-1]][-1] == 'relu':
yPred = np.round(yPred)
yPred = np.clip(yPred, 0, self.numClass-1) # sanity check
return yPred, yPredOrigin
def error(self, y, yPred):
return self.lossFunc(y, yPred, type=self.lossFunction)
def accuracy(self, y, yPred):
return 100*np.sum(y==yPred)/y.shape[0]
def plotLoss(self, loss=None, ax=None):
if loss is None:
loss = self.loss
if ax is None:
plt.plot(loss)
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.title("Loss Per Epoch")
plt.show()
else:
ax.plot(loss)
ax.set_xlabel("Epochs")
ax.set_ylabel("Loss")
ax.set_title("Loss Per Epoch")
def crossValidationIndices(self, index, k=5):
# index is a list of indexes
cvList = []
for idx in range(k): # iterate over k-folds
interval = int(len(index)/k)
start = idx * interval
end = start + interval
testIndexes = list(range(start,end))
trainIndexes = list(range(0,start)) + list(range(end,len(index)))
cvList.append((trainIndexes, testIndexes))
return cvList
if crossValidationFlag:
if len(learningRate) == 1:
fig, ax = plt.subplots(3,len(learningRate),figsize=(8,15))
else:
fig, ax = plt.subplots(3,len(learningRate),figsize=(30,3*(len(learningRate)+2)))
else:
fig, ax = plt.subplots(1,1+len(learningRate),figsize=(30,1+len(learningRate)))
for ldx, lr in enumerate(learningRate):
nn = neuralNetwork(config=config, numClass=numClasses, numEpochs=numEpochs,
learningRate=lr, lossFunction=lossFunction)
# Initialize the network and the weights
nn.initWeights()
if crossValidationFlag:
indexes = list(range(X.shape[0]))
cvIndices = nn.crossValidationIndices(indexes, k=kFold)
accList = []
accTList = []
lossList = []
lossTList = []
for k in range(kFold):
nn.initWeights()
XTrain, yTrain = X[cvIndices[k][0],:], y[cvIndices[k][0],:]
XTest, yTest = X[cvIndices[k][1],:], y[cvIndices[k][1],:]
# Train the network
nn.fit(XTrain, yTrain, XTest, yTest, method=gdMethod, batchSize=batchSize,
numEpochs=numEpochs, learningRate=lr)
accList.append(nn.acc)
accTList.append(nn.accT)
lossList.append(nn.loss)
lossTList.append(nn.lossT)
acc = np.mean(accList, axis=0)
accT = np.mean(accTList, axis=0)
loss = np.mean(lossList, axis=0)
lossT = np.mean(lossTList, axis=0)
# print the network structure
nn.summary()
yPred, yPredOrig = nn.predict(X)
print('#INFO: Mean squared error is {}'.format(nn.error(y,yPred)))
colors = [colorBox[int(yPred[idx])] for idx in selectedIndices]
if len(learningRate) == 1:
ax[2].scatter(XR[:, 0], XR[:, 1], s=10, color=colors)
ax[2].set_xlabel("X1")
ax[2].set_ylabel("X2")
ax[2].set_title("Data, LR: {}".format(lr))
ax[0].plot(acc)
ax[0].plot(accT)
ax[0].legend(['Train','Test'])
ax[0].set_xlabel("Epochs")
ax[0].set_ylabel("Accuracy")
ax[0].set_title("Accuracy Per Epoch"+", LR: {}".format(lr))
ax[1].plot(loss)
ax[1].plot(lossT)
ax[1].legend(['Train','Test'])
ax[1].set_xlabel("Epochs")
ax[1].set_ylabel("Loss")
ax[1].set_title("Loss Per Epoch"+", LR: {}".format(lr))
else:
ax[2,ldx].scatter(XR[:, 0], XR[:, 1], s=10, color=colors)
ax[2,ldx].set_xlabel("X1")
ax[2,ldx].set_ylabel("X2")
ax[2,ldx].set_title("Data, LR: {}".format(lr))
ax[0,ldx].plot(acc)
ax[0,ldx].plot(accT)
ax[0,ldx].legend(['Train','Test'])
ax[0,ldx].set_xlabel("Epochs")
ax[0,ldx].set_ylabel("Accuracy")
ax[0,ldx].set_title("Accuracy Per Epoch"+", LR: {}".format(lr))
ax[1,ldx].plot(loss)
ax[1,ldx].plot(lossT)
ax[1,ldx].legend(['Train','Test'])
ax[1,ldx].set_xlabel("Epochs")
ax[1,ldx].set_ylabel("Loss")
ax[1,ldx].set_title("Loss Per Epoch"+", LR: {}".format(lr))
else:
# Perform a single run for visualization.
nn.fit(X, y, method=gdMethod, batchSize=batchSize, numEpochs=numEpochs,
learningRate=lr)
# print the network structure
nn.summary()
yPred, yPredOrig = nn.predict(X)
print('#INFO: Mean squared error is {}'.format(nn.error(y,yPred)))
colors = [colorBox[int(yPred[idx])] for idx in selectedIndices]
ax[ldx+1].scatter(XR[:, 0], XR[:, 1], s=10, color=colors)
ax[ldx+1].set_xlabel("X1")
ax[ldx+1].set_ylabel("X2")
ax[ldx+1].set_title("LR: {}".format(lr))
# Plot the mean squared error with respect to the nu
nn.plotLoss(ax=ax[0])
# train accuracy
acc = nn.accuracy(y.squeeze(-1),yPred.squeeze(-1))
print('#INFO: Train Accuracy is {}'.format(acc))
if not crossValidationFlag:
ax[0].legend(["LR: "+str(lr) for lr in learningRate])
# please feel free to save subplots for a better report
fig.savefig('results.png')
| [
"[email protected]"
] | |
3e849edd794f2c41729ac050618dd2fa4f7ccd80 | 31d43b73e8104cd8aef3d97e39666022f2946223 | /test.py | 5cc8efc532a05bd28380d86159fac3a91718c95a | [] | no_license | kgelber1/SSX-Python | 2ed6b5e6b7b3775779464a7f624a70155ec8f657 | 4f5cded3acec68e24206af90ef5611db9adb1ac3 | refs/heads/master | 2020-06-24T07:08:33.486962 | 2019-10-24T18:11:18 | 2019-10-24T18:11:18 | 198,890,544 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
fig, ax = plt.subplots(1,1)
x=np.linspace(np.pi,4*np.pi,100)
N=len(x)
ax.set_xlim(len(x))
ax.set_ylim(-1.5,1.5)
line, = ax.plot([],[],'o-')
def init():
line.set_ydata(np.ma.array(x[:], mask=True))
return line,
def animate(i, *args, **kwargs):
y=np.sin(x*i)
line.set_data(np.arange(N),y) # update the data
return line,
ani = animation.FuncAnimation(fig, animate, init_func=init,
frames=100, interval=10, blit= False, repeat = False)
ani.save('2osc.mp4', writer="ffmpeg")
fig.show()
| [
"[email protected]"
] | |
69e2f645ab6431a303076a1506514f479e530747 | 9fc5dd13e0595bd5796cd7ec109e3b7c290e2692 | /wikipedia-scape.py | a54f56c6c75b06d0d4069f56a187c27ded4d5b68 | [] | no_license | ronandoolan2/python-webscraping | 812d5190dfe5f24029b4737438c80e8d40716971 | 4dc83a331415c3e55f06b1a8d0de47710db5ccd0 | refs/heads/master | 2021-01-19T00:54:22.801053 | 2017-04-16T09:10:47 | 2017-04-16T09:10:47 | 87,218,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 756 | py | from bs4 import BeautifulSoup
import urllib2
import re
wiki = "http://en.wikipedia.org/wiki/Mad_Max:_Fury_Road"
header = {'User-Agent': 'Mozilla/5.0'} #Needed to prevent 403 error on Wikipedia
req = urllib2.Request(wiki,headers=header)
page = urllib2.urlopen(req)
soup = BeautifulSoup(page)
rnd = ""
pick = ""
NFL = ""
player = ""
pos = ""
college = ""
conf = ""
notes = ""
table = soup.find("table", { "class" : "wikitable sortable" })
print table
#output = open('output.csv','w')
for row in table.findAll("tr"):
cells = row.findAll("href")
for cell in cells:
# search-term = re.search(r'director',cell)
# if search-term:
# print search-term
#print "---"
print cell.text
        #print cells.text  # a ResultSet has no .text attribute; per-cell text is printed above
#print "---"
| [
"[email protected]"
] | |
86d86d506dc4ea99908834b2219f9ecceb083da6 | ad2219bca2bb88604e6dc4b3625b5872f1ae4eff | /Notepad/settings.py | 5605a2d47eb6aae23e97682217dae909223b29f3 | [] | no_license | Abdul-Afeez/notepad | 20c6d25f73812740c6709e52f3a638ac9c8d84fe | 0acfd2961bb29a11e2f97cfeb178e14e18d5a5e3 | refs/heads/master | 2023-08-16T06:08:15.387908 | 2021-09-23T06:20:30 | 2021-09-23T06:20:30 | 408,530,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,095 | py | """
Django settings for Notepad project.
Generated by 'django-admin startproject' using Django 2.2.24.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i(4*c%rg4$9ce*&g-fb&7(!7^aef$%=3^x3hi@(-sfkwep57f+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_simplejwt.authentication.JWTAuthentication',
)
}
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'django_mysql',
'rest_framework',
'rest_framework_simplejwt',
'Notepad.owners',
'Notepad.notes',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Notepad.urls'
CORS_ORIGIN_WHITELIST = (
'http://0.0.0.0:3000',
'http://localhost:3000',
'http://localhost:8000',
'http://18.118.112.37',
'http://18.118.112.37:8000'
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Notepad.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'OPTIONS': {
'read_default_file': '../mysql/my.cnf',
'charset': 'utf8mb4',
},
# Tell Django to build the test database with the 'utf8mb4' character set
'TEST': {
'CHARSET': 'utf8mb4',
'COLLATION': 'utf8mb4_unicode_ci',
},
'NAME': 'note',
'USER': 'root',
'PASSWORD': '123456',
'HOST': 'note_db',
'PORT': 3306,
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTH_USER_MODEL = 'owners.Owner'
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
SESSION_TIMEOUT = 60
| [
"[email protected]"
] | |
3ce92a8f6ac6d21f309094192a22f27c0ba533f5 | 68dc6c82d1c8bd81b8aca375d71d18cc577aa350 | /TextRay/hybridqa/preprocessing/webq/trainDataGen.py | aed9db58d879509a1a5801b283ceadb587bde22c | [] | no_license | umich-dbgroup/TextRay-Release | 3fdbfefda9d6d94b70e810ceb2e0fa95a55949e0 | e25087b594db106382f5dbc9cd2adfcc39cd286f | refs/heads/master | 2022-01-25T19:03:44.577009 | 2019-08-16T13:00:00 | 2022-01-18T02:45:22 | 203,634,858 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,489 | py | import os
import json
from kbEndPoint.utils.sparql import sparqlUtils
from preprocessing import stringUtils
from preprocessing import metricUtils
import numpy as np
import nltk
import codecs
import pandas as pd
PREFIX = "/Users/funke/webq"
#
# RAW_QUESTION_PATH = os.path.join(PREFIX, "data/webquestions.examples.train.json")
# QUESTION_PATH = os.path.join(PREFIX, "data/train.json")
# SMART_TOPIC_PATH = os.path.join(PREFIX, "SMART/webquestions.examples.train.e2e.top10.filter.tsv")
# ALL_TOPIC_PATH = os.path.join(PREFIX, "topics/train.csv")
# CANDS_DIR = os.path.join(PREFIX, "cands-train")
# CANDS_WTIH_CONSTRAINTS_DIR = os.path.join(PREFIX, "cands_with_constraints-train")
# CANDS_WTIH_CONSTRAINTS_DIR_DEDUP = os.path.join(PREFIX, "cands_with_constraints-train")
# CANDS_WTIH_CONSTRAINTS_RESCALED_DIR = os.path.join(PREFIX, "cands_with_constraints_rescaled-train")
RAW_QUESTION_PATH = os.path.join(PREFIX, "data/webquestions.examples.test.json")
QUESTION_PATH = os.path.join(PREFIX, "data/test.json")
CANDS_DIR = os.path.join(PREFIX, "cands-test")
CANDS_WTIH_CONSTRAINTS_DIR = os.path.join(PREFIX, "cands_with_constraints-test")
CANDS_WTIH_CONSTRAINTS_DIR_DEDUP = os.path.join(PREFIX, "cands_with_constraints-test")
SMART_TOPIC_PATH = os.path.join(PREFIX, "SMART/webquestions.examples.test.e2e.top10.filter.tsv")
ALL_TOPIC_PATH = os.path.join(PREFIX, "topics/test.csv")
CANDS_WTIH_CONSTRAINTS_RESCALED_DIR = os.path.join(PREFIX, "cands_with_constraints_rescaled-test")
ANS_CONSTRAINT_RELATIONS = ["people.person.gender", "common.topic.notable_types", "common.topic.notable_for"]
class Constraint(object):
def __init__(self, mid, name, relation, is_ans_constraint, surface_form, st_pos, length):
self.mid = mid
self.name =name
self.relation = relation
self.is_ans_constraint = is_ans_constraint
self.surface_form = surface_form
self.st_pos = st_pos
self.length = length
def __str__(self):
return str(self.mid) + " " + str(self.name) + " " + str(self.relation) + " " + str(self.is_ans_constraint)
class Smart_Entity(object):
def __init__(self, line):
split_line = line.strip().split('\t')
self.q_id = split_line[0]
self.surface_form = split_line[1]
self.st_pos = int(split_line[2])
self.length = int(split_line[3])
mid = split_line[4]
if mid.startswith('/'):
mid = mid[1:].replace('/', '.')
self.mid = mid
self.e_name = split_line[5]
self.score = float(split_line[6])
def __str__(self):
return str(self.surface_form) + " (" + str(self.mid) + "," + str(self.e_name) + ")"
class WebQuestionsEndPoint(object):
def __init__(self):
self.sparql = sparqlUtils()
self.topic_entity_dict = {}
self.cache_maxsize = 10000
self.cvt_constraints_cache = {}
self.cvt_constraints_cache_elements_fifo = []
self.topic_entity_dict = {}
self.type_dict = {}
self.type_name_dict = {}
self.all_path_entity_cache = {}
self.entity_name_cache={}
def write_top_entities(self, entity_linking_path, ques_src, dest_topic_path):
names = ['ques_id', 'mention', 'begin_index', 'length', 'mid', 'name', 'score']
df = pd.read_csv(entity_linking_path, delimiter='\t', names=names)
df = df.dropna()
df['mid'] = df['mid'].apply(lambda mid: mid[1:].replace('/', '.'))
df = df.sort_values(['ques_id', 'score'], ascending=[True, False])
df = df.drop_duplicates(subset=['ques_id', 'mid'])
# df = df.groupby('ques_id').reset_index(drop=True)
df.to_csv(dest_topic_path, index=False, encoding='utf-8')
def get_cands(self, ques_src, topic_src, dest_dir):
if not os.path.exists(dest_dir):
os.mkdir(dest_dir)
topics_df = pd.read_csv(topic_src)
file_json = json.load(open(ques_src, 'r'))
questions = file_json
for question in questions:
questionId = question["QuestionId"]
# if questionId != "WebQTrn-158":
# continue
print questionId
dest_path = os.path.join(dest_dir, questionId + ".json")
if os.path.exists(dest_path):
continue
topic_entities = topics_df[topics_df["ques_id"] == questionId].to_dict(orient='records')
candidates = {}
for e in topic_entities:
topic_entity = e['mid']
if topic_entity in self.all_path_entity_cache:
cands = self.all_path_entity_cache[topic_entity]
print ("found")
else:
# print(topic_entity)
cands = []
one_step = self.sparql.one_hop_expansion(topic_entity)
for cand in one_step:
relations = [cand[0]]
cands.append({"relations": relations, "counts": cand[1],
"entities": self.sparql.eval_one_hop_expansion(topic_entity, rel1=cand[0])})
two_step = self.sparql.two_hop_expansion(topic_entity)
for cand in two_step:
relations = [cand[0], cand[1]]
cands.append({"relations": relations, "counts": cand[2],
"entities": self.sparql.eval_two_hop_expansion(topic_entity, rel1=cand[0], rel2=cand[1])})
candidates[topic_entity] = cands
self.all_path_entity_cache[topic_entity] = cands
with open(dest_path, 'w+') as fp:
json.dump(candidates, fp, indent=4)
'''Add core constraints'''
def generate_query_graph_cands(self, ques_src, topic_src, core_chain_path, dest_dir):
topics_df = pd.read_csv(topic_src)
questions = json.load(open(ques_src, 'r'))
ans_dict = {}
ques_str_dict = {}
for question in questions:
qid = question["QuestionId"]
ques_str_dict[qid] = question["ProcessedQuestion"]
ans_dict[qid] = question['Answers']
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
files = [f for f in os.listdir(core_chain_path) if os.path.isfile(os.path.join(core_chain_path, f))]
for f in files:
if ".DS_Store" in f:
continue
q_id = f.replace(".json", "")
ques_string = ques_str_dict[q_id]
if os.path.exists(os.path.join(dest_dir, q_id + ".json")):
print("exists " + str(q_id))
continue
ques_query_graph_cands = {}
try:
file_json = json.load(open(os.path.join(core_chain_path, f), 'r'))
except:
print(f)
continue
links_df = topics_df[topics_df["ques_id"] == q_id]
links = links_df.to_dict(orient='records')
print("Question " + q_id)
for mid in file_json.keys():
topic_entity_names = links_df[links_df['mid'] == mid]['mid'].values
if len(topic_entity_names) == 0:
print('should have a topic entity name in topics path {}'.format(mid))
continue
print(mid)
topic_entity_name = topic_entity_names[0]
answers = ans_dict[q_id]
paths = file_json[mid]
entity_query_graph_cands = []
for path in paths:
main_relation = path["relations"]
print main_relation
constraints = self.__get_constraint_candidates__(ques_string, mid, topic_entity_name, main_relation, links)
cands = self.__get_query_graph_cands__(mid, main_relation, constraints, answers)
entity_query_graph_cands.extend(cands)
ques_query_graph_cands[mid] = entity_query_graph_cands
print("topic {} candidates size {}".format(mid, len(entity_query_graph_cands)))
with open(os.path.join(dest_dir, q_id + ".json"), 'w+') as fp:
json.dump(ques_query_graph_cands, fp, indent=4)
def _add_cvt_to_cache(self, cvt_key, cvt_paths):
self.cvt_constraints_cache_elements_fifo.append(cvt_key)
self.cvt_constraints_cache[cvt_key] = cvt_paths
if len(self.cvt_constraints_cache_elements_fifo) > self.cache_maxsize:
to_delete = self.cvt_constraints_cache_elements_fifo.pop(0)
del self.cvt_constraints_cache[to_delete]
def __get_constraint_candidates__(self, ques_str, topic_entity, topic_entity_name, relation_path, links):
candidates = []
for link in links:
if metricUtils.jaccard_ch(topic_entity_name.lower(), link["mention"].lower()) > 0.4: continue
if link["mid"] == topic_entity: continue
if len(relation_path) == 2:
rel_key = str(relation_path)
if rel_key in self.cvt_constraints_cache:
cvt_constraints = self.cvt_constraints_cache[rel_key]
else:
cvt_constraints = self.sparql.get_all_cvt_constraints(topic_entity, relation_path, False, link["mid"])
self._add_cvt_to_cache(rel_key, cvt_constraints)
for rel in cvt_constraints:
candidates.append(Constraint(link["mid"], link["name"], rel, False, link["mention"], link["begin_index"], link["length"]))
relation_id = str(relation_path)
if relation_id in self.type_dict:
type_mids_rels = self.type_dict[relation_id]
else:
type_mids_rels = self.sparql.get_ans_constraint_candidates(topic_entity, relation_path, ANS_CONSTRAINT_RELATIONS, False)
self.type_dict[relation_id] = type_mids_rels
for mid in type_mids_rels.keys():
if mid in self.type_name_dict:
names = self.type_name_dict[mid]
else:
names = self.sparql.get_names(mid)
self.type_name_dict[mid] = names
if names is None or len(names) == 0:
continue
match = stringUtils.match_names_to_mention(ques_str, names.split("/"))
if match is None:
continue
candidates.append(Constraint(mid, names, type_mids_rels[mid], True, match[0], match[1], match[1] + match[2]))
return candidates
def __get_query_graph_cands__(self, topic_entity, main_relation, constraints, ans_entities):
constraint_combinations = self.__get_constraint_combinations__(constraints)
answer_entities = set(ans_entities)
cands = []
for combination in constraint_combinations:
entity_names = set(self.sparql.eval_all_constraints_named(topic_entity, main_relation, combination, False))
# entity_names = set()
# for e in entities:
# if e in self.entity_name_cache:
# entity_names.add(self.entity_name_cache[e])
# else:
# entity_name = self.sparql.get_names(e)
# self.entity_name_cache[e] = entity_name
# entity_names.add(entity_name)
# common = entities.intersection(answer_entities)
# reward = float(len(common)) / max(1.0, (len(entities) + len(answer_entities) - len(common)))
if len(answer_entities) == 0:
reward = 0,0,0
else:
reward = metricUtils.compute_f1(answer_entities, entity_names)
cand = {"relations": main_relation,
"entities": list(entity_names),
"constraints": [ob.__dict__ for ob in combination],
"reward": reward}
cands.append(cand)
return cands
def __get_constraint_combinations__(self, constraint_candidates):
if len(constraint_candidates) == 0:
return [[]]
elif len(constraint_candidates) == 1:
return [[], [constraint_candidates[0]]]
conflicts = self.__get_conflicts__(constraint_candidates)
constraint_combinations = self.__dfs_search_combinations__(conflicts)
cand_lists = []
cand_lists.append([])
for constraint_combination in constraint_combinations:
cand_list = [constraint_candidates[i] for i in constraint_combination]
cand_lists.append(cand_list)
return cand_lists
def __get_conflicts__(self, constraint_candidates):
cand_size = len(constraint_candidates)
conflict_matrix = []
# conflict matrix (adjacent format)
for i in range(cand_size):
vec = [i]
for j in range(i + 1, cand_size):
cand_1 = constraint_candidates[i]
cand_2 = constraint_candidates[j]
conflict = cand_1.st_pos <= cand_2.st_pos + cand_2.length \
and cand_2.st_pos <= cand_1.st_pos + cand_1.length
if conflict: vec.append(j)
conflict_matrix.append(vec)
return conflict_matrix
def __dfs_search_combinations__(self, mat):
ret_comb_list = []
n = len(mat)
status = np.ones((n,), dtype='int32')
stack = []
ptr = -1
while True:
ptr = self.__nextPick__(ptr, status)
if ptr == -1: # backtrace: restore status array
if len(stack) == 0: break # indicating the end of searching
pop_idx = stack.pop()
for item in mat[pop_idx]: status[item] += 1
ptr = pop_idx
else:
stack.append(ptr)
for item in mat[ptr]: status[item] -= 1
comb = list(stack)
ret_comb_list.append(comb)
return ret_comb_list
def __nextPick__(self, ptr, status):
n = len(status)
for new_ptr in range(ptr + 1, n):
if status[new_ptr] == 1:
return new_ptr
return -1
def get_lookup_key(self, topic, rel_data):
if "constraints" in rel_data:
look_up_key = topic + "_" + str(rel_data["relations"]) + "_" + str(rel_data["constraints"])
else:
look_up_key = topic + "_" + str(rel_data["relations"])
return look_up_key
def deduplicate(self, input_path, src_dir, dest_dir):
questions = json.load(codecs.open(input_path, 'r', encoding='utf-8'))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
for q in questions:
ques_id = q["QuestionId"]
ques_path = os.path.join(src_dir, ques_id + ".json")
if not os.path.exists(ques_path):
continue
print(ques_id)
main_entity_paths = json.load(codecs.open(ques_path, 'r', encoding='utf-8'))
look_up_keys = set()
main_entity_paths_dedup = {}
for topic in main_entity_paths:
paths = []
for path in main_entity_paths[topic]:
look_up_key = self.get_lookup_key(topic, path)
if look_up_key in look_up_keys:
continue
look_up_keys.add(look_up_key)
paths.append(path)
print("{} deduplicated to {}".format(len(main_entity_paths[topic]), len(paths)))
if len(paths) > 0:
main_entity_paths_dedup[topic] = paths
with open(os.path.join(dest_dir, ques_id + ".json"), 'w+') as fp:
json.dump(main_entity_paths_dedup, fp, indent=4)
def add_ids(self, src, dest):
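        # Assigns sequential WebQTest-style QuestionIds, copies the utterance into
        # ProcessedQuestion, and parses the Lisp-like 'targetValue' string into a
        # flat Answers list.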
questions = json.load(codecs.open(src, 'r', encoding='utf-8'))
to_write_json = []
for i, ques in enumerate(questions):
ques_id = "WebQTest-{}".format(i)
ques["QuestionId"] = ques_id
ques["ProcessedQuestion"] = ques["utterance"]
answer_set = set([])
target_value = ques['targetValue']
target_value = target_value[6: -1]
target_value = target_value.replace(') (', ')###(')
spt = target_value.split('###')
for item in spt:
ans_str = item[13: -1]
if ans_str.startswith('"') and ans_str.endswith('"'):
ans_str = ans_str[1: -1]
if isinstance(ans_str, unicode):
ans_str = ans_str.encode('utf-8')
answer_set.add(ans_str)
ques["Answers"] = list(answer_set)
to_write_json.append(ques)
with open(dest, 'w+') as fp:
json.dump(to_write_json, fp, indent=4)
def reward_with_max_f1(self, main_entity_paths):
        max_reward = (0, 0, 0)
for topic in main_entity_paths:
for path in main_entity_paths[topic]:
if path["reward"][2] > max_reward[2]:
max_reward = path["reward"]
return max_reward
def rescale_rewards_max(self, src_dir, dest_dir):
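        # Normalizes every candidate path's (precision, recall, f1) reward by the
        # best reward found for that question, clipping to 1.0, and writes the
        # per-question JSON files back out with a 'rescaled_reward' field.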
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
files = [f for f in os.listdir(src_dir)]
for f in files:
if ".DS_Store" in f:
continue
ques_id = f.replace(".json", "")
#print(ques_id)
ques_path = os.path.join(src_dir, f)
main_entity_paths = json.load(codecs.open(ques_path, 'r', encoding='utf-8'))
max_ques_reward = self.reward_with_max_f1(main_entity_paths)
for topic in main_entity_paths:
for path in main_entity_paths[topic]:
path["rescaled_reward"] = [path["reward"][0], path["reward"][1], path["reward"][2]]
if max_ques_reward[2] > 0:
reward = path["rescaled_reward"]
reward[2] = float(reward[2]) * 1.0 / float(max_ques_reward[2])
if max_ques_reward[0] > 0:
reward[0] = min(1.0, float(reward[0]) * 1.0 / float(
max_ques_reward[0])) # hacky way of clipping
if max_ques_reward[1] > 0:
reward[1] = min(1.0, float(reward[1]) * 1.0 / float(
max_ques_reward[1])) # hacky way of clipping
with open(os.path.join(dest_dir, ques_id + ".json"), 'w+') as fp:
json.dump(main_entity_paths, fp, indent=4)
if __name__ == '__main__':
endPoint = WebQuestionsEndPoint()
# endPoint.add_ids(RAW_QUESTION_PATH, QUESTION_PATH)
# endPoint.write_top_entities(SMART_TOPIC_PATH, QUESTION_PATH, ALL_TOPIC_PATH)
# endPoint.get_cands(QUESTION_PATH, ALL_TOPIC_PATH, CANDS_DIR)
# endPoint.generate_query_graph_cands(QUESTION_PATH, ALL_TOPIC_PATH, CANDS_DIR, CANDS_WTIH_CONSTRAINTS_DIR)
# endPoint.deduplicate(QUESTION_PATH, CANDS_WTIH_CONSTRAINTS_DIR, CANDS_WTIH_CONSTRAINTS_DIR_DEDUP)
endPoint.rescale_rewards_max(CANDS_WTIH_CONSTRAINTS_DIR_DEDUP, CANDS_WTIH_CONSTRAINTS_RESCALED_DIR)
| [
"[email protected]"
] | |
b3b23e56815e22c59025e95c60b6cbda2ae81e07 | 9fbe90eab4cb25022e7c93776da3a5733656a09a | /examples/chat/status.py | 9f517a087999e1a586d64cffee8075515a5e83ea | [
"MIT"
] | permissive | Nathanator/networkzero | 453e218d6e0b8080158cb968f4acc5e0cb0fb65c | e6bf437f424660c32cf1ef81f83d9eee925f44e7 | refs/heads/master | 2021-01-15T13:14:53.101742 | 2016-04-07T20:32:28 | 2016-04-07T20:32:28 | 55,724,894 | 0 | 0 | null | 2016-04-07T20:12:18 | 2016-04-07T20:12:17 | null | UTF-8 | Python | false | false | 467 | py | import networkzero as nw0
updates = nw0.discover("chat-updates")
while True:
action, message = nw0.wait_for_notification(updates)
print(action, message)
if action == "JOIN":
print("%s has joined" % message)
elif action == "LEAVE":
print("%s has left" % message)
elif action == "SPEAK":
[person, words] = message
print("%s says: %s" % (person, words))
else:
print("!! Unexpected message: %s" % message)
| [
"[email protected]"
] | |
ebd3c1c21f84ae08aca5e069c923ae54ae4c4266 | 3c74adb0203f00af331e114838ef4190af455d81 | /mysite/blog/models.py | 96c1614bff94e921e61de59a4a36c592af4f0d92 | [] | no_license | SARTHAKKRSHARMA/Blog-Application | 0d0e2f4ca0069c32d2950b0fd2915f4665b84343 | 1250ab5f1f5bb136d837649ee1693651fe2129b7 | refs/heads/master | 2022-04-19T21:00:53.293587 | 2020-04-21T05:57:38 | 2020-04-21T05:57:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,561 | py | from django.db import models
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.contrib.auth.admin import User
from django.utils import timezone
# Create your models here.
class Blog_Detail(models.Model):
author = models.ForeignKey(to=User,on_delete=models.CASCADE,related_name='author')
title = models.CharField(max_length=200)
body = models.TextField()
creation_date = models.DateTimeField(default=timezone.now())
pub_date = models.DateTimeField(blank=True,null=True)
likes = models.IntegerField(default=0)
dislikes = models.IntegerField(default=0)
like_user_reaction = models.ManyToManyField(to=User,blank=True,related_name='like_user')
dislike_user_reaction = models.ManyToManyField(to=User,blank=True,related_name='dislike_user')
def __str__(self):
return self.title
class Comments(models.Model):
author = models.CharField(max_length=250,blank=True)
blog = models.ForeignKey(Blog_Detail,on_delete=models.CASCADE,blank=True,null=True,related_name='comments')
body = models.TextField(blank=True)
creation_date = models.DateTimeField(default=timezone.now(),blank=True)
likes = models.IntegerField(default = 0,blank=True)
dislikes = models.IntegerField(default=0,blank=True)
like_user_reaction = models.ManyToManyField(to=User,blank=True,related_name='like_comment_user')
dislike_user_reaction = models.ManyToManyField(to=User,blank=True,related_name='dislike_comment_user')
def __str__(self):
return self.author
| [
"[email protected]"
] | |
c20346e9a05992ff1130b836fc537db55bc7d17f | e9b3404197c6ee6260ba7e377294805af5b74bd0 | /sphinx_rstbuilder/builders/rst.py | e9837a9ddf1069a17ed3b4e5615103d6a205a667 | [] | no_license | etarasenko/sphinx-rstbuilder | eb9d4083e9cc797fa7507899a7fc8518444015ce | 83a36646c52537f4566264588edef24727251657 | refs/heads/master | 2020-05-17T03:12:10.089707 | 2015-08-05T04:18:50 | 2015-08-05T04:18:50 | 40,223,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | # -*- coding: utf-8 -*-
from sphinx.builders.text import TextBuilder
from ..writers.rst import RstWriter
class RstBuilder(TextBuilder):
name = 'rst'
format = 'rst'
out_suffix = '.rst'
def get_target_uri(self, docname, typ=None):
return docname + self.out_suffix
def prepare_writing(self, docnames):
self.writer = RstWriter(self)
| [
"[email protected]"
] | |
6ac793e3b8df59989fc5a148e4385b6fe3b6ed70 | dbab24ee5055dad1a57bb212ae30da994022eab5 | /Python/Chapter 6 - tehtävä 3.py | 4703757b6f12df00e86114119c5ffd8b7220709e | [] | no_license | MikBom/mikbom-github.io | e8ab24080a6c6383f4ad973a817e10ab84375e4f | 3dc7312798473a7620529d24fa771d5b09bafbbc | refs/heads/main | 2023-08-14T07:04:01.427822 | 2021-09-21T16:08:32 | 2021-09-21T16:08:32 | 301,965,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | vari = input("Valitse kohde (1-3):")
if vari == "1":
print("Haukion Kala Oy")
elif vari == "2":
print("Metallipaja VasaraAika")
elif vari == "3":
print("Balin palapelitehdas") | [
"[email protected]"
] | |
1f24bf6dac22f50aece5a8dd643a221f8618bfc3 | 29d62d5523c703288d194b8a2cf15efb8423f166 | /preprocess_dataset.py | b60128ef686b4fc795595ba89976d40b64300b89 | [] | no_license | Jonlenes/clusters-news-headlines | 92c623a5a214ea21d5e66dc2ff8a984e268374c3 | 39d54337ef28476a82bd44d39958534a6f4e7368 | refs/heads/master | 2021-10-19T20:41:54.808979 | 2019-02-23T11:36:32 | 2019-02-23T11:36:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,468 | py | import pandas
import re
import string
from nltk.stem.snowball import SnowballStemmer
from load_dataset import path_dataset
def remove_pnt_and_stemming(text_arr):
    """ Removes punctuation and applies stemming to the whole dataset """
    stemmer = SnowballStemmer("english", ignore_stopwords=True)
    for i in range(0, text_arr.shape[0]):
        # strip all punctuation
        text_arr[i] = text_arr[i].translate(str.maketrans('', '', string.punctuation))
        words = text_arr[i].split()
        text_arr[i] = ""
        for word in words:
            text_arr[i] += stemmer.stem(word) + " "
        text_arr[i] = text_arr[i].strip()
        # keep letters only
        text_arr[i] = re.sub(r'[^A-Za-z]+', ' ', text_arr[i])
    return text_arr
def split_dataset_by_year(dataset, save_dataset=True):
    """ Splits the dataset by year - returns/saves one dataset for each year in the original file """
key = str(dataset[0][0])[:4]
datasets = []
current_dataset = []
for data in dataset:
if key == str(data[0])[:4]:
current_dataset.append(data[1])
else:
datasets.append(current_dataset.copy())
key = str(data[0])[:4]
current_dataset.clear()
current_dataset.append(data[1])
datasets.append(current_dataset.copy())
if save_dataset:
for i in range(0, len(datasets)):
pandas.DataFrame(datasets[i]).to_csv("dataset_" + str(i + 1) + ".csv", index=False)
return datasets
if __name__ == '__main__':
split_dataset_by_year(path_dataset) | [
"[email protected]"
] | |
ec50df0aa2a320ce0f88bb7eea72f3ddae60e3a7 | 476768e5629340efcbc11fd175c7db12e09c2d52 | /python/006.py | be26addbbddf5f50f6e7fff97a4484130aab1bf1 | [] | no_license | zero1hac/projecteuler | fb8ded5de8d4126865c11081e4b407e0ae35e304 | 7dc00e89c9870d5c7d9c6364f1e80e19d69655e5 | refs/heads/master | 2020-04-23T20:10:51.375485 | 2019-03-25T08:38:59 | 2019-03-25T08:38:59 | 171,430,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | if __name__ == "__main__":
n = 100
sum_of_squares = (n*(n+1)*(2*n+1))/6
square_of_sum = (n*(n+1)/2)**2
print square_of_sum - sum_of_squares | [
"[email protected]"
] | |
ce9d1590d7469131ce9288074f0400fc85a37fce | 48559a749d1663242c31ba6a996945932049b7d9 | /bert_attack/train_100.py | 3431efb0d61b861ad4a385b819d71cdab2bd2a24 | [] | no_license | EthanCDD/Adversarial-Attack_Genetic-attack | 148893af12e2081094c3f5a3801d243b0d344acd | 81191a52a3bade73d114d8837637c74edc0a5c51 | refs/heads/master | 2022-12-24T13:18:46.681250 | 2020-10-09T01:38:15 | 2020-10-09T01:38:15 | 273,260,484 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,908 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 24 17:01:50 2020
@author: 13758
"""
import os
import random
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch import nn
#import nltk
#nltk.download('stopwords')
#stopwords = nltk.corpus.stopwords.words('english')
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from collections import OrderedDict, defaultdict
from torch.utils.data import Dataset, DataLoader, Subset
from keras.preprocessing.sequence import pad_sequences
from data_sampler import data_infor
from pre_processing import pre_processing
from transformers import BertModel, BertTokenizer
from model_lstm_bert import bert_lstm
import argparse
SEED = 1234
random.seed(SEED)
np.random.seed(SEED)
def str2bool(string):
if isinstance(string, bool):
return string
if string.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif string.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected')
parser = argparse.ArgumentParser(
description = 'Sentiment analysis training with BERT&LSTM'
)
parser.add_argument('--freeze',
help = 'Freeze BERT or not',
type = str2bool,
default = True)
parser.add_argument('--nlayer',
help = 'The number of LSTM layers',
type = int,
default = 2)
parser.add_argument('--data',
help = 'The applied dataset',
default = 'IMDB')
parser.add_argument('--kept_prob_dropout',
help = 'The probability to keep params',
type = float,
default = 1)
parser.add_argument('--epoches',
help = 'The number of epoches',
type = int,
default = 100)
parser.add_argument('--learning_rate',
help = 'learning rate',
type = float,
default = 0.0005)
parser.add_argument('--bidirection',
help = 'LSTM bidirection',
type = str2bool,
default = False)
parser.add_argument('--tokenizer',
help = 'Pre-processing tokenizer',
default = 'bert')
parser.add_argument('--save_path',
help = 'Save path',
default = '/lustre/scratch/scratch/ucabdc3/bert_lstm_attack')
def data_loading(train_text, test_text, train_target, test_target):
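    # Wraps the padded text/label arrays in data_infor datasets and returns the
    # full training set, an 80/20 train-validation split, and the test set as
    # torch Subset objects.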
dataset = data_infor(train_text, train_target)
len_train = len(dataset)
indx = list(range(len_train))
all_train_data = Subset(dataset, indx)
train_indx = random.sample(indx, int(len_train*0.8))
vali_indx = [i for i in indx if i not in train_indx]
train_data = Subset(dataset, train_indx)
vali_data = Subset(dataset, vali_indx)
dataset = data_infor(test_text, test_target)
len_test = len(dataset)
indx = list(range(len_test))
test_data = Subset(dataset, indx)
return all_train_data, train_data, vali_data, test_data
def imdb_run():
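    # End-to-end training driver: tokenize the IMDB reviews, train the BERT+LSTM
    # classifier with early stopping on validation accuracy, then reload the best
    # checkpoint and evaluate it on the test split.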
args = parser.parse_args()
data = args.data
freeze = args.freeze
nlayer = args.nlayer
kept_prob = args.kept_prob_dropout
bert_lstm_save_path=args.save_path
learning_rate = args.learning_rate
epoches = args.epoches
tokenizer_selection = args.tokenizer
if data.lower() == 'imdb':
data_path = 'aclImdb'
bert = BertModel.from_pretrained('bert-base-uncased')
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
max_len = 100
# max_vocab = bert.config.to_dict()['vocab_size']-3
# data_processed = pre_processing(data_path, max_vocab)
# train_sequences, test_sequences = data_processed.seqs_num()
# train_text_init, test_text_init = data_processed.numerical(train_sequences, test_sequences, max_len = max_len)
max_vocab = 50000
data_processed = pre_processing(data_path, max_vocab, max_len)
if tokenizer_selection.lower() != 'bert':
data_processed.processing()
train_sequences, test_sequences = data_processed.bert_indx(tokenizer)
print('Self preprocessing')
else:
data_processed.bert_tokenize(tokenizer)
train_sequences, test_sequences = data_processed.bert_indx(tokenizer)
print('BERT tokenizer')
train_text_init, test_text_init = data_processed.numerical(tokenizer, train_sequences, test_sequences)
train_text = pad_sequences(train_text_init, maxlen = max_len, padding = 'post')
test_text = pad_sequences(test_text_init, maxlen = max_len, padding = 'post')
train_target = data_processed.all_train_labels
test_target = data_processed.all_test_labels
all_train_data, train_data, vali_data, test_data = data_loading(train_text, test_text, train_target, test_target)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(device)
BatchSize = 128#int(length_train/200)
all_train_loader = DataLoader(all_train_data, batch_size = BatchSize, shuffle = True)
train_loader = DataLoader(train_data, batch_size = BatchSize, shuffle = True)
vali_loader = DataLoader(vali_data, batch_size = BatchSize, shuffle = True)
test_loader = DataLoader(test_data, batch_size = BatchSize, shuffle = True)
bidirection = args.bidirection
model = bert_lstm(bert, 2, bidirection, nlayer, 128, freeze, kept_prob)
model.to(device)
criterion = nn.CrossEntropyLoss()
optimiser = torch.optim.AdamW([cont for cont in model.parameters() if cont.requires_grad], lr = learning_rate)
bert_lstm_save_path = os.path.join(bert_lstm_save_path, 'best_bert_'+str(kept_prob)+'_'+str(learning_rate)+'_'+str(tokenizer_selection)+'_'+str(max_len))
best_epoch = 0
best_acc = 0
patience = 20
for epoch in range(epoches):
test_pred = torch.tensor([])
test_targets = torch.tensor([])
train_pred = torch.tensor([])
train_targets = torch.tensor([])
test_loss = []
train_loss = []
model.train()
for batch_index, (seqs, length, target) in enumerate(all_train_loader):
seqs = seqs.type(torch.LongTensor)
args = torch.argsort(length, descending = True)
length = length[args]
seqs = seqs[args][:, 0:length[0]]
target = target[args].type(torch.LongTensor)
optimiser.zero_grad()
seqs, target, length = seqs.to(device), target.to(device), length.to(device)
output, pred_out = model(seqs, length, True)
loss = criterion(output, target)
loss.backward()
optimiser.step()
train_pred = torch.cat((train_pred, pred_out.cpu()), dim = 0)
train_targets = torch.cat((train_targets, target.type(torch.float).cpu()))
train_loss.append(loss)
if batch_index % 100 == 0:
print('Train Batch:{}, Train Loss:{:.4f}.'.format(batch_index, loss.item()))
train_accuracy = model.evaluate_accuracy(train_pred.detach().numpy(), train_targets.detach().numpy())
print('Epoch:{}, Train Accuracy:{:.4f}, Train Mean loss:{:.4f}.'.format(epoch, train_accuracy, sum(train_loss)/len(train_loss)))
print("\n")
model.eval()
with torch.no_grad():
for batch_index, (seqs, length, target) in enumerate(test_loader):
seqs = seqs.type(torch.LongTensor)
len_order = torch.argsort(length, descending = True)
length = length[len_order]
seqs = seqs[len_order]
target = target[len_order].type(torch.LongTensor)
seqs, target, length = seqs.to(device), target.to(device), length.to(device)
output, pred_out = model(seqs, length, False)
test_pred = torch.cat((test_pred, pred_out.type(torch.float).cpu()), dim = 0)
test_targets = torch.cat((test_targets, target.type(torch.float).cpu()))
loss = criterion(output, target)
test_loss.append(loss.item())
if batch_index % 100 == 0:
print('Vali Batch:{}, Vali Loss:{:.4f}.'.format(batch_index, loss.item()))
accuracy = model.evaluate_accuracy(test_pred.numpy(), test_targets.numpy())
print('Epoch:{}, Vali Accuracy:{:.4f}, Vali Mean loss:{:.4f}.'.format(epoch, accuracy, sum(test_loss)/len(test_loss)))
# best save
if accuracy > best_acc:
best_acc = accuracy
best_epoch = epoch
torch.save(model.state_dict(), bert_lstm_save_path)
# early stop
if epoch-best_epoch >=patience:
print('Early stopping')
print('Best epoch: {}, Best accuracy: {:.4f}.'.format(best_epoch, best_acc))
print('\n\n')
break
model.load_state_dict(torch.load(bert_lstm_save_path))
model.eval()
with torch.no_grad():
for batch_index, (seqs, length, target) in enumerate(test_loader):
seqs = seqs.type(torch.LongTensor)
len_order = torch.argsort(length, descending = True)
length = length[len_order]
seqs = seqs[len_order]
target = target[len_order].type(torch.LongTensor)
seqs, target, length = seqs.to(device), target.to(device), length.to(device)
output, pred_out = model(seqs, length, False)
test_pred = torch.cat((test_pred, pred_out.type(torch.float).cpu()), dim = 0)
test_targets = torch.cat((test_targets, target.type(torch.float).cpu()))
loss = criterion(output, target)
test_loss.append(loss.item())
accuracy = model.evaluate_accuracy(test_pred.numpy(), test_targets.numpy())
print('Test Accuracy:{:.4f}, Test Mean loss:{:.4f}.'.format(accuracy, sum(test_loss)/len(test_loss)))
if __name__ == '__main__':
imdb_run() | [
"[email protected]"
] | |
31846c424c367a17bdb45fce97db5a1cdb4217d9 | 42e5b4f01d73dfa29cb85b753d84cead2d7f4890 | /backend/filter_sites.py | 92b46bcd871af50063e521c5410b4165720ce6ce | [] | no_license | Rp300/Forward_Data_Lab_Education_Today | 4522984bee42e864aa64242645338810232e5b1d | 370ef25f95f68857f41083a455f32d0a46ac2e38 | refs/heads/master | 2023-07-20T11:33:00.867145 | 2021-09-03T17:37:32 | 2021-09-03T17:37:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,397 | py | from googlesearch import search
import requests
from bs4 import BeautifulSoup as bs
import html2text
import io
import sys
import json
from getpass import getpass
from mysql.connector import connect, Error
# This function converts the HTML of the professor pages into local text files for analysis
def htmlToText(search_query):
# Reconfigure the encoding to avoid issues
sys.stdin.reconfigure(encoding='utf-8')
sys.stdout.reconfigure(encoding='utf-8')
# Initialization
list = search(search_query, 10, "en")
urls = []
# Finding List of Google search URL's that have .org, .edu, or scholar.google in the URL
for i in range(len(list)):
if ".edu" in list[i] or ".org" in list[i] or "scholar.google" in list[i]:
urls.append(list[i])
# print(urls)
# Converting the HTML content for each page into separate text files
count = 0
for url in urls:
# Accessing the Webpage
page = requests.get(url)
# Getting the webpage's content in pure html
soup = bs(page.content, features="lxml")
# Convert HTML into easy-to-read plain ASCII text
clean_html = html2text.html2text(soup.prettify())
file_name = "site" + str(count) + ".txt"
count += 1
with io.open(file_name, "w", encoding="utf-8") as temp_file:
temp_file.write(clean_html)
temp_file.close()
# This function returns the publications' URL and Title as JSON strings. It also INSERTS the data into the database.
def getPublicationUrlAndTitle(search_query):
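    # Collects publication titles/URLs from Google Scholar result pages, INSERT
    # IGNOREs them into the MySQL 'Publication' table, and returns the URL list.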
# Reconfigure the encoding to avoid issues
sys.stdin.reconfigure(encoding='utf-8')
sys.stdout.reconfigure(encoding='utf-8')
# Initialization
list = search(search_query, 10, "en")
urls = []
publications = []
publications_titles = []
professor = search_query.split(", ")[0]
institution = search_query.split(", ")[1]
# Finding List of Google search URL's that have .org, .edu, or scholar.google in the URL
for i in range(len(list)):
if ".edu" in list[i] or ".org" in list[i] or "scholar.google" in list[i]:
urls.append(list[i])
# print(urls)
# Converting the HTML content for each page into separate text files
count = 0
for url in urls:
# Accessing the Webpage
page = requests.get(url)
# Getting the webpage's content in pure html
soup = bs(page.content, features="lxml")
# Extracting Abstract Link from Google Scholar
if "scholar.google" in url:
print("Google Scholar Publication: " + url)
for link in soup.find_all(["a"], "gsc_a_at"):
# Potential Error as the tag changes to data-href on some browsers:
# print(link.get('data-href'))
if link.get('href') is not None:
publications.append("https://scholar.google.com" + link.get('href'))
publications_titles.append(link.text)
# Convert Python arrays to JSON strings
# jsonStrUrls = json.dumps(publications)
# print(jsonStrUrls)
# jsonStrPublicationTitles = json.dumps(publications_titles)
# print(publications_titles)
# Print out the publication titles and url's for the professor.
# for x in range(len(publications)):
# print(publications_titles[x])
# print(publications[x])
# Push the publications individually to the publications table on MySQL
try:
with connect(
host="104.198.163.126",
user="root",
password="yEBpALG6zHDoCFLn",
database='project'
) as connection:
mycursor = connection.cursor()
sql = "INSERT IGNORE INTO Publication (title, name, institution, url) VALUES (%s, %s, %s, %s)"
for x in range(len(publications)):
val = (publications_titles[x], professor, institution, publications[x])
mycursor.execute(sql, val)
connection.commit()
connection.close()
except Error as e:
print(e)
return publications
# search_query = "Jiawei Han, University of Illinois at Urbana-Champaign"
# # htmlToText(search_query)
# getPublicationUrlAndTitle(search_query)
| [
"[email protected]"
] | |
838994d7fee7f1ec6d0ab6addde154ea89e34fc1 | df1ad0d061f7982edd0d5e159a1abc31daa8fd4c | /MapApp/migrations/0004_registerview.py | f535de1806e149f392a7e4d785b7132cf36a7735 | [
"Apache-2.0"
] | permissive | todor943/mapEngine | f6c056ca1b2fcf6207d5a1bf2b5908f062fff353 | 560c4e9bee2ef20e988124030db801337d8722f1 | refs/heads/master | 2020-12-24T18:55:13.667780 | 2017-11-06T19:54:04 | 2017-11-06T19:54:04 | 57,469,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,065 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-02 22:32
from __future__ import unicode_literals
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
import django.views.generic.base
class Migration(migrations.Migration):
dependencies = [
('auth', '0008_alter_user_username_max_length'),
('MapApp', '0003_auto_20171002_1846'),
]
operations = [
migrations.CreateModel(
name='RegisterView',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
bases=(models.Model, django.views.generic.base.View),
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| [
"[email protected]"
] | |
4a1bb31236fc9773fb0e6894d55116aa1e9934d8 | 3eeedc6a5a9d31c5fff5d06ef2c659147ae27fec | /users/migrations/0025_auto_20190705_2137.py | 964d4ad9e73d3a9c5f127b9ccbc37afb649ad016 | [] | no_license | aahmedsamy/special_offer | 2ef284e7a9a759f79e150adfd7c18625ec4c1b8c | 384efad727f80aa4d9452485c3b5899abc3d39cb | refs/heads/master | 2022-03-08T22:42:07.040004 | 2019-07-21T20:57:55 | 2019-07-21T20:57:55 | 181,128,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | # Generated by Django 2.2 on 2019-07-05 17:37
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0024_auto_20190705_2112'),
]
operations = [
migrations.AlterModelOptions(
name='advertisernotification',
options={'ordering': ['-id'], 'verbose_name': 'AdvertiserNotification', 'verbose_name_plural': 'AdvertiserNotifications'},
),
]
| [
"[email protected]"
] | |
e3de38465362031a14aa2ff4b827877b72f76780 | 60de13f814ebfff48740b693563bf4b83096534d | /venv/Scripts/pip-script.py | ee13259c85c9b690ddae6a5c5196f921bda9b1ed | [] | no_license | Daria8402/bandurova17ov1 | 1c568d41b64fa3c1093193fb78b6c5c15a569cd7 | 5b202d32a4b2707664615b7d9d98f4c77efa9622 | refs/heads/master | 2021-02-18T12:12:56.944287 | 2020-03-05T15:43:51 | 2020-03-05T15:43:51 | 245,193,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | #!D:\GitHub\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
| [
"[email protected]"
] | |
abaf5bf0704250f8f6056f02c645210cc6095283 | 33b3029d6efaa195a0530e8bafbbdc82e7aea697 | /scripts/test_01.py | 1cd1fbd9755fc06808f6eb20be588a2e5622a120 | [] | no_license | wuyun19890323/lesson001 | 333bc2239151c6337a797d57926f683c05fa0c60 | aa2e202b846664adfa5c1af8312b89000311ba8d | refs/heads/master | 2020-03-19T11:11:58.829176 | 2018-06-08T12:53:05 | 2018-06-08T12:53:05 | 136,438,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,639 | py | from selenium.webdriver.common.by import By
from base.base_driver import browser_fire
from page.page_load import PageLoad
import unittest
class TestLoad(unittest.TestCase):
# def get_text(self,loc):
# return self.scr_load.get_att(self.load_text)
def get_ass(self):
self.scr_load.get_scr(self.scr_load.load_get())
    # Site URL
url = "http://localhost/iwebshop/"
    # Locator for the login link
load_mark = By.XPATH, "//a[@href='/iwebshop/index.php?controller=simple&action=login']"
    # Locator for the username field
username = By.XPATH, "//input[@type='text']"
    # Locator for the password field
password = By.XPATH, "//input[@type='password']"
    # Locator for the login button
load_click = By.XPATH, "//input[@type='submit']"
    # Locator for the post-login text area
load_text = By.XPATH, "//p[@class='loginfo']"
    # Locator for the logout button
load_quit = By.XPATH, "//a[@class='reg']"
    # Locator for the pre-login account/password error prompt
load_wrong = By.XPATH, "//div[@class ='prompt']"
    # Locator for the "fill in username or email" prompt shown when the username is empty
load_username_null = By.XPATH, "//tbody/tr[1]/td/label[@class='invalid-msg']"
    # Locator for the "fill in password" prompt shown when the password is empty
load_password_null = By.XPATH, "//tbody/tr[2]/td/label[@class='invalid-msg']"
def setUp(self):
self.driver = browser_fire()
self.scr_load = PageLoad(self.driver)
self.scr_load.get_url(self.url)
self.scr_load.maxi_wait(30)
    # Valid username, valid password
def test_load001(self):
        # Click the login link
self.scr_load.click_load(self.load_mark)
        # Enter the username
self.scr_load.input_text(self.username, "admin")
        # Enter the password
self.scr_load.input_text(self.password, "123456")
        # Click the login button
self.scr_load.click_load(self.load_click)
try:
self.assertIn("admin", self.scr_load.get_att(self.load_text))
except AssertionError:
self.get_ass()
raise
self.scr_load.click_load(self.load_quit)
def tearDown(self):
self.driver.quit()
    # Valid username, wrong password
def test_load002(self):
        # Click the login link
self.scr_load.click_load(self.load_mark)
        # Enter the username
self.scr_load.input_text(self.username, "admin")
        # Enter the password
self.scr_load.input_text(self.password, "1234567")
        # Click the login button
self.scr_load.click_load(self.load_click)
try:
self.assertIn("用户名和密码不匹配", self.scr_load.get_att(self.load_wrong))
except AssertionError:
self.get_ass()
raise
    # Valid username, empty password
def test_load003(self):
        # Click the login link
self.scr_load.click_load(self.load_mark)
        # Enter the username
self.scr_load.input_text(self.username, "admin")
        # Enter the password
self.scr_load.input_text(self.password, "")
        # Click the login button
self.scr_load.click_load(self.load_click)
try:
self.assertIn("填写密码", self.scr_load.get_att(self.load_password_null))
except AssertionError:
self.get_ass()
raise
    # Wrong username, valid password
def test_load004(self):
        # Click the login link
self.scr_load.click_load(self.load_mark)
        # Enter the username
self.scr_load.input_text(self.username, "admin1")
        # Enter the password
self.scr_load.input_text(self.password, "123456")
        # Click the login button
self.scr_load.click_load(self.load_click)
try:
self.assertIn("用户名和密码不匹配", self.scr_load.get_att(self.load_wrong))
except AssertionError:
self.get_ass()
raise
    # Wrong username, wrong password
def test_load005(self):
        # Click the login link
self.scr_load.click_load(self.load_mark)
        # Enter the username
self.scr_load.input_text(self.username, "admin1")
        # Enter the password
self.scr_load.input_text(self.password, "1234567")
        # Click the login button
self.scr_load.click_load(self.load_click)
try:
self.assertIn("用户名和密码不匹配", self.scr_load.get_att(self.load_wrong))
except AssertionError:
self.get_ass()
raise
    # Wrong username, empty password
def test_load006(self):
        # Click the login link
self.scr_load.click_load(self.load_mark)
        # Enter the username
self.scr_load.input_text(self.username, "admin1")
        # Enter the password
self.scr_load.input_text(self.password, "")
        # Click the login button
self.scr_load.click_load(self.load_click)
try:
self.assertIn("填写密码", self.scr_load.get_att(self.load_password_null))
except AssertionError:
self.get_ass()
raise
    # Empty username, valid password
def test_load007(self):
        # Click the login link
self.scr_load.click_load(self.load_mark)
        # Enter the username
self.scr_load.input_text(self.username, "")
        # Enter the password
self.scr_load.input_text(self.password, "123456")
        # Click the login button
self.scr_load.click_load(self.load_click)
try:
self.assertIn("填写用户名或邮箱", self.scr_load.get_att(self.load_username_null))
except AssertionError:
self.get_ass()
raise
    # Empty username, wrong password
def test_load008(self):
        # Click the login link
self.scr_load.click_load(self.load_mark)
        # Enter the username
self.scr_load.input_text(self.username, "")
        # Enter the password
self.scr_load.input_text(self.password, "1234567")
        # Click the login button
self.scr_load.click_load(self.load_click)
try:
self.assertIn("填写用户名或邮箱", self.scr_load.get_att(self.load_username_null))
except AssertionError:
self.get_ass()
raise
    # Empty username, empty password
def test_load009(self):
        # Click the login link
self.scr_load.click_load(self.load_mark)
        # Enter the username
self.scr_load.input_text(self.username, "")
        # Enter the password
self.scr_load.input_text(self.password, "")
        # Click the login button
self.scr_load.click_load(self.load_click)
try:
self.assertIn("填写用户名或邮箱", self.scr_load.get_att(self.load_username_null))
except AssertionError:
self.get_ass()
raise
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
fbc3cb3337489cd49a68c2578139f993cb3822c4 | a0083584308a52b045550dbe76007e2467b7e40f | /pythonvideos/napalm_mac_Address.py | f7338f62e722e668b8d2dd285552ab03e44f5a7b | [] | no_license | narkalya/git-demo | ac511391a2c8026d53215262202b924a220ded0b | abffcdf9e0d1afd15742bfdd45784423eb04d4ab | refs/heads/master | 2020-03-25T08:58:49.356341 | 2018-08-06T15:09:25 | 2018-08-06T15:09:25 | 143,641,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | from napalm import get_network_driver
import json
driver = get_network_driver('ios')
iosvl2 = driver('192.168.122.72', 'david', 'cisco')
iosvl2.open()
print iosvl2.get_facts()
ios_output = iosvl2.get_mac_address_table()
print (json.dumps(ios_output, sort_keys=True, indent=4))
iosvl2.close()
| [
"[email protected]"
] | |
d35605db5bdf283207a2c171638328c4c8b53252 | 4e30d990963870478ed248567e432795f519e1cc | /tests/api/v3_1_1/test_nbar_app.py | 13a1bcd9798917799871178339c1315dd3a03d61 | [
"MIT"
] | permissive | CiscoISE/ciscoisesdk | 84074a57bf1042a735e3fc6eb7876555150d2b51 | f468c54998ec1ad85435ea28988922f0573bfee8 | refs/heads/main | 2023-09-04T23:56:32.232035 | 2023-08-25T17:31:49 | 2023-08-25T17:31:49 | 365,359,531 | 48 | 9 | MIT | 2023-08-25T17:31:51 | 2021-05-07T21:43:52 | Python | UTF-8 | Python | false | false | 9,399 | py | # -*- coding: utf-8 -*-
"""IdentityServicesEngineAPI nbar_app API fixtures and tests.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pytest
from fastjsonschema.exceptions import JsonSchemaException
from ciscoisesdk.exceptions import MalformedRequest
from ciscoisesdk.exceptions import ciscoisesdkException
from tests.environment import IDENTITY_SERVICES_ENGINE_VERSION
pytestmark = pytest.mark.skipif(IDENTITY_SERVICES_ENGINE_VERSION != '3.1.1', reason='version does not match')
def is_valid_get_nbar_apps(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
assert hasattr(obj, 'status_code')
json_schema_validate('jsd_1e8a476ad8455fdebad0d8973c810495_v3_1_1').validate(obj.response)
return True
def get_nbar_apps(api):
endpoint_result = api.nbar_app.get_nbar_apps(
filter='value1,value2',
filter_type='string',
page=0,
size=0,
sort='string',
sort_by='string'
)
return endpoint_result
@pytest.mark.nbar_app
def test_get_nbar_apps(api, validator):
try:
assert is_valid_get_nbar_apps(
validator,
get_nbar_apps(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def get_nbar_apps_default(api):
endpoint_result = api.nbar_app.get_nbar_apps(
filter=None,
filter_type=None,
page=None,
size=None,
sort=None,
sort_by=None
)
return endpoint_result
@pytest.mark.nbar_app
def test_get_nbar_apps_default(api, validator):
try:
assert is_valid_get_nbar_apps(
validator,
get_nbar_apps_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_create_nbar_app(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
assert hasattr(obj, 'status_code')
json_schema_validate('jsd_ccc30178afce5e51a65e96cd95ca1773_v3_1_1').validate(obj.response)
return True
def create_nbar_app(api):
endpoint_result = api.nbar_app.create_nbar_app(
active_validation=False,
description='string',
id='string',
name='string',
network_identities=[{'ports': 'string', 'protocol': 'string'}],
payload=None
)
return endpoint_result
@pytest.mark.nbar_app
def test_create_nbar_app(api, validator):
try:
assert is_valid_create_nbar_app(
validator,
create_nbar_app(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def create_nbar_app_default(api):
endpoint_result = api.nbar_app.create_nbar_app(
active_validation=False,
description=None,
id=None,
name=None,
network_identities=None,
payload=None
)
return endpoint_result
@pytest.mark.nbar_app
def test_create_nbar_app_default(api, validator):
try:
assert is_valid_create_nbar_app(
validator,
create_nbar_app_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_get_nbar_app_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
assert hasattr(obj, 'status_code')
json_schema_validate('jsd_61e99726f3745554a07ee102f74fe3bd_v3_1_1').validate(obj.response)
return True
def get_nbar_app_by_id(api):
endpoint_result = api.nbar_app.get_nbar_app_by_id(
id='string'
)
return endpoint_result
@pytest.mark.nbar_app
def test_get_nbar_app_by_id(api, validator):
try:
assert is_valid_get_nbar_app_by_id(
validator,
get_nbar_app_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def get_nbar_app_by_id_default(api):
endpoint_result = api.nbar_app.get_nbar_app_by_id(
id='string'
)
return endpoint_result
@pytest.mark.nbar_app
def test_get_nbar_app_by_id_default(api, validator):
try:
assert is_valid_get_nbar_app_by_id(
validator,
get_nbar_app_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_update_nbar_app_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
assert hasattr(obj, 'status_code')
json_schema_validate('jsd_b55622f1671359919573b261ba16ea71_v3_1_1').validate(obj.response)
return True
def update_nbar_app_by_id(api):
endpoint_result = api.nbar_app.update_nbar_app_by_id(
active_validation=False,
description='string',
id='string',
name='string',
network_identities=[{'ports': 'string', 'protocol': 'string'}],
payload=None
)
return endpoint_result
@pytest.mark.nbar_app
def test_update_nbar_app_by_id(api, validator):
try:
assert is_valid_update_nbar_app_by_id(
validator,
update_nbar_app_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def update_nbar_app_by_id_default(api):
endpoint_result = api.nbar_app.update_nbar_app_by_id(
active_validation=False,
id='string',
description=None,
name=None,
network_identities=None,
payload=None
)
return endpoint_result
@pytest.mark.nbar_app
def test_update_nbar_app_by_id_default(api, validator):
try:
assert is_valid_update_nbar_app_by_id(
validator,
update_nbar_app_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_delete_nbar_app_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
assert hasattr(obj, 'status_code')
json_schema_validate('jsd_44d289d5685350f5b00f130db0a45142_v3_1_1').validate(obj.response)
return True
def delete_nbar_app_by_id(api):
endpoint_result = api.nbar_app.delete_nbar_app_by_id(
id='string'
)
return endpoint_result
@pytest.mark.nbar_app
def test_delete_nbar_app_by_id(api, validator):
try:
assert is_valid_delete_nbar_app_by_id(
validator,
delete_nbar_app_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def delete_nbar_app_by_id_default(api):
endpoint_result = api.nbar_app.delete_nbar_app_by_id(
id='string'
)
return endpoint_result
@pytest.mark.nbar_app
def test_delete_nbar_app_by_id_default(api, validator):
try:
assert is_valid_delete_nbar_app_by_id(
validator,
delete_nbar_app_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
| [
"[email protected]"
] | |
435f09a949e10d5926b47462513ec6a935159a57 | ba4f68fb01aa32970dadea67cc8d039b4c0f6d9e | /python/facebook_abcs/graphs/bfs_short_reach.py | d7e090dc241a595327009effbf8e195b8a27e16d | [] | no_license | campbellmarianna/Code-Challenges | 12a7808563e36b1a2964f10ae64618c0be41b6c0 | 12e21c51665d81cf1ea94c2005f4f9d3584b66ec | refs/heads/master | 2021-08-03T23:23:58.297437 | 2020-05-15T07:13:46 | 2020-05-15T07:13:46 | 168,234,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,585 | py | '''
Prompt:
Consider an undirected graph where each edge is the same weight. Each of the nodes is labeled consecutively.
You will be given a number of queries. For each query, you will be given a list of edges describing an undirected graph. After you create a representation of the graph, you must determine and report the shortest distance to each of the other nodes from a given starting position using the breadth-first search algorithm (BFS). Distances are to be reported in node number order, ascending. If a node is unreachable, print -1 for that node. Each of the edges weighs 6 units of distance.
For example, in the first sample query below the start node is node 1: nodes 2 and 3 are each one edge (6 units) away, and node 4 is unreachable, so its reported distance is -1.
Function Description
Complete the bfs function in the editor below. It must return an array of integers representing distances from the start node to each other node in node ascending order. If a node is unreachable, its distance is -1.
bfs has the following parameter(s):
n: the integer number of nodes
m: the integer number of edges
edges: a 2D array of start and end nodes for edges
s: the node to start traversals from
Input Format
The first line contains an integer, the number of queries. Each of the following sets of lines has the following format:
The first line contains two space-separated integers n and m, the number of nodes and edges in the graph.
Each of the subsequent m lines contains two space-separated integers, u and v, describing an edge connecting node u to node v.
The last line contains a single integer, s, denoting the index of the starting node.
Constraints
Output Format
For each of the queries, print a single line of space-separated integers denoting the shortest distances to each of the other nodes from starting position s. These distances should be listed sequentially by node number, ascending, but should not include node s. If some node is unreachable from s, print -1 as the distance to that node.
Sample Input
2 # the number of queries
4 2 # n: number of nodes m: number of edges in the graph
1 2 # u and v: describing an edge connecting node u to node v
1 3
1
3 1
2 3
2 # s: denoting the index of the starting node.
Sample Output
6 6 -1
-1 6
'''
# Very helpful: Breadth First Search is looping through a sorted array and adding to a queue
# https://www.youtube.com/watch?v=-uR7BSfNJko
# Getting user input Iteration #1
# N = int(input())
# print(N)
# for _ in range(N):
# parts = input().strip().split(' ')
# print(parts)
import fileinput
for line in fileinput.input():
    parts = line.strip().split(' ')
    print(parts)
# Along with Breadth First Search Algorithm by lorisrossi https://www.hackerrank.com/challenges/bfsshortreach/forum
def bfs(n, m, edges, s):
from collections import deque
# Build graph
graph = {}
for num in range(1, n+1):
graph[num] = set()
for l, r in edges:
graph[l].add(r)
graph[r].add(l)
reached = {}
# Explore graph once
frontier = deque([(s, 0)])
seen = {s}
while frontier:
curr_node, curr_cost = frontier.popleft()
for nbour in graph[curr_node]:
if nbour not in seen:
seen.add(nbour)
reached[nbour] = curr_cost+6
frontier.append((nbour, curr_cost+6))
result = []
for node in range(1, n+1):
if s != node:
result.append(reached.get(node, -1))
return result
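# Quick check against the first sample query in the prompt above
# (n=4, m=2, edges (1,2) and (1,3), start s=1):
#     bfs(4, 2, [(1, 2), (1, 3)], 1)  ->  [6, 6, -1]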
| [
"[email protected]"
] | |
a7db3aafce35b88f549ace0cd7c17cbf0a387681 | 5005531655078e12546aba6b727dfa390acf3b2f | /src/python/dag.py | 0e5ed778d8ab13c47b050ed8443692b92c029af1 | [] | no_license | davidb2/rosalind | ee9201e2f79150a69389f702cf5926b42b6bce9f | c59930d3341e17d9f9f29c3b6c39d44f845d3215 | refs/heads/master | 2022-09-23T17:23:57.206469 | 2020-06-07T01:34:32 | 2020-06-07T01:34:32 | 187,707,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 977 | py | #!/usr/bin/env python3.6
import argparse
from queue import Queue
def acyclic(v, e, indeg, outdeg):
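    # Kahn-style topological check: repeatedly pull nodes that have no remaining
    # incoming edges and delete their outgoing edges; the graph is acyclic iff
    # every node is removed before the queue runs dry.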
unseen = set(range(1, v+1))
bfs = Queue()
for u in unseen:
if len(indeg[u]) == 0:
bfs.put(u)
while len(unseen) > 0:
if bfs.empty(): return False
top = bfs.get()
for out in outdeg[top]:
indeg[out].remove(top)
if len(indeg[out]) == 0:
bfs.put(out)
unseen.remove(top)
return True
def main(args):
k = int(input())
ans = []
for _ in range(k):
input()
v, e = tuple(map(int, input().split()))
indeg = {u: set() for u in range(1, v+1)}
outdeg = {u: set() for u in range(1, v+1)}
for _ in range(e):
a, b = tuple(map(int, input().split()))
indeg[b].add(a)
outdeg[a].add(b)
ans.append(+1 if acyclic(v, e, indeg, outdeg) else -1)
print(' '.join(map(str, ans)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
args = parser.parse_args()
main(args)
| [
"[email protected]"
] | |
547125bf7f5eb3fd28b9773a2c0f621cc581f51f | 5087f23a7d11be7580b6f0e3d8bc434ff0451c05 | /lambda.py | 09b30ea8d1db850cf24c3624943f654d851a2c87 | [
"MIT"
] | permissive | ShanmukhaSrinivas/python-75-hackathon | b07c35fea469c8fbc8769d02d3bb63d6af93cafc | 57eafed31d84ac09079ba9bcbaa9263d79996313 | refs/heads/master | 2020-04-11T10:51:15.137518 | 2019-12-13T07:55:52 | 2019-12-13T07:55:52 | 161,728,454 | 0 | 0 | MIT | 2018-12-14T03:47:44 | 2018-12-14T03:47:44 | null | UTF-8 | Python | false | false | 146 | py | # labmda function that returns a test number is even or not
f = lambda x: 'Even' if x%2==0 else 'Odd'
print(f(int(input('Enter a number \n')))) | [
"[email protected]"
] | |
58c682f65699112c1c375db59345043239b23d43 | 2ae229b7ac6d99f4731c65a413e289ef88eb3678 | /AWS/ec2/lib/boto-2.34.0/boto/rds2/layer1.py | 5615f1107db93270538336b2454ae73436d31f4f | [
"MIT"
] | permissive | aristotle-tek/cuny-bdif | a614c6c89f88ad42c39738c8eae1eef5b3cb1797 | 3a66ebc2a423ec719edfa31932949243a603cd92 | refs/heads/master | 2021-01-10T13:55:13.753417 | 2018-04-19T20:16:27 | 2018-04-19T20:16:27 | 50,869,942 | 6 | 10 | MIT | 2020-07-23T14:45:28 | 2016-02-01T20:48:16 | Python | UTF-8 | Python | false | false | 158,232 | py | # Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.rds2 import exceptions
from boto.compat import json
class RDSConnection(AWSQueryConnection):
"""
Amazon Relational Database Service
Amazon Relational Database Service (Amazon RDS) is a web service
that makes it easier to set up, operate, and scale a relational
database in the cloud. It provides cost-efficient, resizable
capacity for an industry-standard relational database and manages
common database administration tasks, freeing up developers to
focus on what makes their applications and businesses unique.
Amazon RDS gives you access to the capabilities of a familiar
MySQL or Oracle database server. This means the code,
applications, and tools you already use today with your existing
MySQL or Oracle databases work with Amazon RDS without
modification. Amazon RDS automatically backs up your database and
maintains the database software that powers your DB instance.
Amazon RDS is flexible: you can scale your database instance's
compute resources and storage capacity to meet your application's
demand. As with all Amazon Web Services, there are no up-front
investments, and you pay only for the resources you use.
This is the Amazon RDS API Reference . It contains a comprehensive
description of all Amazon RDS Query APIs and data types. Note that
this API is asynchronous and some actions may require polling to
determine when an action has been applied. See the parameter
description to determine if a change is applied immediately or on
the next instance reboot or during the maintenance window. For
more information on Amazon RDS concepts and usage scenarios, go to
the `Amazon RDS User Guide`_.
"""
APIVersion = "2013-09-09"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "rds.us-east-1.amazonaws.com"
ResponseError = JSONResponseError
_faults = {
"InvalidSubnet": exceptions.InvalidSubnet,
"DBParameterGroupQuotaExceeded": exceptions.DBParameterGroupQuotaExceeded,
"DBSubnetGroupAlreadyExists": exceptions.DBSubnetGroupAlreadyExists,
"DBSubnetGroupQuotaExceeded": exceptions.DBSubnetGroupQuotaExceeded,
"InstanceQuotaExceeded": exceptions.InstanceQuotaExceeded,
"InvalidRestore": exceptions.InvalidRestore,
"InvalidDBParameterGroupState": exceptions.InvalidDBParameterGroupState,
"AuthorizationQuotaExceeded": exceptions.AuthorizationQuotaExceeded,
"DBSecurityGroupAlreadyExists": exceptions.DBSecurityGroupAlreadyExists,
"InsufficientDBInstanceCapacity": exceptions.InsufficientDBInstanceCapacity,
"ReservedDBInstanceQuotaExceeded": exceptions.ReservedDBInstanceQuotaExceeded,
"DBSecurityGroupNotFound": exceptions.DBSecurityGroupNotFound,
"DBInstanceAlreadyExists": exceptions.DBInstanceAlreadyExists,
"ReservedDBInstanceNotFound": exceptions.ReservedDBInstanceNotFound,
"DBSubnetGroupDoesNotCoverEnoughAZs": exceptions.DBSubnetGroupDoesNotCoverEnoughAZs,
"InvalidDBSecurityGroupState": exceptions.InvalidDBSecurityGroupState,
"InvalidVPCNetworkState": exceptions.InvalidVPCNetworkState,
"ReservedDBInstancesOfferingNotFound": exceptions.ReservedDBInstancesOfferingNotFound,
"SNSTopicArnNotFound": exceptions.SNSTopicArnNotFound,
"SNSNoAuthorization": exceptions.SNSNoAuthorization,
"SnapshotQuotaExceeded": exceptions.SnapshotQuotaExceeded,
"OptionGroupQuotaExceeded": exceptions.OptionGroupQuotaExceeded,
"DBParameterGroupNotFound": exceptions.DBParameterGroupNotFound,
"SNSInvalidTopic": exceptions.SNSInvalidTopic,
"InvalidDBSubnetGroupState": exceptions.InvalidDBSubnetGroupState,
"DBSubnetGroupNotFound": exceptions.DBSubnetGroupNotFound,
"InvalidOptionGroupState": exceptions.InvalidOptionGroupState,
"SourceNotFound": exceptions.SourceNotFound,
"SubscriptionCategoryNotFound": exceptions.SubscriptionCategoryNotFound,
"EventSubscriptionQuotaExceeded": exceptions.EventSubscriptionQuotaExceeded,
"DBSecurityGroupNotSupported": exceptions.DBSecurityGroupNotSupported,
"InvalidEventSubscriptionState": exceptions.InvalidEventSubscriptionState,
"InvalidDBSubnetState": exceptions.InvalidDBSubnetState,
"InvalidDBSnapshotState": exceptions.InvalidDBSnapshotState,
"SubscriptionAlreadyExist": exceptions.SubscriptionAlreadyExist,
"DBSecurityGroupQuotaExceeded": exceptions.DBSecurityGroupQuotaExceeded,
"ProvisionedIopsNotAvailableInAZ": exceptions.ProvisionedIopsNotAvailableInAZ,
"AuthorizationNotFound": exceptions.AuthorizationNotFound,
"OptionGroupAlreadyExists": exceptions.OptionGroupAlreadyExists,
"SubscriptionNotFound": exceptions.SubscriptionNotFound,
"DBUpgradeDependencyFailure": exceptions.DBUpgradeDependencyFailure,
"PointInTimeRestoreNotEnabled": exceptions.PointInTimeRestoreNotEnabled,
"AuthorizationAlreadyExists": exceptions.AuthorizationAlreadyExists,
"DBSubnetQuotaExceeded": exceptions.DBSubnetQuotaExceeded,
"OptionGroupNotFound": exceptions.OptionGroupNotFound,
"DBParameterGroupAlreadyExists": exceptions.DBParameterGroupAlreadyExists,
"DBInstanceNotFound": exceptions.DBInstanceNotFound,
"ReservedDBInstanceAlreadyExists": exceptions.ReservedDBInstanceAlreadyExists,
"InvalidDBInstanceState": exceptions.InvalidDBInstanceState,
"DBSnapshotNotFound": exceptions.DBSnapshotNotFound,
"DBSnapshotAlreadyExists": exceptions.DBSnapshotAlreadyExists,
"StorageQuotaExceeded": exceptions.StorageQuotaExceeded,
"SubnetAlreadyInUse": exceptions.SubnetAlreadyInUse,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs:
kwargs['host'] = region.endpoint
super(RDSConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def add_source_identifier_to_subscription(self, subscription_name,
source_identifier):
"""
Adds a source identifier to an existing RDS event notification
subscription.
:type subscription_name: string
:param subscription_name: The name of the RDS event notification
subscription you want to add a source identifier to.
:type source_identifier: string
:param source_identifier:
The identifier of the event source to be added. An identifier must
begin with a letter and must contain only ASCII letters, digits,
and hyphens; it cannot end with a hyphen or contain two consecutive
hyphens.
Constraints:
+ If the source type is a DB instance, then a `DBInstanceIdentifier`
must be supplied.
+ If the source type is a DB security group, a `DBSecurityGroupName`
must be supplied.
+ If the source type is a DB parameter group, a `DBParameterGroupName`
must be supplied.
+ If the source type is a DB snapshot, a `DBSnapshotIdentifier` must be
supplied.
"""
params = {
'SubscriptionName': subscription_name,
'SourceIdentifier': source_identifier,
}
return self._make_request(
action='AddSourceIdentifierToSubscription',
verb='POST',
path='/', params=params)
def add_tags_to_resource(self, resource_name, tags):
"""
Adds metadata tags to an Amazon RDS resource. These tags can
also be used with cost allocation reporting to track cost
associated with Amazon RDS resources, or used in Condition
statement in IAM policy for Amazon RDS.
For an overview on tagging Amazon RDS resources, see `Tagging
Amazon RDS Resources`_.
:type resource_name: string
:param resource_name: The Amazon RDS resource the tags will be added
to. This value is an Amazon Resource Name (ARN). For information
about creating an ARN, see ` Constructing an RDS Amazon Resource
Name (ARN)`_.
:type tags: list
:param tags: The tags to be assigned to the Amazon RDS resource.
"""
params = {'ResourceName': resource_name, }
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='AddTagsToResource',
verb='POST',
path='/', params=params)
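# Illustrative sketch (hypothetical ARN): tags are passed as (key, value)
# tuples, which build_complex_list_params expands into Tags.member.N.Key /
# Tags.member.N.Value request parameters.
#
#   conn.add_tags_to_resource(
#       resource_name='arn:aws:rds:us-east-1:123456789012:db:mydbinstance',
#       tags=[('environment', 'test'), ('owner', 'data-team')])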
def authorize_db_security_group_ingress(self, db_security_group_name,
cidrip=None,
ec2_security_group_name=None,
ec2_security_group_id=None,
ec2_security_group_owner_id=None):
"""
Enables ingress to a DBSecurityGroup using one of two forms of
authorization. First, EC2 or VPC security groups can be added
to the DBSecurityGroup if the application using the database
is running on EC2 or VPC instances. Second, IP ranges are
available if the application accessing your database is
running on the Internet. Required parameters for this API are
one of CIDR range, EC2SecurityGroupId for VPC, or
(EC2SecurityGroupOwnerId and either EC2SecurityGroupName or
EC2SecurityGroupId for non-VPC).
You cannot authorize ingress from an EC2 security group in one
Region to an Amazon RDS DB instance in another. You cannot
authorize ingress from a VPC security group in one VPC to an
Amazon RDS DB instance in another.
For an overview of CIDR ranges, go to the `Wikipedia
Tutorial`_.
:type db_security_group_name: string
:param db_security_group_name: The name of the DB security group to add
authorization to.
:type cidrip: string
:param cidrip: The IP range to authorize.
:type ec2_security_group_name: string
:param ec2_security_group_name: Name of the EC2 security group to
authorize. For VPC DB security groups, `EC2SecurityGroupId` must be
provided. Otherwise, EC2SecurityGroupOwnerId and either
`EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.
:type ec2_security_group_id: string
:param ec2_security_group_id: Id of the EC2 security group to
authorize. For VPC DB security groups, `EC2SecurityGroupId` must be
provided. Otherwise, EC2SecurityGroupOwnerId and either
`EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.
:type ec2_security_group_owner_id: string
:param ec2_security_group_owner_id: AWS Account Number of the owner of
the EC2 security group specified in the EC2SecurityGroupName
parameter. The AWS Access Key ID is not an acceptable value. For
VPC DB security groups, `EC2SecurityGroupId` must be provided.
Otherwise, EC2SecurityGroupOwnerId and either
`EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.
"""
params = {'DBSecurityGroupName': db_security_group_name, }
if cidrip is not None:
params['CIDRIP'] = cidrip
if ec2_security_group_name is not None:
params['EC2SecurityGroupName'] = ec2_security_group_name
if ec2_security_group_id is not None:
params['EC2SecurityGroupId'] = ec2_security_group_id
if ec2_security_group_owner_id is not None:
params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id
return self._make_request(
action='AuthorizeDBSecurityGroupIngress',
verb='POST',
path='/', params=params)
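# Illustrative sketch: authorizing an IP range for a non-VPC DB security
# group. The group name and CIDR range are placeholders.
#
#   conn.authorize_db_security_group_ingress(
#       db_security_group_name='mysecuritygroup',
#       cidrip='203.0.113.0/24')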
def copy_db_snapshot(self, source_db_snapshot_identifier,
target_db_snapshot_identifier, tags=None):
"""
Copies the specified DBSnapshot. The source DBSnapshot must be
in the "available" state.
:type source_db_snapshot_identifier: string
:param source_db_snapshot_identifier: The identifier for the source DB
snapshot.
Constraints:
+ Must be the identifier for a valid system snapshot in the "available"
state.
Example: `rds:mydb-2012-04-02-00-01`
:type target_db_snapshot_identifier: string
:param target_db_snapshot_identifier: The identifier for the copied
snapshot.
Constraints:
+ Cannot be null, empty, or blank
+ Must contain from 1 to 255 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
Example: `my-db-snapshot`
:type tags: list
:param tags: A list of tags.
"""
params = {
'SourceDBSnapshotIdentifier': source_db_snapshot_identifier,
'TargetDBSnapshotIdentifier': target_db_snapshot_identifier,
}
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CopyDBSnapshot',
verb='POST',
path='/', params=params)
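# Illustrative sketch: copying an automated system snapshot to a manual
# snapshot, using identifiers shaped like the examples documented above.
#
#   conn.copy_db_snapshot(
#       source_db_snapshot_identifier='rds:mydb-2012-04-02-00-01',
#       target_db_snapshot_identifier='my-db-snapshot')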
def create_db_instance(self, db_instance_identifier, allocated_storage,
db_instance_class, engine, master_username,
master_user_password, db_name=None,
db_security_groups=None,
vpc_security_group_ids=None,
availability_zone=None, db_subnet_group_name=None,
preferred_maintenance_window=None,
db_parameter_group_name=None,
backup_retention_period=None,
preferred_backup_window=None, port=None,
multi_az=None, engine_version=None,
auto_minor_version_upgrade=None,
license_model=None, iops=None,
option_group_name=None, character_set_name=None,
publicly_accessible=None, tags=None):
"""
Creates a new DB instance.
:type db_name: string
:param db_name: The meaning of this parameter differs according to the
database engine you use.
**MySQL**
The name of the database to create when the DB instance is created. If
this parameter is not specified, no database is created in the DB
instance.
Constraints:
+ Must contain 1 to 64 alphanumeric characters
+ Cannot be a word reserved by the specified database engine
Type: String
**Oracle**
The Oracle System ID (SID) of the created DB instance.
Default: `ORCL`
Constraints:
+ Cannot be longer than 8 characters
**SQL Server**
Not applicable. Must be null.
:type db_instance_identifier: string
:param db_instance_identifier: The DB instance identifier. This
parameter is stored as a lowercase string.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15
for SQL Server).
+ First character must be a letter.
+ Cannot end with a hyphen or contain two consecutive hyphens.
Example: `mydbinstance`
:type allocated_storage: integer
:param allocated_storage: The amount of storage (in gigabytes) to be
initially allocated for the database instance.
**MySQL**
Constraints: Must be an integer from 5 to 1024.
Type: Integer
**Oracle**
Constraints: Must be an integer from 10 to 1024.
**SQL Server**
Constraints: Must be an integer from 200 to 1024 (Standard Edition and
Enterprise Edition) or from 30 to 1024 (Express Edition and Web
Edition)
:type db_instance_class: string
:param db_instance_class: The compute and memory capacity of the DB
instance.
Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large |
db.m1.xlarge | db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge`
:type engine: string
:param engine: The name of the database engine to be used for this
instance.
Valid Values: `MySQL` | `oracle-se1` | `oracle-se` | `oracle-ee` |
`sqlserver-ee` | `sqlserver-se` | `sqlserver-ex` | `sqlserver-web`
:type master_username: string
:param master_username:
The name of the master user for the DB instance.
**MySQL**
Constraints:
+ Must be 1 to 16 alphanumeric characters.
+ First character must be a letter.
+ Cannot be a reserved word for the chosen database engine.
Type: String
**Oracle**
Constraints:
+ Must be 1 to 30 alphanumeric characters.
+ First character must be a letter.
+ Cannot be a reserved word for the chosen database engine.
**SQL Server**
Constraints:
+ Must be 1 to 128 alphanumeric characters.
+ First character must be a letter.
+ Cannot be a reserved word for the chosen database engine.
:type master_user_password: string
:param master_user_password: The password for the master database user.
Can be any printable ASCII character except "/", '"', or "@".
Type: String
**MySQL**
Constraints: Must contain from 8 to 41 characters.
**Oracle**
Constraints: Must contain from 8 to 30 characters.
**SQL Server**
Constraints: Must contain from 8 to 128 characters.
:type db_security_groups: list
:param db_security_groups: A list of DB security groups to associate
with this DB instance.
Default: The default DB security group for the database engine.
:type vpc_security_group_ids: list
:param vpc_security_group_ids: A list of EC2 VPC security groups to
associate with this DB instance.
Default: The default EC2 VPC security group for the DB subnet group's
VPC.
:type availability_zone: string
:param availability_zone: The EC2 Availability Zone that the database
instance will be created in.
Default: A random, system-chosen Availability Zone in the endpoint's
region.
Example: `us-east-1d`
Constraint: The AvailabilityZone parameter cannot be specified if the
MultiAZ parameter is set to `True`. The specified Availability Zone
must be in the same region as the current endpoint.
:type db_subnet_group_name: string
:param db_subnet_group_name: A DB subnet group to associate with this
DB instance.
If there is no DB subnet group, then it is a non-VPC DB instance.
:type preferred_maintenance_window: string
:param preferred_maintenance_window: The weekly time range (in UTC)
during which system maintenance can occur.
Format: `ddd:hh24:mi-ddd:hh24:mi`
Default: A 30-minute window selected at random from an 8-hour block of
time per region, occurring on a random day of the week. To see the
time blocks available, see ` Adjusting the Preferred Maintenance
Window`_ in the Amazon RDS User Guide.
Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun
Constraints: Minimum 30-minute window.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of the DB parameter group to associate with this DB instance.
If this argument is omitted, the default DBParameterGroup for the
specified engine will be used.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type backup_retention_period: integer
:param backup_retention_period:
The number of days for which automated backups are retained. Setting
this parameter to a positive number enables backups. Setting this
parameter to 0 disables automated backups.
Default: 1
Constraints:
+ Must be a value from 0 to 8
+ Cannot be set to 0 if the DB instance is a master instance with read
replicas
:type preferred_backup_window: string
:param preferred_backup_window: The daily time range during which
automated backups are created if automated backups are enabled,
using the `BackupRetentionPeriod` parameter.
Default: A 30-minute window selected at random from an 8-hour block of
time per region. See the Amazon RDS User Guide for the time blocks
for each region from which the default backup windows are assigned.
Constraints: Must be in the format `hh24:mi-hh24:mi`. Times should be
Universal Time Coordinated (UTC). Must not conflict with the
preferred maintenance window. Must be at least 30 minutes.
:type port: integer
:param port: The port number on which the database accepts connections.
**MySQL**
Default: `3306`
Valid Values: `1150-65535`
Type: Integer
**Oracle**
Default: `1521`
Valid Values: `1150-65535`
**SQL Server**
Default: `1433`
Valid Values: `1150-65535` except for `1434` and `3389`.
:type multi_az: boolean
:param multi_az: Specifies if the DB instance is a Multi-AZ deployment.
You cannot set the AvailabilityZone parameter if the MultiAZ
parameter is set to true.
:type engine_version: string
:param engine_version: The version number of the database engine to
use.
**MySQL**
Example: `5.1.42`
Type: String
**Oracle**
Example: `11.2.0.2.v2`
Type: String
**SQL Server**
Example: `10.50.2789.0.v1`
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Indicates that minor engine upgrades
will be applied automatically to the DB instance during the
maintenance window.
Default: `True`
:type license_model: string
:param license_model: License model information for this DB instance.
Valid values: `license-included` | `bring-your-own-license` | `general-
public-license`
:type iops: integer
:param iops: The amount of Provisioned IOPS (input/output operations
per second) to be initially allocated for the DB instance.
Constraints: Must be an integer greater than 1000.
:type option_group_name: string
:param option_group_name: Indicates that the DB instance should be
associated with the specified option group.
Permanent options, such as the TDE option for Oracle Advanced Security
TDE, cannot be removed from an option group, and that option group
cannot be removed from a DB instance once it is associated with a
DB instance.
:type character_set_name: string
:param character_set_name: For supported engines, indicates that the DB
instance should be associated with the specified CharacterSet.
:type publicly_accessible: boolean
:param publicly_accessible: Specifies the accessibility options for the
DB instance. A value of true specifies an Internet-facing instance
with a publicly resolvable DNS name, which resolves to a public IP
address. A value of false specifies an internal instance with a DNS
name that resolves to a private IP address.
Default: The default behavior varies depending on whether a VPC has
been requested or not. The following list shows the default
behavior in each case.
+ **Default VPC:** true
+ **VPC:** false
If no DB subnet group has been specified as part of the request and the
PubliclyAccessible value has not been set, the DB instance will be
publicly accessible. If a specific DB subnet group has been
specified as part of the request and the PubliclyAccessible value
has not been set, the DB instance will be private.
:type tags: list
:param tags: A list of tags.
"""
params = {
'DBInstanceIdentifier': db_instance_identifier,
'AllocatedStorage': allocated_storage,
'DBInstanceClass': db_instance_class,
'Engine': engine,
'MasterUsername': master_username,
'MasterUserPassword': master_user_password,
}
if db_name is not None:
params['DBName'] = db_name
if db_security_groups is not None:
self.build_list_params(params,
db_security_groups,
'DBSecurityGroups.member')
if vpc_security_group_ids is not None:
self.build_list_params(params,
vpc_security_group_ids,
'VpcSecurityGroupIds.member')
if availability_zone is not None:
params['AvailabilityZone'] = availability_zone
if db_subnet_group_name is not None:
params['DBSubnetGroupName'] = db_subnet_group_name
if preferred_maintenance_window is not None:
params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if db_parameter_group_name is not None:
params['DBParameterGroupName'] = db_parameter_group_name
if backup_retention_period is not None:
params['BackupRetentionPeriod'] = backup_retention_period
if preferred_backup_window is not None:
params['PreferredBackupWindow'] = preferred_backup_window
if port is not None:
params['Port'] = port
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if engine_version is not None:
params['EngineVersion'] = engine_version
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
if license_model is not None:
params['LicenseModel'] = license_model
if iops is not None:
params['Iops'] = iops
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if character_set_name is not None:
params['CharacterSetName'] = character_set_name
if publicly_accessible is not None:
params['PubliclyAccessible'] = str(
publicly_accessible).lower()
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBInstance',
verb='POST',
path='/', params=params)
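# Illustrative sketch: creating a small MySQL instance. Only the required
# arguments plus a few common options are shown; every value is a placeholder
# chosen to satisfy the constraints documented above.
#
#   conn.create_db_instance(
#       db_instance_identifier='mydbinstance',
#       allocated_storage=5,
#       db_instance_class='db.t1.micro',
#       engine='MySQL',
#       master_username='admin',
#       master_user_password='<password>',
#       backup_retention_period=1,
#       multi_az=False)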
def create_db_instance_read_replica(self, db_instance_identifier,
source_db_instance_identifier,
db_instance_class=None,
availability_zone=None, port=None,
auto_minor_version_upgrade=None,
iops=None, option_group_name=None,
publicly_accessible=None, tags=None):
"""
Creates a DB instance that acts as a read replica of a source
DB instance.
All read replica DB instances are created as Single-AZ
deployments with backups disabled. All other DB instance
attributes (including DB security groups and DB parameter
groups) are inherited from the source DB instance, except as
specified below.
The source DB instance must have backup retention enabled.
:type db_instance_identifier: string
:param db_instance_identifier: The DB instance identifier of the read
replica. This is the unique key that identifies a DB instance. This
parameter is stored as a lowercase string.
:type source_db_instance_identifier: string
:param source_db_instance_identifier: The identifier of the DB instance
that will act as the source for the read replica. Each DB instance
can have up to five read replicas.
Constraints: Must be the identifier of an existing DB instance that is
not already a read replica DB instance.
:type db_instance_class: string
:param db_instance_class: The compute and memory capacity of the read
replica.
Valid Values: `db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge
| db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge`
Default: Inherits from the source DB instance.
:type availability_zone: string
:param availability_zone: The Amazon EC2 Availability Zone that the
read replica will be created in.
Default: A random, system-chosen Availability Zone in the endpoint's
region.
Example: `us-east-1d`
:type port: integer
:param port: The port number that the DB instance uses for connections.
Default: Inherits from the source DB instance
Valid Values: `1150-65535`
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Indicates that minor engine upgrades
will be applied automatically to the read replica during the
maintenance window.
Default: Inherits from the source DB instance
:type iops: integer
:param iops: The amount of Provisioned IOPS (input/output operations
per second) to be initially allocated for the DB instance.
:type option_group_name: string
:param option_group_name: The option group the DB instance will be
associated with. If omitted, the default option group for the
engine specified will be used.
:type publicly_accessible: boolean
:param publicly_accessible: Specifies the accessibility options for the
DB instance. A value of true specifies an Internet-facing instance
with a publicly resolvable DNS name, which resolves to a public IP
address. A value of false specifies an internal instance with a DNS
name that resolves to a private IP address.
Default: The default behavior varies depending on whether a VPC has
been requested or not. The following list shows the default
behavior in each case.
+ **Default VPC:** true
+ **VPC:** false
If no DB subnet group has been specified as part of the request and the
PubliclyAccessible value has not been set, the DB instance will be
publicly accessible. If a specific DB subnet group has been
specified as part of the request and the PubliclyAccessible value
has not been set, the DB instance will be private.
:type tags: list
:param tags: A list of tags.
"""
params = {
'DBInstanceIdentifier': db_instance_identifier,
'SourceDBInstanceIdentifier': source_db_instance_identifier,
}
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if availability_zone is not None:
params['AvailabilityZone'] = availability_zone
if port is not None:
params['Port'] = port
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
if iops is not None:
params['Iops'] = iops
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if publicly_accessible is not None:
params['PubliclyAccessible'] = str(
publicly_accessible).lower()
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBInstanceReadReplica',
verb='POST',
path='/', params=params)
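# Illustrative sketch: creating a read replica; unspecified attributes are
# inherited from the source instance as described above.
#
#   conn.create_db_instance_read_replica(
#       db_instance_identifier='mydbinstance-replica',
#       source_db_instance_identifier='mydbinstance')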
def create_db_parameter_group(self, db_parameter_group_name,
db_parameter_group_family, description,
tags=None):
"""
Creates a new DB parameter group.
A DB parameter group is initially created with the default
parameters for the database engine used by the DB instance. To
provide custom values for any of the parameters, you must
modify the group after creating it using
ModifyDBParameterGroup . Once you've created a DB parameter
group, you need to associate it with your DB instance using
ModifyDBInstance . When you associate a new DB parameter group
with a running DB instance, you need to reboot the DB Instance
for the new DB parameter group and associated settings to take
effect.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of the DB parameter group.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
This value is stored as a lower-case string.
:type db_parameter_group_family: string
:param db_parameter_group_family: The DB parameter group family name. A
DB parameter group can be associated with one and only one DB
parameter group family, and can be applied only to a DB instance
running a database engine and engine version compatible with that
DB parameter group family.
:type description: string
:param description: The description for the DB parameter group.
:type tags: list
:param tags: A list of tags.
"""
params = {
'DBParameterGroupName': db_parameter_group_name,
'DBParameterGroupFamily': db_parameter_group_family,
'Description': description,
}
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBParameterGroup',
verb='POST',
path='/', params=params)
def create_db_security_group(self, db_security_group_name,
db_security_group_description, tags=None):
"""
Creates a new DB security group. DB security groups control
access to a DB instance.
:type db_security_group_name: string
:param db_security_group_name: The name for the DB security group. This
value is stored as a lowercase string.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
+ Must not be "Default"
+ May not contain spaces
Example: `mysecuritygroup`
:type db_security_group_description: string
:param db_security_group_description: The description for the DB
security group.
:type tags: list
:param tags: A list of tags.
"""
params = {
'DBSecurityGroupName': db_security_group_name,
'DBSecurityGroupDescription': db_security_group_description,
}
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBSecurityGroup',
verb='POST',
path='/', params=params)
def create_db_snapshot(self, db_snapshot_identifier,
db_instance_identifier, tags=None):
"""
Creates a DBSnapshot. The source DBInstance must be in
"available" state.
:type db_snapshot_identifier: string
:param db_snapshot_identifier: The identifier for the DB snapshot.
Constraints:
+ Cannot be null, empty, or blank
+ Must contain from 1 to 255 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
Example: `my-snapshot-id`
:type db_instance_identifier: string
:param db_instance_identifier:
The DB instance identifier. This is the unique key that identifies a DB
instance. This parameter isn't case sensitive.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type tags: list
:param tags: A list of tags.
"""
params = {
'DBSnapshotIdentifier': db_snapshot_identifier,
'DBInstanceIdentifier': db_instance_identifier,
}
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBSnapshot',
verb='POST',
path='/', params=params)
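# Illustrative sketch: taking a manual snapshot of an "available" instance.
#
#   conn.create_db_snapshot(
#       db_snapshot_identifier='my-snapshot-id',
#       db_instance_identifier='mydbinstance')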
def create_db_subnet_group(self, db_subnet_group_name,
db_subnet_group_description, subnet_ids,
tags=None):
"""
Creates a new DB subnet group. DB subnet groups must contain
at least one subnet in at least two AZs in the region.
:type db_subnet_group_name: string
:param db_subnet_group_name: The name for the DB subnet group. This
value is stored as a lowercase string.
Constraints: Must contain no more than 255 alphanumeric characters or
hyphens. Must not be "Default".
Example: `mySubnetgroup`
:type db_subnet_group_description: string
:param db_subnet_group_description: The description for the DB subnet
group.
:type subnet_ids: list
:param subnet_ids: The EC2 Subnet IDs for the DB subnet group.
:type tags: list
:param tags: A list of tags, passed as (key, value) tuples.
"""
params = {
'DBSubnetGroupName': db_subnet_group_name,
'DBSubnetGroupDescription': db_subnet_group_description,
}
self.build_list_params(params,
subnet_ids,
'SubnetIds.member')
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBSubnetGroup',
verb='POST',
path='/', params=params)
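# Illustrative sketch (hypothetical subnet IDs): a DB subnet group needs
# subnets in at least two Availability Zones of the region.
#
#   conn.create_db_subnet_group(
#       db_subnet_group_name='mysubnetgroup',
#       db_subnet_group_description='Subnets for VPC DB instances',
#       subnet_ids=['subnet-12345678', 'subnet-87654321'])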
def create_event_subscription(self, subscription_name, sns_topic_arn,
source_type=None, event_categories=None,
source_ids=None, enabled=None, tags=None):
"""
Creates an RDS event notification subscription. This action
requires a topic ARN (Amazon Resource Name) created by either
the RDS console, the SNS console, or the SNS API. To obtain an
ARN with SNS, you must create a topic in Amazon SNS and
subscribe to the topic. The ARN is displayed in the SNS
console.
You can specify the type of source (SourceType) you want to be
notified of, provide a list of RDS sources (SourceIds) that
triggers the events, and provide a list of event categories
(EventCategories) for events you want to be notified of. For
example, you can specify SourceType = db-instance, SourceIds =
mydbinstance1, mydbinstance2 and EventCategories =
Availability, Backup.
If you specify both the SourceType and SourceIds, such as
SourceType = db-instance and SourceIdentifier = myDBInstance1,
you will be notified of all the db-instance events for the
specified source. If you specify a SourceType but do not
specify a SourceIdentifier, you will receive notice of the
events for that source type for all your RDS sources. If you
specify neither the SourceType nor the SourceIdentifier,
you will be notified of events generated from all RDS sources
belonging to your customer account.
:type subscription_name: string
:param subscription_name: The name of the subscription.
Constraints: The name must be less than 255 characters.
:type sns_topic_arn: string
:param sns_topic_arn: The Amazon Resource Name (ARN) of the SNS topic
created for event notification. The ARN is created by Amazon SNS
when you create a topic and subscribe to it.
:type source_type: string
:param source_type: The type of source that will be generating the
events. For example, if you want to be notified of events generated
by a DB instance, you would set this parameter to db-instance. If
this value is not specified, all events are returned.
Valid values: db-instance | db-parameter-group | db-security-group |
db-snapshot
:type event_categories: list
:param event_categories: A list of event categories for a SourceType
that you want to subscribe to. You can see a list of the categories
for a given SourceType in the `Events`_ topic in the Amazon RDS
User Guide or by using the **DescribeEventCategories** action.
:type source_ids: list
:param source_ids:
The list of identifiers of the event sources for which events will be
returned. If not specified, then all sources are included in the
response. An identifier must begin with a letter and must contain
only ASCII letters, digits, and hyphens; it cannot end with a
hyphen or contain two consecutive hyphens.
Constraints:
+ If SourceIds are supplied, SourceType must also be provided.
+ If the source type is a DB instance, then a `DBInstanceIdentifier`
must be supplied.
+ If the source type is a DB security group, a `DBSecurityGroupName`
must be supplied.
+ If the source type is a DB parameter group, a `DBParameterGroupName`
must be supplied.
+ If the source type is a DB snapshot, a `DBSnapshotIdentifier` must be
supplied.
:type enabled: boolean
:param enabled: A Boolean value; set to **true** to activate the
subscription, set to **false** to create the subscription but not
activate it.
:type tags: list
:param tags: A list of tags.
"""
params = {
'SubscriptionName': subscription_name,
'SnsTopicArn': sns_topic_arn,
}
if source_type is not None:
params['SourceType'] = source_type
if event_categories is not None:
self.build_list_params(params,
event_categories,
'EventCategories.member')
if source_ids is not None:
self.build_list_params(params,
source_ids,
'SourceIds.member')
if enabled is not None:
params['Enabled'] = str(
enabled).lower()
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateEventSubscription',
verb='POST',
path='/', params=params)
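# Illustrative sketch (hypothetical SNS topic ARN): subscribing to events for
# two DB instances; the category names are whatever DescribeEventCategories
# reports for the db-instance source type.
#
#   conn.create_event_subscription(
#       subscription_name='my-subscription',
#       sns_topic_arn='arn:aws:sns:us-east-1:123456789012:my-topic',
#       source_type='db-instance',
#       event_categories=['availability', 'backup'],
#       source_ids=['mydbinstance1', 'mydbinstance2'],
#       enabled=True)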
def create_option_group(self, option_group_name, engine_name,
major_engine_version, option_group_description,
tags=None):
"""
Creates a new option group. You can create up to 20 option
groups.
:type option_group_name: string
:param option_group_name: Specifies the name of the option group to be
created.
Constraints:
+ Must be 1 to 255 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
Example: `myoptiongroup`
:type engine_name: string
:param engine_name: Specifies the name of the engine that this option
group should be associated with.
:type major_engine_version: string
:param major_engine_version: Specifies the major version of the engine
that this option group should be associated with.
:type option_group_description: string
:param option_group_description: The description of the option group.
:type tags: list
:param tags: A list of tags.
"""
params = {
'OptionGroupName': option_group_name,
'EngineName': engine_name,
'MajorEngineVersion': major_engine_version,
'OptionGroupDescription': option_group_description,
}
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateOptionGroup',
verb='POST',
path='/', params=params)
def delete_db_instance(self, db_instance_identifier,
skip_final_snapshot=None,
final_db_snapshot_identifier=None):
"""
The DeleteDBInstance action deletes a previously provisioned
DB instance. A successful response from the web service
indicates the request was received correctly. When you delete
a DB instance, all automated backups for that instance are
deleted and cannot be recovered. Manual DB snapshots of the DB
instance to be deleted are not deleted.
If a final DB snapshot is requested the status of the RDS
instance will be "deleting" until the DB snapshot is created.
The API action `DescribeDBInstance` is used to monitor the
status of this operation. The action cannot be canceled or
reverted once submitted.
:type db_instance_identifier: string
:param db_instance_identifier:
The DB instance identifier for the DB instance to be deleted. This
parameter isn't case sensitive.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type skip_final_snapshot: boolean
:param skip_final_snapshot: Determines whether a final DB snapshot is
created before the DB instance is deleted. If `True` is specified,
no DBSnapshot is created. If false is specified, a DB snapshot is
created before the DB instance is deleted.
The FinalDBSnapshotIdentifier parameter must be specified if
SkipFinalSnapshot is `False`.
Default: `False`
:type final_db_snapshot_identifier: string
:param final_db_snapshot_identifier:
The DBSnapshotIdentifier of the new DBSnapshot created when
SkipFinalSnapshot is set to `False`.
Specifying this parameter and also setting the SkipFinalSnapshot
parameter to true results in an error.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
"""
params = {'DBInstanceIdentifier': db_instance_identifier, }
if skip_final_snapshot is not None:
params['SkipFinalSnapshot'] = str(
skip_final_snapshot).lower()
if final_db_snapshot_identifier is not None:
params['FinalDBSnapshotIdentifier'] = final_db_snapshot_identifier
return self._make_request(
action='DeleteDBInstance',
verb='POST',
path='/', params=params)
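# Illustrative sketch: deleting an instance while keeping a final snapshot.
# Because skip_final_snapshot is False, final_db_snapshot_identifier is
# required.
#
#   conn.delete_db_instance(
#       db_instance_identifier='mydbinstance',
#       skip_final_snapshot=False,
#       final_db_snapshot_identifier='mydbinstance-final')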
def delete_db_parameter_group(self, db_parameter_group_name):
"""
Deletes a specified DBParameterGroup. The DB parameter group to be
deleted cannot be associated with any DB instances.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of the DB parameter group.
Constraints:
+ Must be the name of an existing DB parameter group
+ You cannot delete a default DB parameter group
+ Cannot be associated with any DB instances
"""
params = {'DBParameterGroupName': db_parameter_group_name, }
return self._make_request(
action='DeleteDBParameterGroup',
verb='POST',
path='/', params=params)
def delete_db_security_group(self, db_security_group_name):
"""
Deletes a DB security group.
The specified DB security group must not be associated with
any DB instances.
:type db_security_group_name: string
:param db_security_group_name:
The name of the DB security group to delete.
You cannot delete the default DB security group.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
+ Must not be "Default"
+ May not contain spaces
"""
params = {'DBSecurityGroupName': db_security_group_name, }
return self._make_request(
action='DeleteDBSecurityGroup',
verb='POST',
path='/', params=params)
def delete_db_snapshot(self, db_snapshot_identifier):
"""
Deletes a DBSnapshot.
The DBSnapshot must be in the `available` state to be deleted.
:type db_snapshot_identifier: string
:param db_snapshot_identifier: The DBSnapshot identifier.
Constraints: Must be the name of an existing DB snapshot in the
`available` state.
"""
params = {'DBSnapshotIdentifier': db_snapshot_identifier, }
return self._make_request(
action='DeleteDBSnapshot',
verb='POST',
path='/', params=params)
def delete_db_subnet_group(self, db_subnet_group_name):
"""
Deletes a DB subnet group.
The specified database subnet group must not be associated
with any DB instances.
:type db_subnet_group_name: string
:param db_subnet_group_name:
The name of the database subnet group to delete.
You cannot delete the default subnet group.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
"""
params = {'DBSubnetGroupName': db_subnet_group_name, }
return self._make_request(
action='DeleteDBSubnetGroup',
verb='POST',
path='/', params=params)
def delete_event_subscription(self, subscription_name):
"""
Deletes an RDS event notification subscription.
:type subscription_name: string
:param subscription_name: The name of the RDS event notification
subscription you want to delete.
"""
params = {'SubscriptionName': subscription_name, }
return self._make_request(
action='DeleteEventSubscription',
verb='POST',
path='/', params=params)
def delete_option_group(self, option_group_name):
"""
Deletes an existing option group.
:type option_group_name: string
:param option_group_name:
The name of the option group to be deleted.
You cannot delete default option groups.
"""
params = {'OptionGroupName': option_group_name, }
return self._make_request(
action='DeleteOptionGroup',
verb='POST',
path='/', params=params)
def describe_db_engine_versions(self, engine=None, engine_version=None,
db_parameter_group_family=None,
max_records=None, marker=None,
default_only=None,
list_supported_character_sets=None):
"""
Returns a list of the available DB engines.
:type engine: string
:param engine: The database engine to return.
:type engine_version: string
:param engine_version: The database engine version to return.
Example: `5.1.49`
:type db_parameter_group_family: string
:param db_parameter_group_family:
The name of a specific DB parameter group family to return details for.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more than the `MaxRecords` value is available, a
pagination token called a marker is included in the response so
that the following results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
request. If this parameter is specified, the response includes only
records beyond the marker, up to the value specified by
`MaxRecords`.
:type default_only: boolean
:param default_only: Indicates that only the default version of the
specified engine or engine and major version combination is
returned.
:type list_supported_character_sets: boolean
:param list_supported_character_sets: If this parameter is specified,
and if the requested engine supports the CharacterSetName parameter
for CreateDBInstance, the response includes a list of supported
character sets for each engine version.
"""
params = {}
if engine is not None:
params['Engine'] = engine
if engine_version is not None:
params['EngineVersion'] = engine_version
if db_parameter_group_family is not None:
params['DBParameterGroupFamily'] = db_parameter_group_family
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
if default_only is not None:
params['DefaultOnly'] = str(
default_only).lower()
if list_supported_character_sets is not None:
params['ListSupportedCharacterSets'] = str(
list_supported_character_sets).lower()
return self._make_request(
action='DescribeDBEngineVersions',
verb='POST',
path='/', params=params)
def describe_db_instances(self, db_instance_identifier=None,
filters=None, max_records=None, marker=None):
"""
Returns information about provisioned RDS instances. This API
supports pagination.
:type db_instance_identifier: string
:param db_instance_identifier:
The user-supplied instance identifier. If this parameter is specified,
information from only the specific DB instance is returned. This
parameter isn't case sensitive.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeDBInstances request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
"""
params = {}
if db_instance_identifier is not None:
params['DBInstanceIdentifier'] = db_instance_identifier
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBInstances',
verb='POST',
path='/', params=params)
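# Illustrative sketch: paging through instances with the marker token. The
# exact shape of the parsed response is not shown; the loop only illustrates
# how `max_records` and `marker` cooperate.
#
#   marker = None
#   while True:
#       page = conn.describe_db_instances(max_records=20, marker=marker)
#       marker = ...  # pull 'Marker' out of the parsed response, if present
#       if not marker:
#           break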
def describe_db_log_files(self, db_instance_identifier,
filename_contains=None, file_last_written=None,
file_size=None, max_records=None, marker=None):
"""
Returns a list of DB log files for the DB instance.
:type db_instance_identifier: string
:param db_instance_identifier:
The customer-assigned name of the DB instance that contains the log
files you want to list.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type filename_contains: string
:param filename_contains: Filters the available log files for log file
names that contain the specified string.
:type file_last_written: long
:param file_last_written: Filters the available log files for files
written since the specified date, in POSIX timestamp format.
:type file_size: long
:param file_size: Filters the available log files for files larger than
the specified size.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified MaxRecords
value, a pagination token called a marker is included in the
response so that the remaining results can be retrieved.
:type marker: string
:param marker: The pagination token provided in the previous request.
If this parameter is specified, the response includes only records
beyond the marker, up to MaxRecords.
"""
params = {'DBInstanceIdentifier': db_instance_identifier, }
if filename_contains is not None:
params['FilenameContains'] = filename_contains
if file_last_written is not None:
params['FileLastWritten'] = file_last_written
if file_size is not None:
params['FileSize'] = file_size
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBLogFiles',
verb='POST',
path='/', params=params)
def describe_db_parameter_groups(self, db_parameter_group_name=None,
filters=None, max_records=None,
marker=None):
"""
Returns a list of `DBParameterGroup` descriptions. If a
`DBParameterGroupName` is specified, the list will contain
only the description of the specified DB parameter group.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of a specific DB parameter group to return details for.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
`DescribeDBParameterGroups` request. If this parameter is
specified, the response includes only records beyond the marker, up
to the value specified by `MaxRecords`.
"""
params = {}
if db_parameter_group_name is not None:
params['DBParameterGroupName'] = db_parameter_group_name
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBParameterGroups',
verb='POST',
path='/', params=params)
def describe_db_parameters(self, db_parameter_group_name, source=None,
max_records=None, marker=None):
"""
Returns the detailed parameter list for a particular DB
parameter group.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of a specific DB parameter group to return details for.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type source: string
:param source: The parameter types to return.
Default: All parameter types returned
Valid Values: `user | system | engine-default`
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
`DescribeDBParameters` request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
"""
params = {'DBParameterGroupName': db_parameter_group_name, }
if source is not None:
params['Source'] = source
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBParameters',
verb='POST',
path='/', params=params)
def describe_db_security_groups(self, db_security_group_name=None,
filters=None, max_records=None,
marker=None):
"""
Returns a list of `DBSecurityGroup` descriptions. If a
`DBSecurityGroupName` is specified, the list will contain only
the descriptions of the specified DB security group.
:type db_security_group_name: string
:param db_security_group_name: The name of the DB security group to
return details for.
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeDBSecurityGroups request. If this parameter is specified,
the response includes only records beyond the marker, up to the
value specified by `MaxRecords`.
"""
params = {}
if db_security_group_name is not None:
params['DBSecurityGroupName'] = db_security_group_name
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBSecurityGroups',
verb='POST',
path='/', params=params)
def describe_db_snapshots(self, db_instance_identifier=None,
db_snapshot_identifier=None,
snapshot_type=None, filters=None,
max_records=None, marker=None):
"""
Returns information about DB snapshots. This API supports
pagination.
:type db_instance_identifier: string
:param db_instance_identifier:
A DB instance identifier to retrieve the list of DB snapshots for.
Cannot be used in conjunction with `DBSnapshotIdentifier`. This
parameter is not case sensitive.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type db_snapshot_identifier: string
:param db_snapshot_identifier:
A specific DB snapshot identifier to describe. Cannot be used in
conjunction with `DBInstanceIdentifier`. This value is stored as a
lowercase string.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
+ If this is the identifier of an automated snapshot, the
`SnapshotType` parameter must also be specified.
:type snapshot_type: string
:param snapshot_type: The type of snapshots that will be returned.
Values can be "automated" or "manual." If not specified, the
returned results will include all snapshot types.
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
`DescribeDBSnapshots` request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
"""
params = {}
if db_instance_identifier is not None:
params['DBInstanceIdentifier'] = db_instance_identifier
if db_snapshot_identifier is not None:
params['DBSnapshotIdentifier'] = db_snapshot_identifier
if snapshot_type is not None:
params['SnapshotType'] = snapshot_type
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBSnapshots',
verb='POST',
path='/', params=params)
def describe_db_subnet_groups(self, db_subnet_group_name=None,
filters=None, max_records=None,
marker=None):
"""
Returns a list of DBSubnetGroup descriptions. If a
DBSubnetGroupName is specified, the list will contain only the
descriptions of the specified DBSubnetGroup.
For an overview of CIDR ranges, go to the `Wikipedia
Tutorial`_.
:type db_subnet_group_name: string
:param db_subnet_group_name: The name of the DB subnet group to return
details for.
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeDBSubnetGroups request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
"""
params = {}
if db_subnet_group_name is not None:
params['DBSubnetGroupName'] = db_subnet_group_name
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBSubnetGroups',
verb='POST',
path='/', params=params)
def describe_engine_default_parameters(self, db_parameter_group_family,
max_records=None, marker=None):
"""
Returns the default engine and system parameter information
for the specified database engine.
:type db_parameter_group_family: string
:param db_parameter_group_family: The name of the DB parameter group
family.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
`DescribeEngineDefaultParameters` request. If this parameter is
specified, the response includes only records beyond the marker, up
to the value specified by `MaxRecords`.
"""
params = {
'DBParameterGroupFamily': db_parameter_group_family,
}
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeEngineDefaultParameters',
verb='POST',
path='/', params=params)
def describe_event_categories(self, source_type=None):
"""
Displays a list of categories for all event source types, or,
if specified, for a specified source type. You can see a list
of the event categories and source types in the ` Events`_
topic in the Amazon RDS User Guide.
:type source_type: string
:param source_type: The type of source that will be generating the
events.
Valid values: db-instance | db-parameter-group | db-security-group |
db-snapshot
"""
params = {}
if source_type is not None:
params['SourceType'] = source_type
return self._make_request(
action='DescribeEventCategories',
verb='POST',
path='/', params=params)
def describe_event_subscriptions(self, subscription_name=None,
filters=None, max_records=None,
marker=None):
"""
Lists all the subscription descriptions for a customer
account. The description for a subscription includes
SubscriptionName, SNSTopicARN, CustomerID, SourceType,
SourceID, CreationTime, and Status.
If you specify a SubscriptionName, lists the description for
that subscription.
:type subscription_name: string
:param subscription_name: The name of the RDS event notification
subscription you want to describe.
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeEventSubscriptions request. If this parameter is
specified, the response includes only records beyond the marker, up
to the value specified by `MaxRecords`.
"""
params = {}
if subscription_name is not None:
params['SubscriptionName'] = subscription_name
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeEventSubscriptions',
verb='POST',
path='/', params=params)
def describe_events(self, source_identifier=None, source_type=None,
start_time=None, end_time=None, duration=None,
event_categories=None, max_records=None, marker=None):
"""
Returns events related to DB instances, DB security groups, DB
snapshots, and DB parameter groups for the past 14 days.
Events specific to a particular DB instance, DB security
group, database snapshot, or DB parameter group can be
obtained by providing the name as a parameter. By default, the
past hour of events is returned.
:type source_identifier: string
:param source_identifier:
The identifier of the event source for which events will be returned.
If not specified, then all sources are included in the response.
Constraints:
+ If SourceIdentifier is supplied, SourceType must also be provided.
+ If the source type is `DBInstance`, then a `DBInstanceIdentifier`
must be supplied.
+ If the source type is `DBSecurityGroup`, a `DBSecurityGroupName` must
be supplied.
+ If the source type is `DBParameterGroup`, a `DBParameterGroupName`
must be supplied.
+ If the source type is `DBSnapshot`, a `DBSnapshotIdentifier` must be
supplied.
+ Cannot end with a hyphen or contain two consecutive hyphens.
:type source_type: string
:param source_type: The event source to retrieve events for. If no
value is specified, all events are returned.
:type start_time: timestamp
:param start_time: The beginning of the time interval to retrieve
events for, specified in ISO 8601 format. For more information
about ISO 8601, go to the `ISO8601 Wikipedia page.`_
Example: 2009-07-08T18:00Z
:type end_time: timestamp
:param end_time: The end of the time interval for which to retrieve
events, specified in ISO 8601 format. For more information about
ISO 8601, go to the `ISO8601 Wikipedia page.`_
Example: 2009-07-08T18:00Z
:type duration: integer
:param duration: The number of minutes to retrieve events for.
Default: 60
:type event_categories: list
:param event_categories: A list of event categories that trigger
notifications for a event notification subscription.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeEvents request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
"""
params = {}
if source_identifier is not None:
params['SourceIdentifier'] = source_identifier
if source_type is not None:
params['SourceType'] = source_type
if start_time is not None:
params['StartTime'] = start_time
if end_time is not None:
params['EndTime'] = end_time
if duration is not None:
params['Duration'] = duration
if event_categories is not None:
self.build_list_params(params,
event_categories,
'EventCategories.member')
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeEvents',
verb='POST',
path='/', params=params)
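# Usage sketch (illustrative only): `conn` is an assumed, authenticated
# instance of this connection class; the identifier is a placeholder.
# Lists the past hour of events for a single DB instance:
#
#     events = conn.describe_events(source_identifier='mydbinstance',
#                                   source_type='db-instance',
#                                   duration=60)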
def describe_option_group_options(self, engine_name,
major_engine_version=None,
max_records=None, marker=None):
"""
Describes all available options.
:type engine_name: string
:param engine_name: A required parameter. Options available for the
given Engine name will be described.
:type major_engine_version: string
:param major_engine_version: If specified, filters the results to
include only options for the specified major engine version.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
request. If this parameter is specified, the response includes only
records beyond the marker, up to the value specified by
`MaxRecords`.
"""
params = {'EngineName': engine_name, }
if major_engine_version is not None:
params['MajorEngineVersion'] = major_engine_version
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeOptionGroupOptions',
verb='POST',
path='/', params=params)
def describe_option_groups(self, option_group_name=None, filters=None,
marker=None, max_records=None,
engine_name=None, major_engine_version=None):
"""
Describes the available option groups.
:type option_group_name: string
:param option_group_name: The name of the option group to describe.
Cannot be supplied together with EngineName or MajorEngineVersion.
:type filters: list
:param filters:
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeOptionGroups request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type engine_name: string
:param engine_name: Filters the list of option groups to only include
groups associated with a specific database engine.
:type major_engine_version: string
:param major_engine_version: Filters the list of option groups to only
include groups associated with a specific database engine version.
If specified, then EngineName must also be specified.
"""
params = {}
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if marker is not None:
params['Marker'] = marker
if max_records is not None:
params['MaxRecords'] = max_records
if engine_name is not None:
params['EngineName'] = engine_name
if major_engine_version is not None:
params['MajorEngineVersion'] = major_engine_version
return self._make_request(
action='DescribeOptionGroups',
verb='POST',
path='/', params=params)
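# Usage sketch (illustrative only; `conn` and the engine values are
# placeholders/assumptions). Filters option groups by engine and version,
# which cannot be combined with `option_group_name`:
#
#     groups = conn.describe_option_groups(engine_name='mysql',
#                                          major_engine_version='5.6')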
def describe_orderable_db_instance_options(self, engine,
engine_version=None,
db_instance_class=None,
license_model=None, vpc=None,
max_records=None, marker=None):
"""
Returns a list of orderable DB instance options for the
specified engine.
:type engine: string
:param engine: The name of the engine to retrieve DB instance options
for.
:type engine_version: string
:param engine_version: The engine version filter value. Specify this
parameter to show only the available offerings matching the
specified engine version.
:type db_instance_class: string
:param db_instance_class: The DB instance class filter value. Specify
this parameter to show only the available offerings matching the
specified DB instance class.
:type license_model: string
:param license_model: The license model filter value. Specify this
parameter to show only the available offerings matching the
specified license model.
:type vpc: boolean
:param vpc: The VPC filter value. Specify this parameter to show only
the available VPC or non-VPC offerings.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeOrderableDBInstanceOptions request. If this parameter is
specified, the response includes only records beyond the marker, up
to the value specified by `MaxRecords`.
"""
params = {'Engine': engine, }
if engine_version is not None:
params['EngineVersion'] = engine_version
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if license_model is not None:
params['LicenseModel'] = license_model
if vpc is not None:
params['Vpc'] = str(
vpc).lower()
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeOrderableDBInstanceOptions',
verb='POST',
path='/', params=params)
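# Usage sketch (illustrative only; `conn` is an assumed connection object,
# 'mysql' is an example engine name). Lists orderable VPC offerings:
#
#     options = conn.describe_orderable_db_instance_options('mysql',
#                                                           vpc=True)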
def describe_reserved_db_instances(self, reserved_db_instance_id=None,
reserved_db_instances_offering_id=None,
db_instance_class=None, duration=None,
product_description=None,
offering_type=None, multi_az=None,
filters=None, max_records=None,
marker=None):
"""
Returns information about reserved DB instances for this
account, or about a specified reserved DB instance.
:type reserved_db_instance_id: string
:param reserved_db_instance_id: The reserved DB instance identifier
filter value. Specify this parameter to show only the reservation
that matches the specified reservation ID.
:type reserved_db_instances_offering_id: string
:param reserved_db_instances_offering_id: The offering identifier
filter value. Specify this parameter to show only purchased
reservations matching the specified offering identifier.
:type db_instance_class: string
:param db_instance_class: The DB instance class filter value. Specify
this parameter to show only those reservations matching the
specified DB instances class.
:type duration: string
:param duration: The duration filter value, specified in years or
seconds. Specify this parameter to show only reservations for this
duration.
Valid Values: `1 | 3 | 31536000 | 94608000`
:type product_description: string
:param product_description: The product description filter value.
Specify this parameter to show only those reservations matching the
specified product description.
:type offering_type: string
:param offering_type: The offering type filter value. Specify this
parameter to show only the available offerings matching the
specified offering type.
Valid Values: `"Light Utilization" | "Medium Utilization" | "Heavy
Utilization" `
:type multi_az: boolean
:param multi_az: The Multi-AZ filter value. Specify this parameter to
show only those reservations matching the specified Multi-AZ
parameter.
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more than the `MaxRecords` value is available, a
pagination token called a marker is included in the response so
that the following results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
request. If this parameter is specified, the response includes only
records beyond the marker, up to the value specified by
`MaxRecords`.
"""
params = {}
if reserved_db_instance_id is not None:
params['ReservedDBInstanceId'] = reserved_db_instance_id
if reserved_db_instances_offering_id is not None:
params['ReservedDBInstancesOfferingId'] = reserved_db_instances_offering_id
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if duration is not None:
params['Duration'] = duration
if product_description is not None:
params['ProductDescription'] = product_description
if offering_type is not None:
params['OfferingType'] = offering_type
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeReservedDBInstances',
verb='POST',
path='/', params=params)
def describe_reserved_db_instances_offerings(self,
reserved_db_instances_offering_id=None,
db_instance_class=None,
duration=None,
product_description=None,
offering_type=None,
multi_az=None,
max_records=None,
marker=None):
"""
Lists available reserved DB instance offerings.
:type reserved_db_instances_offering_id: string
:param reserved_db_instances_offering_id: The offering identifier
filter value. Specify this parameter to show only the available
offering that matches the specified reservation identifier.
Example: `438012d3-4052-4cc7-b2e3-8d3372e0e706`
:type db_instance_class: string
:param db_instance_class: The DB instance class filter value. Specify
this parameter to show only the available offerings matching the
specified DB instance class.
:type duration: string
:param duration: Duration filter value, specified in years or seconds.
Specify this parameter to show only reservations for this duration.
Valid Values: `1 | 3 | 31536000 | 94608000`
:type product_description: string
:param product_description: Product description filter value. Specify
this parameter to show only the available offerings matching the
specified product description.
:type offering_type: string
:param offering_type: The offering type filter value. Specify this
parameter to show only the available offerings matching the
specified offering type.
Valid Values: `"Light Utilization" | "Medium Utilization" | "Heavy
Utilization" `
:type multi_az: boolean
:param multi_az: The Multi-AZ filter value. Specify this parameter to
show only the available offerings matching the specified Multi-AZ
parameter.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more than the `MaxRecords` value is available, a
pagination token called a marker is included in the response so
that the following results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
request. If this parameter is specified, the response includes only
records beyond the marker, up to the value specified by
`MaxRecords`.
"""
params = {}
if reserved_db_instances_offering_id is not None:
params['ReservedDBInstancesOfferingId'] = reserved_db_instances_offering_id
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if duration is not None:
params['Duration'] = duration
if product_description is not None:
params['ProductDescription'] = product_description
if offering_type is not None:
params['OfferingType'] = offering_type
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeReservedDBInstancesOfferings',
verb='POST',
path='/', params=params)
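# Usage sketch (illustrative only; `conn` and the filter values are
# assumptions, not from the original source):
#
#     offerings = conn.describe_reserved_db_instances_offerings(
#         product_description='mysql', multi_az=False)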
def download_db_log_file_portion(self, db_instance_identifier,
log_file_name, marker=None,
number_of_lines=None):
"""
Downloads all or a portion of the specified log file.
:type db_instance_identifier: string
:param db_instance_identifier:
The customer-assigned name of the DB instance that contains the log
files you want to list.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type log_file_name: string
:param log_file_name: The name of the log file to be downloaded.
:type marker: string
:param marker: The pagination token provided in the previous request.
If this parameter is specified the response includes only records
beyond the marker, up to MaxRecords.
:type number_of_lines: integer
:param number_of_lines: The number of lines to download.
"""
params = {
'DBInstanceIdentifier': db_instance_identifier,
'LogFileName': log_file_name,
}
if marker is not None:
params['Marker'] = marker
if number_of_lines is not None:
params['NumberOfLines'] = number_of_lines
return self._make_request(
action='DownloadDBLogFilePortion',
verb='POST',
path='/', params=params)
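# Usage sketch (illustrative only): `conn` is an assumed connection object;
# the instance identifier and log file name are placeholder values.
#
#     portion = conn.download_db_log_file_portion('mydbinstance',
#                                                 'error/mysql-error.log',
#                                                 number_of_lines=100)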
def list_tags_for_resource(self, resource_name):
"""
Lists all tags on an Amazon RDS resource.
For an overview on tagging an Amazon RDS resource, see
`Tagging Amazon RDS Resources`_.
:type resource_name: string
:param resource_name: The Amazon RDS resource with tags to be listed.
This value is an Amazon Resource Name (ARN). For information about
creating an ARN, see ` Constructing an RDS Amazon Resource Name
(ARN)`_.
"""
params = {'ResourceName': resource_name, }
return self._make_request(
action='ListTagsForResource',
verb='POST',
path='/', params=params)
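# Usage sketch (illustrative only): `conn`, the region, account number and
# instance name in the ARN below are placeholder assumptions.
#
#     tags = conn.list_tags_for_resource(
#         'arn:aws:rds:us-east-1:123456789012:db:mydbinstance')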
def modify_db_instance(self, db_instance_identifier,
allocated_storage=None, db_instance_class=None,
db_security_groups=None,
vpc_security_group_ids=None,
apply_immediately=None, master_user_password=None,
db_parameter_group_name=None,
backup_retention_period=None,
preferred_backup_window=None,
preferred_maintenance_window=None, multi_az=None,
engine_version=None,
allow_major_version_upgrade=None,
auto_minor_version_upgrade=None, iops=None,
option_group_name=None,
new_db_instance_identifier=None):
"""
Modify settings for a DB instance. You can change one or more
database configuration parameters by specifying these
parameters and the new values in the request.
:type db_instance_identifier: string
:param db_instance_identifier:
The DB instance identifier. This value is stored as a lowercase string.
Constraints:
+ Must be the identifier for an existing DB instance
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type allocated_storage: integer
:param allocated_storage: The new storage capacity of the RDS instance.
Changing this parameter does not result in an outage and the change
is applied during the next maintenance window unless the
`ApplyImmediately` parameter is set to `True` for this request.
**MySQL**
Default: Uses existing setting
Valid Values: 5-1024
Constraints: Value supplied must be at least 10% greater than the
current value. Values that are not at least 10% greater than the
existing value are rounded up so that they are 10% greater than the
current value.
Type: Integer
**Oracle**
Default: Uses existing setting
Valid Values: 10-1024
Constraints: Value supplied must be at least 10% greater than the
current value. Values that are not at least 10% greater than the
existing value are rounded up so that they are 10% greater than the
current value.
**SQL Server**
Cannot be modified.
If you choose to migrate your DB instance from using standard storage
to using Provisioned IOPS, or from using Provisioned IOPS to using
standard storage, the process can take time. The duration of the
migration depends on several factors such as database load, storage
size, storage type (standard or Provisioned IOPS), amount of IOPS
provisioned (if any), and the number of prior scale storage
operations. Typical migration times are under 24 hours, but the
process can take up to several days in some cases. During the
migration, the DB instance will be available for use, but may
experience performance degradation. While the migration takes
place, nightly backups for the instance will be suspended. No other
Amazon RDS operations can take place for the instance, including
modifying the instance, rebooting the instance, deleting the
instance, creating a read replica for the instance, and creating a
DB snapshot of the instance.
:type db_instance_class: string
:param db_instance_class: The new compute and memory capacity of the DB
instance. To determine the instance classes that are available for
a particular DB engine, use the DescribeOrderableDBInstanceOptions
action.
Passing a value for this parameter causes an outage during the change
and is applied during the next maintenance window, unless the
`ApplyImmediately` parameter is specified as `True` for this
request.
Default: Uses existing setting
Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large |
db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge`
:type db_security_groups: list
:param db_security_groups:
A list of DB security groups to authorize on this DB instance. Changing
this parameter does not result in an outage and the change is
asynchronously applied as soon as possible.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type vpc_security_group_ids: list
:param vpc_security_group_ids:
A list of EC2 VPC security groups to authorize on this DB instance.
This change is asynchronously applied as soon as possible.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type apply_immediately: boolean
:param apply_immediately: Specifies whether or not the modifications in
this request and any pending modifications are asynchronously
applied as soon as possible, regardless of the
`PreferredMaintenanceWindow` setting for the DB instance.
If this parameter is passed as `False`, changes to the DB instance are
applied on the next call to RebootDBInstance, the next maintenance
reboot, or the next failure reboot, whichever occurs first. See
each parameter to determine when a change is applied.
Default: `False`
:type master_user_password: string
:param master_user_password:
The new password for the DB instance master user. Can be any printable
ASCII character except "/", '"', or "@".
Changing this parameter does not result in an outage and the change is
asynchronously applied as soon as possible. Between the time of the
request and the completion of the request, the `MasterUserPassword`
element exists in the `PendingModifiedValues` element of the
operation response.
Default: Uses existing setting
Constraints: Must be 8 to 41 alphanumeric characters (MySQL), 8 to 30
alphanumeric characters (Oracle), or 8 to 128 alphanumeric
characters (SQL Server).
Amazon RDS API actions never return the password, so this action
provides a way to regain access to a master instance user if the
password is lost.
:type db_parameter_group_name: string
:param db_parameter_group_name: The name of the DB parameter group to
apply to this DB instance. Changing this parameter does not result
in an outage and the change is applied during the next maintenance
window unless the `ApplyImmediately` parameter is set to `True` for
this request.
Default: Uses existing setting
Constraints: The DB parameter group must be in the same DB parameter
group family as this DB instance.
:type backup_retention_period: integer
:param backup_retention_period:
The number of days to retain automated backups. Setting this parameter
to a positive number enables backups. Setting this parameter to 0
disables automated backups.
Changing this parameter can result in an outage if you change from 0 to
a non-zero value or from a non-zero value to 0. These changes are
applied during the next maintenance window unless the
`ApplyImmediately` parameter is set to `True` for this request. If
you change the parameter from one non-zero value to another non-
zero value, the change is asynchronously applied as soon as
possible.
Default: Uses existing setting
Constraints:
+ Must be a value from 0 to 8
+ Cannot be set to 0 if the DB instance is a master instance with read
replicas or if the DB instance is a read replica
:type preferred_backup_window: string
:param preferred_backup_window:
The daily time range during which automated backups are created if
automated backups are enabled, as determined by the
`BackupRetentionPeriod`. Changing this parameter does not result in
an outage and the change is asynchronously applied as soon as
possible.
Constraints:
+ Must be in the format hh24:mi-hh24:mi
+ Times should be Universal Time Coordinated (UTC)
+ Must not conflict with the preferred maintenance window
+ Must be at least 30 minutes
:type preferred_maintenance_window: string
:param preferred_maintenance_window: The weekly time range (in UTC)
during which system maintenance can occur, which may result in an
outage. Changing this parameter does not result in an outage,
except in the following situation, and the change is asynchronously
applied as soon as possible. If there are pending actions that
cause a reboot, and the maintenance window is changed to include
the current time, then changing this parameter will cause a reboot
of the DB instance. If moving this window to the current time,
there must be at least 30 minutes between the current time and end
of the window to ensure pending changes are applied.
Default: Uses existing setting
Format: ddd:hh24:mi-ddd:hh24:mi
Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun
Constraints: Must be at least 30 minutes
:type multi_az: boolean
:param multi_az: Specifies if the DB instance is a Multi-AZ deployment.
Changing this parameter does not result in an outage and the change
is applied during the next maintenance window unless the
`ApplyImmediately` parameter is set to `True` for this request.
Constraints: Cannot be specified if the DB instance is a read replica.
:type engine_version: string
:param engine_version: The version number of the database engine to
upgrade to. Changing this parameter results in an outage and the
change is applied during the next maintenance window unless the
`ApplyImmediately` parameter is set to `True` for this request.
For major version upgrades, if a non-default DB parameter group is
currently in use, a new DB parameter group in the DB parameter
group family for the new engine version must be specified. The new
DB parameter group can be the default for that DB parameter group
family.
Example: `5.1.42`
:type allow_major_version_upgrade: boolean
:param allow_major_version_upgrade: Indicates that major version
upgrades are allowed. Changing this parameter does not result in an
outage and the change is asynchronously applied as soon as
possible.
Constraints: This parameter must be set to true when specifying a value
for the EngineVersion parameter that is a different major version
than the DB instance's current version.
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Indicates that minor version
upgrades will be applied automatically to the DB instance during
the maintenance window. Changing this parameter does not result in
an outage except in the following case and the change is
asynchronously applied as soon as possible. An outage will result
if this parameter is set to `True` during the maintenance window,
and a newer minor version is available, and RDS has enabled auto
patching for that engine version.
:type iops: integer
:param iops: The new Provisioned IOPS (I/O operations per second) value
for the RDS instance. Changing this parameter does not result in an
outage and the change is applied during the next maintenance window
unless the `ApplyImmediately` parameter is set to `True` for this
request.
Default: Uses existing setting
Constraints: Value supplied must be at least 10% greater than the
current value. Values that are not at least 10% greater than the
existing value are rounded up so that they are 10% greater than the
current value.
Type: Integer
If you choose to migrate your DB instance from using standard storage
to using Provisioned IOPS, or from using Provisioned IOPS to using
standard storage, the process can take time. The duration of the
migration depends on several factors such as database load, storage
size, storage type (standard or Provisioned IOPS), amount of IOPS
provisioned (if any), and the number of prior scale storage
operations. Typical migration times are under 24 hours, but the
process can take up to several days in some cases. During the
migration, the DB instance will be available for use, but may
experience performance degradation. While the migration takes
place, nightly backups for the instance will be suspended. No other
Amazon RDS operations can take place for the instance, including
modifying the instance, rebooting the instance, deleting the
instance, creating a read replica for the instance, and creating a
DB snapshot of the instance.
:type option_group_name: string
:param option_group_name: Indicates that the DB instance should be
associated with the specified option group. Changing this parameter
does not result in an outage except in the following case and the
change is applied during the next maintenance window unless the
`ApplyImmediately` parameter is set to `True` for this request. If
the parameter change results in an option group that enables OEM,
this change can cause a brief (sub-second) period during which new
connections are rejected but existing connections are not
interrupted.
Permanent options, such as the TDE option for Oracle Advanced Security
TDE, cannot be removed from an option group, and that option group
cannot be removed from a DB instance once it is associated with a
DB instance
:type new_db_instance_identifier: string
:param new_db_instance_identifier:
The new DB instance identifier for the DB instance when renaming a DB
Instance. This value is stored as a lowercase string.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
"""
params = {'DBInstanceIdentifier': db_instance_identifier, }
if allocated_storage is not None:
params['AllocatedStorage'] = allocated_storage
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if db_security_groups is not None:
self.build_list_params(params,
db_security_groups,
'DBSecurityGroups.member')
if vpc_security_group_ids is not None:
self.build_list_params(params,
vpc_security_group_ids,
'VpcSecurityGroupIds.member')
if apply_immediately is not None:
params['ApplyImmediately'] = str(
apply_immediately).lower()
if master_user_password is not None:
params['MasterUserPassword'] = master_user_password
if db_parameter_group_name is not None:
params['DBParameterGroupName'] = db_parameter_group_name
if backup_retention_period is not None:
params['BackupRetentionPeriod'] = backup_retention_period
if preferred_backup_window is not None:
params['PreferredBackupWindow'] = preferred_backup_window
if preferred_maintenance_window is not None:
params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if engine_version is not None:
params['EngineVersion'] = engine_version
if allow_major_version_upgrade is not None:
params['AllowMajorVersionUpgrade'] = str(
allow_major_version_upgrade).lower()
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
if iops is not None:
params['Iops'] = iops
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if new_db_instance_identifier is not None:
params['NewDBInstanceIdentifier'] = new_db_instance_identifier
return self._make_request(
action='ModifyDBInstance',
verb='POST',
path='/', params=params)
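# Usage sketch (illustrative only; `conn` and the values are assumptions).
# Grows storage and applies the change immediately rather than waiting for
# the maintenance window:
#
#     conn.modify_db_instance('mydbinstance',
#                             allocated_storage=50,
#                             apply_immediately=True)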
def modify_db_parameter_group(self, db_parameter_group_name, parameters):
"""
Modifies the parameters of a DB parameter group. To modify
more than one parameter, submit a list of the following:
`ParameterName`, `ParameterValue`, and `ApplyMethod`. A
maximum of 20 parameters can be modified in a single request.
The `apply-immediate` method can be used only for dynamic
parameters; the `pending-reboot` method can be used with MySQL
and Oracle DB instances for either dynamic or static
parameters. For Microsoft SQL Server DB instances, the
`pending-reboot` method can be used only for static
parameters.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of the DB parameter group.
Constraints:
+ Must be the name of an existing DB parameter group
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type parameters: list
:param parameters:
An array of parameter names, values, and the apply method for the
parameter update. At least one parameter name, value, and apply
method must be supplied; subsequent arguments are optional. A
maximum of 20 parameters may be modified in a single request.
Valid Values (for the application method): `immediate | pending-reboot`
You can use the immediate value with dynamic parameters only. You can
use the pending-reboot value for both dynamic and static
parameters, and changes are applied when DB instance reboots.
"""
params = {'DBParameterGroupName': db_parameter_group_name, }
self.build_complex_list_params(
params, parameters,
'Parameters.member',
('ParameterName', 'ParameterValue', 'Description', 'Source', 'ApplyType', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion', 'ApplyMethod'))
return self._make_request(
action='ModifyDBParameterGroup',
verb='POST',
path='/', params=params)
def modify_db_subnet_group(self, db_subnet_group_name, subnet_ids,
db_subnet_group_description=None):
"""
Modifies an existing DB subnet group. DB subnet groups must
contain at least one subnet in at least two AZs in the region.
:type db_subnet_group_name: string
:param db_subnet_group_name: The name for the DB subnet group. This
value is stored as a lowercase string.
Constraints: Must contain no more than 255 alphanumeric characters or
hyphens. Must not be "Default".
Example: `mySubnetgroup`
:type db_subnet_group_description: string
:param db_subnet_group_description: The description for the DB subnet
group.
:type subnet_ids: list
:param subnet_ids: The EC2 subnet IDs for the DB subnet group.
"""
params = {'DBSubnetGroupName': db_subnet_group_name, }
self.build_list_params(params,
subnet_ids,
'SubnetIds.member')
if db_subnet_group_description is not None:
params['DBSubnetGroupDescription'] = db_subnet_group_description
return self._make_request(
action='ModifyDBSubnetGroup',
verb='POST',
path='/', params=params)
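# Usage sketch (illustrative only; `conn`, the group name and subnet IDs are
# placeholder values). Replaces the subnets of an existing subnet group:
#
#     conn.modify_db_subnet_group('mysubnetgroup',
#                                 ['subnet-11111111', 'subnet-22222222'])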
def modify_event_subscription(self, subscription_name,
sns_topic_arn=None, source_type=None,
event_categories=None, enabled=None):
"""
Modifies an existing RDS event notification subscription. Note
that you cannot modify the source identifiers using this call;
to change source identifiers for a subscription, use the
AddSourceIdentifierToSubscription and
RemoveSourceIdentifierFromSubscription calls.
You can see a list of the event categories for a given
SourceType in the `Events`_ topic in the Amazon RDS User Guide
or by using the **DescribeEventCategories** action.
:type subscription_name: string
:param subscription_name: The name of the RDS event notification
subscription.
:type sns_topic_arn: string
:param sns_topic_arn: The Amazon Resource Name (ARN) of the SNS topic
created for event notification. The ARN is created by Amazon SNS
when you create a topic and subscribe to it.
:type source_type: string
:param source_type: The type of source that will be generating the
events. For example, if you want to be notified of events generated
by a DB instance, you would set this parameter to db-instance. If
this value is not specified, all events are returned.
Valid values: db-instance | db-parameter-group | db-security-group |
db-snapshot
:type event_categories: list
:param event_categories: A list of event categories for a SourceType
that you want to subscribe to. You can see a list of the categories
for a given SourceType in the `Events`_ topic in the Amazon RDS
User Guide or by using the **DescribeEventCategories** action.
:type enabled: boolean
:param enabled: A Boolean value; set to **true** to activate the
subscription.
"""
params = {'SubscriptionName': subscription_name, }
if sns_topic_arn is not None:
params['SnsTopicArn'] = sns_topic_arn
if source_type is not None:
params['SourceType'] = source_type
if event_categories is not None:
self.build_list_params(params,
event_categories,
'EventCategories.member')
if enabled is not None:
params['Enabled'] = str(
enabled).lower()
return self._make_request(
action='ModifyEventSubscription',
verb='POST',
path='/', params=params)
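# Usage sketch (illustrative only): `conn`, the subscription name and the
# event categories are assumed example values.
#
#     conn.modify_event_subscription('mysubscription',
#                                    event_categories=['backup', 'failure'],
#                                    enabled=True)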
def modify_option_group(self, option_group_name, options_to_include=None,
options_to_remove=None, apply_immediately=None):
"""
Modifies an existing option group.
:type option_group_name: string
:param option_group_name: The name of the option group to be modified.
Permanent options, such as the TDE option for Oracle Advanced Security
TDE, cannot be removed from an option group, and that option group
cannot be removed from a DB instance once it is associated with a
DB instance
:type options_to_include: list
:param options_to_include: Options in this list are added to the option
group or, if already present, the specified configuration is used
to update the existing configuration.
:type options_to_remove: list
:param options_to_remove: Options in this list are removed from the
option group.
:type apply_immediately: boolean
:param apply_immediately: Indicates whether the changes should be
applied immediately, or during the next maintenance window for each
instance associated with the option group.
"""
params = {'OptionGroupName': option_group_name, }
if options_to_include is not None:
self.build_complex_list_params(
params, options_to_include,
'OptionsToInclude.member',
('OptionName', 'Port', 'DBSecurityGroupMemberships', 'VpcSecurityGroupMemberships', 'OptionSettings'))
if options_to_remove is not None:
self.build_list_params(params,
options_to_remove,
'OptionsToRemove.member')
if apply_immediately is not None:
params['ApplyImmediately'] = str(
apply_immediately).lower()
return self._make_request(
action='ModifyOptionGroup',
verb='POST',
path='/', params=params)
def promote_read_replica(self, db_instance_identifier,
backup_retention_period=None,
preferred_backup_window=None):
"""
Promotes a read replica DB instance to a standalone DB
instance.
:type db_instance_identifier: string
:param db_instance_identifier: The DB instance identifier. This value
is stored as a lowercase string.
Constraints:
+ Must be the identifier for an existing read replica DB instance
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
Example: mydbinstance
:type backup_retention_period: integer
:param backup_retention_period:
The number of days to retain automated backups. Setting this parameter
to a positive number enables backups. Setting this parameter to 0
disables automated backups.
Default: 1
Constraints:
+ Must be a value from 0 to 8
:type preferred_backup_window: string
:param preferred_backup_window: The daily time range during which
automated backups are created if automated backups are enabled,
using the `BackupRetentionPeriod` parameter.
Default: A 30-minute window selected at random from an 8-hour block of
time per region. See the Amazon RDS User Guide for the time blocks
for each region from which the default backup windows are assigned.
Constraints: Must be in the format `hh24:mi-hh24:mi`. Times should be
Universal Time Coordinated (UTC). Must not conflict with the
preferred maintenance window. Must be at least 30 minutes.
"""
params = {'DBInstanceIdentifier': db_instance_identifier, }
if backup_retention_period is not None:
params['BackupRetentionPeriod'] = backup_retention_period
if preferred_backup_window is not None:
params['PreferredBackupWindow'] = preferred_backup_window
return self._make_request(
action='PromoteReadReplica',
verb='POST',
path='/', params=params)
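# Usage sketch (illustrative only; `conn` and the replica identifier are
# assumptions). Promotes a read replica and keeps one day of backups:
#
#     conn.promote_read_replica('myreadreplica',
#                               backup_retention_period=1)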
def purchase_reserved_db_instances_offering(self,
reserved_db_instances_offering_id,
reserved_db_instance_id=None,
db_instance_count=None,
tags=None):
"""
Purchases a reserved DB instance offering.
:type reserved_db_instances_offering_id: string
:param reserved_db_instances_offering_id: The ID of the Reserved DB
instance offering to purchase.
Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706
:type reserved_db_instance_id: string
:param reserved_db_instance_id: Customer-specified identifier to track
this reservation.
Example: myreservationID
:type db_instance_count: integer
:param db_instance_count: The number of instances to reserve.
Default: `1`
:type tags: list
:param tags: A list of tags.
"""
params = {
'ReservedDBInstancesOfferingId': reserved_db_instances_offering_id,
}
if reserved_db_instance_id is not None:
params['ReservedDBInstanceId'] = reserved_db_instance_id
if db_instance_count is not None:
params['DBInstanceCount'] = db_instance_count
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='PurchaseReservedDBInstancesOffering',
verb='POST',
path='/', params=params)
def reboot_db_instance(self, db_instance_identifier, force_failover=None):
"""
Rebooting a DB instance restarts the database engine service.
A reboot also applies to the DB instance any modifications to
the associated DB parameter group that were pending. Rebooting
a DB instance results in a momentary outage of the instance,
during which the DB instance status is set to rebooting. If
the RDS instance is configured for MultiAZ, it is possible
that the reboot will be conducted through a failover. An
Amazon RDS event is created when the reboot is completed.
If your DB instance is deployed in multiple Availability
Zones, you can force a failover from one AZ to the other
during the reboot. You might force a failover to test the
availability of your DB instance deployment or to restore
operations to the original AZ after a failover occurs.
The time required to reboot is a function of the specific
database engine's crash recovery process. To improve the
reboot time, we recommend that you reduce database activities
as much as possible during the reboot process to reduce
rollback activity for in-transit transactions.
:type db_instance_identifier: string
:param db_instance_identifier:
The DB instance identifier. This parameter is stored as a lowercase
string.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type force_failover: boolean
:param force_failover: When `True`, the reboot will be conducted
through a MultiAZ failover.
Constraint: You cannot specify `True` if the instance is not configured
for MultiAZ.
"""
params = {'DBInstanceIdentifier': db_instance_identifier, }
if force_failover is not None:
params['ForceFailover'] = str(
force_failover).lower()
return self._make_request(
action='RebootDBInstance',
verb='POST',
path='/', params=params)
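# Usage sketch (illustrative only; `conn` and the identifier are assumed
# placeholder values). `force_failover=True` is only valid for Multi-AZ
# instances, so the plain form is shown:
#
#     conn.reboot_db_instance('mydbinstance')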
def remove_source_identifier_from_subscription(self, subscription_name,
source_identifier):
"""
Removes a source identifier from an existing RDS event
notification subscription.
:type subscription_name: string
:param subscription_name: The name of the RDS event notification
subscription you want to remove a source identifier from.
:type source_identifier: string
:param source_identifier: The source identifier to be removed from the
subscription, such as the **DB instance identifier** for a DB
instance or the name of a security group.
"""
params = {
'SubscriptionName': subscription_name,
'SourceIdentifier': source_identifier,
}
return self._make_request(
action='RemoveSourceIdentifierFromSubscription',
verb='POST',
path='/', params=params)
def remove_tags_from_resource(self, resource_name, tag_keys):
"""
Removes metadata tags from an Amazon RDS resource.
For an overview on tagging an Amazon RDS resource, see
`Tagging Amazon RDS Resources`_.
:type resource_name: string
:param resource_name: The Amazon RDS resource the tags will be removed
from. This value is an Amazon Resource Name (ARN). For information
about creating an ARN, see ` Constructing an RDS Amazon Resource
Name (ARN)`_.
:type tag_keys: list
:param tag_keys: The tag key (name) of the tag to be removed.
"""
params = {'ResourceName': resource_name, }
self.build_list_params(params,
tag_keys,
'TagKeys.member')
return self._make_request(
action='RemoveTagsFromResource',
verb='POST',
path='/', params=params)
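# Usage sketch (illustrative only): `conn`, the ARN components and the tag
# keys are placeholder assumptions.
#
#     conn.remove_tags_from_resource(
#         'arn:aws:rds:us-east-1:123456789012:db:mydbinstance',
#         ['environment', 'owner'])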
def reset_db_parameter_group(self, db_parameter_group_name,
reset_all_parameters=None, parameters=None):
"""
Modifies the parameters of a DB parameter group to the
engine/system default value. To reset specific parameters
submit a list of the following: `ParameterName` and
`ApplyMethod`. To reset the entire DB parameter group, specify
the `DBParameterGroup` name and `ResetAllParameters`
parameters. When resetting the entire group, dynamic
parameters are updated immediately and static parameters are
set to `pending-reboot` to take effect on the next DB instance
restart or `RebootDBInstance` request.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of the DB parameter group.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type reset_all_parameters: boolean
:param reset_all_parameters: Specifies whether ( `True`) or not (
`False`) to reset all parameters in the DB parameter group to
default values.
Default: `True`
:type parameters: list
:param parameters: An array of parameter names, values, and the apply
method for the parameter update. At least one parameter name,
value, and apply method must be supplied; subsequent arguments are
optional. A maximum of 20 parameters may be modified in a single
request.
**MySQL**
Valid Values (for Apply method): `immediate` | `pending-reboot`
You can use the immediate value with dynamic parameters only. You can
use the `pending-reboot` value for both dynamic and static
parameters, and changes are applied when DB instance reboots.
**Oracle**
Valid Values (for Apply method): `pending-reboot`
"""
params = {'DBParameterGroupName': db_parameter_group_name, }
if reset_all_parameters is not None:
params['ResetAllParameters'] = str(
reset_all_parameters).lower()
if parameters is not None:
self.build_complex_list_params(
params, parameters,
'Parameters.member',
('ParameterName', 'ParameterValue', 'Description', 'Source', 'ApplyType', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion', 'ApplyMethod'))
return self._make_request(
action='ResetDBParameterGroup',
verb='POST',
path='/', params=params)
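# Usage sketch (illustrative only; `conn` and the group name are assumed).
# Resets every parameter in the group to its engine default:
#
#     conn.reset_db_parameter_group('mydbparamgroup',
#                                   reset_all_parameters=True)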
def restore_db_instance_from_db_snapshot(self, db_instance_identifier,
db_snapshot_identifier,
db_instance_class=None,
port=None,
availability_zone=None,
db_subnet_group_name=None,
multi_az=None,
publicly_accessible=None,
auto_minor_version_upgrade=None,
license_model=None,
db_name=None, engine=None,
iops=None,
option_group_name=None,
tags=None):
"""
Creates a new DB instance from a DB snapshot. The target
database is created from the source database restore point
with the same configuration as the original source database,
except that the new RDS instance is created with the default
security group.
:type db_instance_identifier: string
:param db_instance_identifier:
The name of the new DB instance to be created from the DB snapshot. This
value is stored as a lowercase string.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type db_snapshot_identifier: string
:param db_snapshot_identifier: The identifier of the DB snapshot to
restore from. This parameter isn't case sensitive.
Constraints:
+ Must contain from 1 to 255 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
Example: `my-snapshot-id`
:type db_instance_class: string
:param db_instance_class: The compute and memory capacity of the Amazon
RDS DB instance.
Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large |
db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge`
:type port: integer
:param port: The port number on which the database accepts connections.
Default: The same port as the original DB instance
Constraints: Value must be `1150-65535`
:type availability_zone: string
:param availability_zone: The EC2 Availability Zone that the database
instance will be created in.
Default: A random, system-chosen Availability Zone.
Constraint: You cannot specify the AvailabilityZone parameter if the
MultiAZ parameter is set to `True`.
Example: `us-east-1a`
:type db_subnet_group_name: string
:param db_subnet_group_name: The DB subnet group name to use for the
new instance.
:type multi_az: boolean
:param multi_az: Specifies if the DB instance is a Multi-AZ deployment.
Constraint: You cannot specify the AvailabilityZone parameter if the
MultiAZ parameter is set to `True`.
:type publicly_accessible: boolean
:param publicly_accessible: Specifies the accessibility options for the
DB instance. A value of true specifies an Internet-facing instance
with a publicly resolvable DNS name, which resolves to a public IP
address. A value of false specifies an internal instance with a DNS
name that resolves to a private IP address.
Default: The default behavior varies depending on whether a VPC has
been requested or not. The following list shows the default
behavior in each case.
+ **Default VPC:**true
+ **VPC:**false
If no DB subnet group has been specified as part of the request and the
PubliclyAccessible value has not been set, the DB instance will be
publicly accessible. If a specific DB subnet group has been
specified as part of the request and the PubliclyAccessible value
has not been set, the DB instance will be private.
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Indicates that minor version
upgrades will be applied automatically to the DB instance during
the maintenance window.
:type license_model: string
:param license_model: License model information for the restored DB
instance.
Default: Same as source.
Valid values: `license-included` | `bring-your-own-license` | `general-
public-license`
:type db_name: string
:param db_name:
The database name for the restored DB instance.
This parameter doesn't apply to the MySQL engine.
:type engine: string
:param engine: The database engine to use for the new instance.
Default: The same as source
Constraint: Must be compatible with the engine of the source
Example: `oracle-ee`
:type iops: integer
:param iops: Specifies the amount of provisioned IOPS for the DB
instance, expressed in I/O operations per second. If this parameter
is not specified, the IOPS value will be taken from the backup. If
this parameter is set to 0, the new instance will be converted to a
non-PIOPS instance, which will take additional time, though your DB
instance will be available for connections before the conversion
starts.
Constraints: Must be an integer greater than 1000.
:type option_group_name: string
:param option_group_name: The name of the option group to be used for
the restored DB instance.
Permanent options, such as the TDE option for Oracle Advanced Security
TDE, cannot be removed from an option group, and that option group
cannot be removed from a DB instance once it is associated with a
DB instance
:type tags: list
:param tags: A list of tags.
"""
params = {
'DBInstanceIdentifier': db_instance_identifier,
'DBSnapshotIdentifier': db_snapshot_identifier,
}
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if port is not None:
params['Port'] = port
if availability_zone is not None:
params['AvailabilityZone'] = availability_zone
if db_subnet_group_name is not None:
params['DBSubnetGroupName'] = db_subnet_group_name
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if publicly_accessible is not None:
params['PubliclyAccessible'] = str(
publicly_accessible).lower()
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
if license_model is not None:
params['LicenseModel'] = license_model
if db_name is not None:
params['DBName'] = db_name
if engine is not None:
params['Engine'] = engine
if iops is not None:
params['Iops'] = iops
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='RestoreDBInstanceFromDBSnapshot',
verb='POST',
path='/', params=params)
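# Usage sketch (illustrative only; `conn`, the new instance name, snapshot
# identifier and instance class are placeholder values). The first argument
# is the new DB instance identifier, the second is the snapshot to restore:
#
#     conn.restore_db_instance_from_db_snapshot('mynewdbinstance',
#                                               'my-snapshot-id',
#                                               db_instance_class='db.m1.small')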
def restore_db_instance_to_point_in_time(self,
source_db_instance_identifier,
target_db_instance_identifier,
restore_time=None,
use_latest_restorable_time=None,
db_instance_class=None,
port=None,
availability_zone=None,
db_subnet_group_name=None,
multi_az=None,
publicly_accessible=None,
auto_minor_version_upgrade=None,
license_model=None,
db_name=None, engine=None,
iops=None,
option_group_name=None,
tags=None):
"""
Restores a DB instance to an arbitrary point-in-time. Users
can restore to any point in time before the
latestRestorableTime for up to backupRetentionPeriod days. The
target database is created from the source database with the
same configuration as the original database except that the DB
instance is created with the default DB security group.
:type source_db_instance_identifier: string
:param source_db_instance_identifier:
The identifier of the source DB instance from which to restore.
Constraints:
+ Must be the identifier of an existing database instance
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type target_db_instance_identifier: string
:param target_db_instance_identifier:
The name of the new database instance to be created.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type restore_time: timestamp
:param restore_time: The date and time to restore from.
Valid Values: Value must be a UTC time
Constraints:
+ Must be before the latest restorable time for the DB instance
+ Cannot be specified if UseLatestRestorableTime parameter is true
Example: `2009-09-07T23:45:00Z`
:type use_latest_restorable_time: boolean
:param use_latest_restorable_time: Specifies whether ( `True`) or not (
`False`) the DB instance is restored from the latest backup time.
Default: `False`
Constraints: Cannot be specified if RestoreTime parameter is provided.
:type db_instance_class: string
:param db_instance_class: The compute and memory capacity of the Amazon
RDS DB instance.
Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large |
db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge`
Default: The same DBInstanceClass as the original DB instance.
:type port: integer
:param port: The port number on which the database accepts connections.
Constraints: Value must be `1150-65535`
Default: The same port as the original DB instance.
:type availability_zone: string
:param availability_zone: The EC2 Availability Zone that the database
instance will be created in.
Default: A random, system-chosen Availability Zone.
Constraint: You cannot specify the AvailabilityZone parameter if the
MultiAZ parameter is set to true.
Example: `us-east-1a`
:type db_subnet_group_name: string
:param db_subnet_group_name: The DB subnet group name to use for the
new instance.
:type multi_az: boolean
:param multi_az: Specifies if the DB instance is a Multi-AZ deployment.
Constraint: You cannot specify the AvailabilityZone parameter if the
MultiAZ parameter is set to `True`.
:type publicly_accessible: boolean
:param publicly_accessible: Specifies the accessibility options for the
DB instance. A value of true specifies an Internet-facing instance
with a publicly resolvable DNS name, which resolves to a public IP
address. A value of false specifies an internal instance with a DNS
name that resolves to a private IP address.
Default: The default behavior varies depending on whether a VPC has
been requested or not. The following list shows the default
behavior in each case.
+ **Default VPC:**true
+ **VPC:**false
If no DB subnet group has been specified as part of the request and the
PubliclyAccessible value has not been set, the DB instance will be
publicly accessible. If a specific DB subnet group has been
specified as part of the request and the PubliclyAccessible value
has not been set, the DB instance will be private.
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Indicates that minor version
upgrades will be applied automatically to the DB instance during
the maintenance window.
:type license_model: string
:param license_model: License model information for the restored DB
instance.
Default: Same as source.
Valid values: `license-included` | `bring-your-own-license` | `general-
public-license`
:type db_name: string
:param db_name:
The database name for the restored DB instance.
This parameter is not used for the MySQL engine.
:type engine: string
:param engine: The database engine to use for the new instance.
Default: The same as source
Constraint: Must be compatible with the engine of the source
Example: `oracle-ee`
:type iops: integer
:param iops: The amount of Provisioned IOPS (input/output operations
per second) to be initially allocated for the DB instance.
Constraints: Must be an integer greater than 1000.
:type option_group_name: string
:param option_group_name: The name of the option group to be used for
the restored DB instance.
Permanent options, such as the TDE option for Oracle Advanced Security
TDE, cannot be removed from an option group, and that option group
cannot be removed from a DB instance once it is associated with a
DB instance
:type tags: list
:param tags: A list of tags.
"""
params = {
'SourceDBInstanceIdentifier': source_db_instance_identifier,
'TargetDBInstanceIdentifier': target_db_instance_identifier,
}
if restore_time is not None:
params['RestoreTime'] = restore_time
if use_latest_restorable_time is not None:
params['UseLatestRestorableTime'] = str(
use_latest_restorable_time).lower()
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if port is not None:
params['Port'] = port
if availability_zone is not None:
params['AvailabilityZone'] = availability_zone
if db_subnet_group_name is not None:
params['DBSubnetGroupName'] = db_subnet_group_name
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if publicly_accessible is not None:
params['PubliclyAccessible'] = str(
publicly_accessible).lower()
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
if license_model is not None:
params['LicenseModel'] = license_model
if db_name is not None:
params['DBName'] = db_name
if engine is not None:
params['Engine'] = engine
if iops is not None:
params['Iops'] = iops
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='RestoreDBInstanceToPointInTime',
verb='POST',
path='/', params=params)
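# Usage sketch (illustrative only; `conn` and both identifiers are assumed
# placeholder values). Restores to the latest restorable time instead of an
# explicit `restore_time`:
#
#     conn.restore_db_instance_to_point_in_time(
#         'mydbinstance',
#         'mydbinstance-restored',
#         use_latest_restorable_time=True)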
def revoke_db_security_group_ingress(self, db_security_group_name,
cidrip=None,
ec2_security_group_name=None,
ec2_security_group_id=None,
ec2_security_group_owner_id=None):
"""
Revokes ingress from a DBSecurityGroup for previously
authorized IP ranges or EC2 or VPC Security Groups. Required
parameters for this API are one of CIDRIP, EC2SecurityGroupId
for VPC, or (EC2SecurityGroupOwnerId and either
EC2SecurityGroupName or EC2SecurityGroupId).
:type db_security_group_name: string
:param db_security_group_name: The name of the DB security group to
revoke ingress from.
:type cidrip: string
:param cidrip: The IP range to revoke access from. Must be a valid CIDR
range. If `CIDRIP` is specified, `EC2SecurityGroupName`,
`EC2SecurityGroupId` and `EC2SecurityGroupOwnerId` cannot be
provided.
:type ec2_security_group_name: string
:param ec2_security_group_name: The name of the EC2 security group to
revoke access from. For VPC DB security groups,
`EC2SecurityGroupId` must be provided. Otherwise,
EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or
`EC2SecurityGroupId` must be provided.
:type ec2_security_group_id: string
:param ec2_security_group_id: The id of the EC2 security group to
revoke access from. For VPC DB security groups,
`EC2SecurityGroupId` must be provided. Otherwise,
EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or
`EC2SecurityGroupId` must be provided.
:type ec2_security_group_owner_id: string
:param ec2_security_group_owner_id: The AWS Account Number of the owner
of the EC2 security group specified in the `EC2SecurityGroupName`
parameter. The AWS Access Key ID is not an acceptable value. For
VPC DB security groups, `EC2SecurityGroupId` must be provided.
Otherwise, EC2SecurityGroupOwnerId and either
`EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.
"""
params = {'DBSecurityGroupName': db_security_group_name, }
if cidrip is not None:
params['CIDRIP'] = cidrip
if ec2_security_group_name is not None:
params['EC2SecurityGroupName'] = ec2_security_group_name
if ec2_security_group_id is not None:
params['EC2SecurityGroupId'] = ec2_security_group_id
if ec2_security_group_owner_id is not None:
params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id
return self._make_request(
action='RevokeDBSecurityGroupIngress',
verb='POST',
path='/', params=params)
def _make_request(self, action, verb, path, params):
params['ContentType'] = 'JSON'
response = self.make_request(action=action, verb='POST',
path='/', params=params)
body = response.read()
boto.log.debug(body)
if response.status == 200:
return json.loads(body)
else:
json_body = json.loads(body)
fault_name = json_body.get('Error', {}).get('Code', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
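# --- Usage sketch (illustrative only, not part of the original module) ---
# Assuming this is boto's rds2-style connection class, a connection obtained
# via boto.rds2.connect_to_region() could drive the two calls above like so:
#
#   conn = boto.rds2.connect_to_region('us-east-1')
#   conn.restore_db_instance_to_point_in_time(
#       source_db_instance_identifier='mydb',
#       target_db_instance_identifier='mydb-restored',
#       use_latest_restorable_time=True)
#   conn.revoke_db_security_group_ingress('default', cidrip='203.0.113.0/24')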
| [
"[email protected]"
] | |
f1c36c2f5193255fecfcd93b9edf3e5806fbce99 | b6639af28745c7cee140b4d76332c937557df0dd | /python/lab3.1.py | 6163a99e1effe76fb1ae315e913cedbbb33a0dc5 | [] | no_license | ston1x/uni | a9ef682115ef50994012a887a1a62ec0d8dc90ee | 8062f9f1d0beeddabe74cbbf672ca68d7ac626ec | refs/heads/master | 2020-09-22T02:35:04.927509 | 2020-02-24T07:14:14 | 2020-02-24T07:14:14 | 225,019,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | def string_to_words(string, separator):
try:
return string.split(separator)
except Exception as e:
print(e)
try:
string = input("Enter the string divided by a separator: ")
separator= input("Enter the character by which the string will be splitted (separator): ")
except Exception as e:
print(e)
words = string_to_words(string, separator)
print(words)
| [
"[email protected]"
] | |
15d401d6d7e7ae93bfe617029b03a032d777b847 | 50f747ae46f0c1c7aedbd701ec191f332779d103 | /Main/test_algo_bot.py | 6e49f7050bfa79fbf324a3383aa34d2cd02f718f | [] | no_license | marcellinamichie291/Machine_Teachers | 03c842187cd4352f01b98c20c17c60eedb08bf2d | 417e41428a65f88a7612874edaa60d7018ff9b0f | refs/heads/main | 2023-03-23T02:47:41.572264 | 2021-01-16T15:51:46 | 2021-01-16T15:51:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,269 | py | import os
import numpy as np
import pandas as pd
import ccxt
import time
from dotenv import load_dotenv
from numpy.random import seed
seed(1)
from tensorflow import random
random.set_seed(2)
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout
from stock_predictor import Stock_predictor
def initialize(cash=None):
"""Initialize the dashboard, data storage, and account balances."""
print("Intializing Account and DataFrame")
# Initialize Account
account = {"balance": cash, "shares": 0}
# Initialize dataframe
df = fetch_data()
# @TODO: We will complete the rest of this later!
return account, df
def build_dashboard(data, signals):
"""Build the dashboard."""
# @TODO: We will complete this later!
def fetch_data():
"""Fetches the latest prices."""
print("Fetching data...")
load_dotenv()
kraken_public_key = os.getenv("KRAKEN_PUBLIC_KEY")
kraken_secret_key = os.getenv("KRAKEN_SECRET_KEY")
kraken = ccxt.kraken({"apiKey": kraken_public_key, "secret": kraken_secret_key})
close = kraken.fetch_ticker("NFLX")["close"]
volume = kraken.fetch_ticker("NFLX")["volume"]
datetime = kraken.fetch_ticker("NFLX")["datetime"]
df = pd.DataFrame({"close": [close]})
df.index = pd.to_datetime([datetime])
return df
def generate_signals(df):
"""Generates trading signals for a given dataset."""
print("Generating Signals")
# Set window
short_window = 10
signals = df.copy()
signals["signal"] = 0.0
# Generate the short and long moving averages
signals["sma10"] = signals["close"].rolling(window=10).mean()
signals["sma20"] = signals["close"].rolling(window=20).mean()
# Generate the trading signal 0 or 1,
signals["signal"][short_window:] = np.where(
signals["sma10"][short_window:] > signals["sma20"][short_window:], 1.0, 0.0
)
# Calculate the points in time at which a position should be taken, 1 or -1
signals["entry/exit"] = signals["signal"].diff()
return signals
def execute_trade_strategy(signals, account):
"""Makes a buy/sell/hold decision."""
print("Executing Trading Strategy!")
if signals["entry/exit"].iloc[-1] == 1.0:
print("buy")
number_to_buy = round(account["balance"] / signals["close"].iloc[-1], 0) * 0.001
account["balance"] -= number_to_buy * signals["close"].iloc[-1]
account["shares"] += number_to_buy
elif signals["entry/exit"].iloc[-1] == -1.0:
print("sell")
account["balance"] += signals["close"].iloc[-1] * account["shares"]
account["shares"] = 0
else:
print("hold")
return account
print("Initializing account and DataFrame")
account, df = initialize(10000)
print(df)
def main():
while True:
global account
global df
# Fetch and save new data
new_df = fetch_data()
df = df.append(new_df, ignore_index=True)
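        # Note: ignore_index=True drops the datetime index that fetch_data()
        # sets and replaces it with a plain RangeIndex.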
min_window = 22
if df.shape[0] >= min_window:
signals = generate_signals(df)
print(signals)
account = execute_trade_strategy(signals, account)
time.sleep(.3)
main()
| [
"[email protected]"
] | |
ef82571b3a9d413818632a92cb1e3edb2d75dab3 | 385a63d3c9e6f5815979165001f78ec3d7b90cd2 | /DrivingTDM_SetupMatlabOOP/headerAndFunctionsMotor/ximc/python-profiles/STANDA/8MT195X-540-4.py | 391e7db3d811458155873424999b6ceb86b43093 | [
"BSD-2-Clause"
] | permissive | Rasedujjaman/matlabOOP | 5abb6ec94998fda5e9214ed94cf67a42bf243d4f | e1f025ab9b00a3646719df23852079736d2b5701 | refs/heads/main | 2023-07-23T21:40:53.905045 | 2021-08-31T16:12:39 | 2021-08-31T16:12:39 | 378,249,559 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 22,654 | py | def set_profile_8MT195X_540_4(lib, id):
worst_result = Result.Ok
result = Result.Ok
feedback_settings = feedback_settings_t()
feedback_settings.IPS = 4000
class FeedbackType_:
FEEDBACK_ENCODER_MEDIATED = 6
FEEDBACK_NONE = 5
FEEDBACK_EMF = 4
FEEDBACK_ENCODER = 1
feedback_settings.FeedbackType = FeedbackType_.FEEDBACK_EMF
class FeedbackFlags_:
FEEDBACK_ENC_TYPE_BITS = 192
FEEDBACK_ENC_TYPE_DIFFERENTIAL = 128
FEEDBACK_ENC_TYPE_SINGLE_ENDED = 64
FEEDBACK_ENC_REVERSE = 1
FEEDBACK_ENC_TYPE_AUTO = 0
feedback_settings.FeedbackFlags = FeedbackFlags_.FEEDBACK_ENC_TYPE_SINGLE_ENDED | FeedbackFlags_.FEEDBACK_ENC_TYPE_AUTO
feedback_settings.CountsPerTurn = 4000
result = lib.set_feedback_settings(id, byref(feedback_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
home_settings = home_settings_t()
home_settings.FastHome = 500
home_settings.uFastHome = 0
home_settings.SlowHome = 500
home_settings.uSlowHome = 0
home_settings.HomeDelta = 500
home_settings.uHomeDelta = 0
class HomeFlags_:
HOME_USE_FAST = 256
HOME_STOP_SECOND_BITS = 192
HOME_STOP_SECOND_LIM = 192
HOME_STOP_SECOND_SYN = 128
HOME_STOP_SECOND_REV = 64
HOME_STOP_FIRST_BITS = 48
HOME_STOP_FIRST_LIM = 48
HOME_STOP_FIRST_SYN = 32
HOME_STOP_FIRST_REV = 16
HOME_HALF_MV = 8
HOME_MV_SEC_EN = 4
HOME_DIR_SECOND = 2
HOME_DIR_FIRST = 1
home_settings.HomeFlags = HomeFlags_.HOME_USE_FAST | HomeFlags_.HOME_STOP_SECOND_REV | HomeFlags_.HOME_STOP_FIRST_BITS | HomeFlags_.HOME_DIR_SECOND
result = lib.set_home_settings(id, byref(home_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
move_settings = move_settings_t()
move_settings.Speed = 1000
move_settings.uSpeed = 0
move_settings.Accel = 2000
move_settings.Decel = 4000
move_settings.AntiplaySpeed = 1000
move_settings.uAntiplaySpeed = 0
class MoveFlags_:
RPM_DIV_1000 = 1
result = lib.set_move_settings(id, byref(move_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
engine_settings = engine_settings_t()
engine_settings.NomVoltage = 1
engine_settings.NomCurrent = 2100
engine_settings.NomSpeed = 2000
engine_settings.uNomSpeed = 0
class EngineFlags_:
ENGINE_LIMIT_RPM = 128
ENGINE_LIMIT_CURR = 64
ENGINE_LIMIT_VOLT = 32
ENGINE_ACCEL_ON = 16
ENGINE_ANTIPLAY = 8
ENGINE_MAX_SPEED = 4
ENGINE_CURRENT_AS_RMS = 2
ENGINE_REVERSE = 1
engine_settings.EngineFlags = EngineFlags_.ENGINE_LIMIT_RPM | EngineFlags_.ENGINE_ACCEL_ON | EngineFlags_.ENGINE_REVERSE
engine_settings.Antiplay = 575
class MicrostepMode_:
MICROSTEP_MODE_FRAC_256 = 9
MICROSTEP_MODE_FRAC_128 = 8
MICROSTEP_MODE_FRAC_64 = 7
MICROSTEP_MODE_FRAC_32 = 6
MICROSTEP_MODE_FRAC_16 = 5
MICROSTEP_MODE_FRAC_8 = 4
MICROSTEP_MODE_FRAC_4 = 3
MICROSTEP_MODE_FRAC_2 = 2
MICROSTEP_MODE_FULL = 1
engine_settings.MicrostepMode = MicrostepMode_.MICROSTEP_MODE_FRAC_256
engine_settings.StepsPerRev = 200
result = lib.set_engine_settings(id, byref(engine_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
entype_settings = entype_settings_t()
class EngineType_:
ENGINE_TYPE_BRUSHLESS = 5
ENGINE_TYPE_TEST = 4
ENGINE_TYPE_STEP = 3
ENGINE_TYPE_2DC = 2
ENGINE_TYPE_DC = 1
ENGINE_TYPE_NONE = 0
entype_settings.EngineType = EngineType_.ENGINE_TYPE_STEP | EngineType_.ENGINE_TYPE_NONE
class DriverType_:
DRIVER_TYPE_EXTERNAL = 3
DRIVER_TYPE_INTEGRATE = 2
DRIVER_TYPE_DISCRETE_FET = 1
entype_settings.DriverType = DriverType_.DRIVER_TYPE_INTEGRATE
result = lib.set_entype_settings(id, byref(entype_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
power_settings = power_settings_t()
power_settings.HoldCurrent = 50
power_settings.CurrReductDelay = 1000
power_settings.PowerOffDelay = 60
power_settings.CurrentSetTime = 300
class PowerFlags_:
POWER_SMOOTH_CURRENT = 4
POWER_OFF_ENABLED = 2
POWER_REDUCT_ENABLED = 1
power_settings.PowerFlags = PowerFlags_.POWER_SMOOTH_CURRENT | PowerFlags_.POWER_OFF_ENABLED | PowerFlags_.POWER_REDUCT_ENABLED
result = lib.set_power_settings(id, byref(power_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
secure_settings = secure_settings_t()
secure_settings.LowUpwrOff = 800
secure_settings.CriticalIpwr = 4000
secure_settings.CriticalUpwr = 5500
secure_settings.CriticalT = 800
secure_settings.CriticalIusb = 450
secure_settings.CriticalUusb = 520
secure_settings.MinimumUusb = 420
class Flags_:
ALARM_ENGINE_RESPONSE = 128
ALARM_WINDING_MISMATCH = 64
USB_BREAK_RECONNECT = 32
ALARM_FLAGS_STICKING = 16
ALARM_ON_BORDERS_SWAP_MISSET = 8
H_BRIDGE_ALERT = 4
LOW_UPWR_PROTECTION = 2
ALARM_ON_DRIVER_OVERHEATING = 1
secure_settings.Flags = Flags_.ALARM_ENGINE_RESPONSE | Flags_.ALARM_FLAGS_STICKING | Flags_.ALARM_ON_BORDERS_SWAP_MISSET | Flags_.H_BRIDGE_ALERT | Flags_.ALARM_ON_DRIVER_OVERHEATING
result = lib.set_secure_settings(id, byref(secure_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
edges_settings = edges_settings_t()
class BorderFlags_:
BORDERS_SWAP_MISSET_DETECTION = 8
BORDER_STOP_RIGHT = 4
BORDER_STOP_LEFT = 2
BORDER_IS_ENCODER = 1
edges_settings.BorderFlags = BorderFlags_.BORDER_STOP_RIGHT | BorderFlags_.BORDER_STOP_LEFT
class EnderFlags_:
ENDER_SW2_ACTIVE_LOW = 4
ENDER_SW1_ACTIVE_LOW = 2
ENDER_SWAP = 1
edges_settings.EnderFlags = EnderFlags_.ENDER_SWAP
edges_settings.LeftBorder = 175
edges_settings.uLeftBorder = 0
edges_settings.RightBorder = 25825
edges_settings.uRightBorder = 0
result = lib.set_edges_settings(id, byref(edges_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
pid_settings = pid_settings_t()
pid_settings.KpU = 0
pid_settings.KiU = 0
pid_settings.KdU = 0
pid_settings.Kpf = 0.003599999938160181
pid_settings.Kif = 0.03799999877810478
pid_settings.Kdf = 2.8000000384054147e-05
result = lib.set_pid_settings(id, byref(pid_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
sync_in_settings = sync_in_settings_t()
class SyncInFlags_:
SYNCIN_GOTOPOSITION = 4
SYNCIN_INVERT = 2
SYNCIN_ENABLED = 1
sync_in_settings.ClutterTime = 4
sync_in_settings.Position = 0
sync_in_settings.uPosition = 0
sync_in_settings.Speed = 0
sync_in_settings.uSpeed = 0
result = lib.set_sync_in_settings(id, byref(sync_in_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
sync_out_settings = sync_out_settings_t()
class SyncOutFlags_:
SYNCOUT_ONPERIOD = 64
SYNCOUT_ONSTOP = 32
SYNCOUT_ONSTART = 16
SYNCOUT_IN_STEPS = 8
SYNCOUT_INVERT = 4
SYNCOUT_STATE = 2
SYNCOUT_ENABLED = 1
sync_out_settings.SyncOutFlags = SyncOutFlags_.SYNCOUT_ONSTOP | SyncOutFlags_.SYNCOUT_ONSTART
sync_out_settings.SyncOutPulseSteps = 100
sync_out_settings.SyncOutPeriod = 2000
sync_out_settings.Accuracy = 0
sync_out_settings.uAccuracy = 0
result = lib.set_sync_out_settings(id, byref(sync_out_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
extio_settings = extio_settings_t()
class EXTIOSetupFlags_:
EXTIO_SETUP_INVERT = 2
EXTIO_SETUP_OUTPUT = 1
extio_settings.EXTIOSetupFlags = EXTIOSetupFlags_.EXTIO_SETUP_OUTPUT
class EXTIOModeFlags_:
EXTIO_SETUP_MODE_OUT_BITS = 240
EXTIO_SETUP_MODE_OUT_MOTOR_ON = 64
EXTIO_SETUP_MODE_OUT_ALARM = 48
EXTIO_SETUP_MODE_OUT_MOVING = 32
EXTIO_SETUP_MODE_OUT_ON = 16
EXTIO_SETUP_MODE_IN_BITS = 15
EXTIO_SETUP_MODE_IN_ALARM = 5
EXTIO_SETUP_MODE_IN_HOME = 4
EXTIO_SETUP_MODE_IN_MOVR = 3
EXTIO_SETUP_MODE_IN_PWOF = 2
EXTIO_SETUP_MODE_IN_STOP = 1
EXTIO_SETUP_MODE_IN_NOP = 0
EXTIO_SETUP_MODE_OUT_OFF = 0
extio_settings.EXTIOModeFlags = EXTIOModeFlags_.EXTIO_SETUP_MODE_IN_STOP | EXTIOModeFlags_.EXTIO_SETUP_MODE_IN_NOP | EXTIOModeFlags_.EXTIO_SETUP_MODE_OUT_OFF
result = lib.set_extio_settings(id, byref(extio_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
brake_settings = brake_settings_t()
brake_settings.t1 = 300
brake_settings.t2 = 500
brake_settings.t3 = 300
brake_settings.t4 = 400
class BrakeFlags_:
BRAKE_ENG_PWROFF = 2
BRAKE_ENABLED = 1
brake_settings.BrakeFlags = BrakeFlags_.BRAKE_ENG_PWROFF
result = lib.set_brake_settings(id, byref(brake_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
control_settings = control_settings_t()
control_settings.MaxSpeed[0] = 100
control_settings.MaxSpeed[1] = 1000
control_settings.MaxSpeed[2] = 0
control_settings.MaxSpeed[3] = 0
control_settings.MaxSpeed[4] = 0
control_settings.MaxSpeed[5] = 0
control_settings.MaxSpeed[6] = 0
control_settings.MaxSpeed[7] = 0
control_settings.MaxSpeed[8] = 0
control_settings.MaxSpeed[9] = 0
control_settings.uMaxSpeed[0] = 0
control_settings.uMaxSpeed[1] = 0
control_settings.uMaxSpeed[2] = 0
control_settings.uMaxSpeed[3] = 0
control_settings.uMaxSpeed[4] = 0
control_settings.uMaxSpeed[5] = 0
control_settings.uMaxSpeed[6] = 0
control_settings.uMaxSpeed[7] = 0
control_settings.uMaxSpeed[8] = 0
control_settings.uMaxSpeed[9] = 0
control_settings.Timeout[0] = 1000
control_settings.Timeout[1] = 1000
control_settings.Timeout[2] = 1000
control_settings.Timeout[3] = 1000
control_settings.Timeout[4] = 1000
control_settings.Timeout[5] = 1000
control_settings.Timeout[6] = 1000
control_settings.Timeout[7] = 1000
control_settings.Timeout[8] = 1000
control_settings.MaxClickTime = 300
class Flags_:
CONTROL_BTN_RIGHT_PUSHED_OPEN = 8
CONTROL_BTN_LEFT_PUSHED_OPEN = 4
CONTROL_MODE_BITS = 3
CONTROL_MODE_LR = 2
CONTROL_MODE_JOY = 1
CONTROL_MODE_OFF = 0
control_settings.Flags = Flags_.CONTROL_MODE_LR | Flags_.CONTROL_MODE_OFF
control_settings.DeltaPosition = 1
control_settings.uDeltaPosition = 0
result = lib.set_control_settings(id, byref(control_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
joystick_settings = joystick_settings_t()
joystick_settings.JoyLowEnd = 0
joystick_settings.JoyCenter = 5000
joystick_settings.JoyHighEnd = 10000
joystick_settings.ExpFactor = 100
joystick_settings.DeadZone = 50
class JoyFlags_:
JOY_REVERSE = 1
result = lib.set_joystick_settings(id, byref(joystick_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
ctp_settings = ctp_settings_t()
ctp_settings.CTPMinError = 3
class CTPFlags_:
CTP_ERROR_CORRECTION = 16
REV_SENS_INV = 8
CTP_ALARM_ON_ERROR = 4
CTP_BASE = 2
CTP_ENABLED = 1
ctp_settings.CTPFlags = CTPFlags_.CTP_ERROR_CORRECTION
result = lib.set_ctp_settings(id, byref(ctp_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
uart_settings = uart_settings_t()
uart_settings.Speed = 115200
class UARTSetupFlags_:
UART_STOP_BIT = 8
UART_PARITY_BIT_USE = 4
UART_PARITY_BITS = 3
UART_PARITY_BIT_MARK = 3
UART_PARITY_BIT_SPACE = 2
UART_PARITY_BIT_ODD = 1
UART_PARITY_BIT_EVEN = 0
uart_settings.UARTSetupFlags = UARTSetupFlags_.UART_PARITY_BIT_EVEN
result = lib.set_uart_settings(id, byref(uart_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
controller_name = controller_name_t()
controller_name.ControllerName = bytes([0, 113, 252, 118, 36, 0, 72, 0, 3, 0, 0, 0, 104, 101, 103, 0])
class CtrlFlags_:
EEPROM_PRECEDENCE = 1
result = lib.set_controller_name(id, byref(controller_name))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
emf_settings = emf_settings_t()
emf_settings.L = 0.013000000268220901
emf_settings.R = 2.5999999046325684
emf_settings.Km = 0.015599999576807022
class BackEMFFlags_:
BACK_EMF_KM_AUTO = 4
BACK_EMF_RESISTANCE_AUTO = 2
BACK_EMF_INDUCTANCE_AUTO = 1
result = lib.set_emf_settings(id, byref(emf_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
engine_advansed_setup = engine_advansed_setup_t()
engine_advansed_setup.stepcloseloop_Kw = 50
engine_advansed_setup.stepcloseloop_Kp_low = 1000
engine_advansed_setup.stepcloseloop_Kp_high = 33
result = lib.set_engine_advansed_setup(id, byref(engine_advansed_setup))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
extended_settings = extended_settings_t()
extended_settings.Param1 = 0
result = lib.set_extended_settings(id, byref(extended_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
stage_name = stage_name_t()
stage_name.PositionerName = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_stage_name(id, byref(stage_name))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
stage_information = stage_information_t()
stage_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
stage_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_stage_information(id, byref(stage_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
stage_settings = stage_settings_t()
stage_settings.LeadScrewPitch = 0
stage_settings.Units = bytes([0, 0, 0, 0, 0, 0, 0, 0])
stage_settings.MaxSpeed = 0
stage_settings.TravelRange = 0
stage_settings.SupplyVoltageMin = 0
stage_settings.SupplyVoltageMax = 0
stage_settings.MaxCurrentConsumption = 0
stage_settings.HorizontalLoadCapacity = 0
stage_settings.VerticalLoadCapacity = 0
result = lib.set_stage_settings(id, byref(stage_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
motor_information = motor_information_t()
motor_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
motor_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_motor_information(id, byref(motor_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
motor_settings = motor_settings_t()
class MotorType_:
MOTOR_TYPE_BLDC = 3
MOTOR_TYPE_DC = 2
MOTOR_TYPE_STEP = 1
MOTOR_TYPE_UNKNOWN = 0
motor_settings.MotorType = MotorType_.MOTOR_TYPE_UNKNOWN
motor_settings.ReservedField = 0
motor_settings.Poles = 0
motor_settings.Phases = 0
motor_settings.NominalVoltage = 0
motor_settings.NominalCurrent = 0
motor_settings.NominalSpeed = 0
motor_settings.NominalTorque = 0
motor_settings.NominalPower = 0
motor_settings.WindingResistance = 0
motor_settings.WindingInductance = 0
motor_settings.RotorInertia = 0
motor_settings.StallTorque = 0
motor_settings.DetentTorque = 0
motor_settings.TorqueConstant = 0
motor_settings.SpeedConstant = 0
motor_settings.SpeedTorqueGradient = 0
motor_settings.MechanicalTimeConstant = 0
motor_settings.MaxSpeed = 0
motor_settings.MaxCurrent = 0
motor_settings.MaxCurrentTime = 0
motor_settings.NoLoadCurrent = 0
motor_settings.NoLoadSpeed = 0
result = lib.set_motor_settings(id, byref(motor_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
encoder_information = encoder_information_t()
encoder_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
encoder_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_encoder_information(id, byref(encoder_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
encoder_settings = encoder_settings_t()
encoder_settings.MaxOperatingFrequency = 0
encoder_settings.SupplyVoltageMin = 0
encoder_settings.SupplyVoltageMax = 0
encoder_settings.MaxCurrentConsumption = 0
encoder_settings.PPR = 0
class EncoderSettings_:
ENCSET_REVOLUTIONSENSOR_ACTIVE_HIGH = 256
ENCSET_REVOLUTIONSENSOR_PRESENT = 64
ENCSET_INDEXCHANNEL_PRESENT = 16
ENCSET_PUSHPULL_OUTPUT = 4
ENCSET_DIFFERENTIAL_OUTPUT = 1
result = lib.set_encoder_settings(id, byref(encoder_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
hallsensor_information = hallsensor_information_t()
hallsensor_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
hallsensor_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_hallsensor_information(id, byref(hallsensor_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
hallsensor_settings = hallsensor_settings_t()
hallsensor_settings.MaxOperatingFrequency = 0
hallsensor_settings.SupplyVoltageMin = 0
hallsensor_settings.SupplyVoltageMax = 0
hallsensor_settings.MaxCurrentConsumption = 0
hallsensor_settings.PPR = 0
result = lib.set_hallsensor_settings(id, byref(hallsensor_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
gear_information = gear_information_t()
gear_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
gear_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_gear_information(id, byref(gear_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
gear_settings = gear_settings_t()
gear_settings.ReductionIn = 0
gear_settings.ReductionOut = 0
gear_settings.RatedInputTorque = 0
gear_settings.RatedInputSpeed = 0
gear_settings.MaxOutputBacklash = 0
gear_settings.InputInertia = 0
gear_settings.Efficiency = 0
result = lib.set_gear_settings(id, byref(gear_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
accessories_settings = accessories_settings_t()
accessories_settings.MagneticBrakeInfo = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
accessories_settings.MBRatedVoltage = 0
accessories_settings.MBRatedCurrent = 0
accessories_settings.MBTorque = 0
class MBSettings_:
MB_POWERED_HOLD = 2
MB_AVAILABLE = 1
accessories_settings.TemperatureSensorInfo = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
accessories_settings.TSMin = 0
accessories_settings.TSMax = 0
accessories_settings.TSGrad = 0
class TSSettings_:
TS_AVAILABLE = 8
TS_TYPE_BITS = 7
TS_TYPE_SEMICONDUCTOR = 2
TS_TYPE_THERMOCOUPLE = 1
TS_TYPE_UNKNOWN = 0
accessories_settings.TSSettings = TSSettings_.TS_TYPE_UNKNOWN
class LimitSwitchesSettings_:
LS_SHORTED = 16
LS_SW2_ACTIVE_LOW = 8
LS_SW1_ACTIVE_LOW = 4
LS_ON_SW2_AVAILABLE = 2
LS_ON_SW1_AVAILABLE = 1
result = lib.set_accessories_settings(id, byref(accessories_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
return worst_result
| [
"[email protected]"
] | |
141b42291593793cc3abb2c8eb5ac5c5b1d0950b | b9a900189095b6af89fb5b941773edaee69bd47d | /InformationAPI/information/migrations/0004_auto_20201209_1427.py | 99cc52a1662a6739531891e8388c62b500bc13f7 | [] | no_license | tyagisen/information | 6aa84a0b44920f69efa7ac2bdf62278c4260efaf | ecc6dfd902632603c5080a6e330c3b57462edd97 | refs/heads/master | 2023-01-30T22:18:22.738513 | 2020-12-10T01:50:00 | 2020-12-10T01:50:00 | 318,956,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | # Generated by Django 3.1.3 on 2020-12-09 14:27
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('information', '0003_auto_20201207_0911'),
]
operations = [
migrations.AlterField(
model_name='information',
name='info_list',
field=ckeditor.fields.RichTextField(),
),
]
| [
"[email protected]"
] | |
7bbcb9ca9dda54951983837756510a5c06b96ee6 | 3df53a7188586c9e6ae26ebcc2ae788480c2f84a | /src/main2.py | 4bac4c6f4a29927f97706645fb6f3e81324a528e | [] | no_license | webclinic017/f-indicators | b24927a7008bf812fcfc394b39275ea7fb767039 | bfdd34a8e259fee8bce43ac5d0c268820e7bdd90 | refs/heads/master | 2023-08-10T02:58:49.123664 | 2020-05-11T15:31:12 | 2020-05-11T15:31:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,436 | py | # Import indicators
# Attach them to strategy obj
# Start GA with strategy obj
import logging
import pandas as pd
import numpy as np
from backtesting import Strategy, Backtest
from talib import SMA
from backtesting.lib import crossover
from pathlib import Path, PurePosixPath
from utils import TFConvertor
log = logging.getLogger("GA")
log.setLevel(logging.DEBUG)
path = Path(__file__).parent.resolve().parent
path = path.joinpath("logs/ga.log")
log.addHandler(logging.FileHandler(path.resolve()))
data = pd.read_csv("data_large/EURUSD_Candlestick_1_M_BID_09.05.2018-30.03.2020.csv")
data['Datetime'] = pd.to_datetime(data['Datetime'], format="%d.%m.%Y %H:%M:%S")
# set datetime as index
data = data.set_index('Datetime')
data_loc = data.loc["2017":"2020"]
datatmp = TFConvertor(data_loc, '4H') # It is different for every new individual
class SmaCross(Strategy):
# Define the two MA lags as *class variables*
# genome:
n1 = 2
n2 = 6
n3 = 10
n4 = 20
price = 'Close'
def init(self, *args, **kwargs):
# Precompute two moving averages
self.sma1 = self.I(SMA, datatmp["Close"], self.n1)
self.sma2 = self.I(SMA, datatmp["Close"], self.n2)
self.sma3 = self.I(SMA, datatmp["Close"], self.n3)
self.sma4 = self.I(SMA, datatmp["Close"], self.n4)
# self.sma1 = SMA(datatmp["Close"], self.n1)
# self.sma2 = SMA(datatmp["Close"], self.n2)
# self.sma3 = SMA(datatmp["Close"], self.n3)
# self.sma4 = SMA(datatmp["Close"], self.n4)
# Precompute support and resistance using specified function as first input of self.I()
# self.support_resistance = self.I(Pivot5points, self.data, self.sup_res_candles)
def next(self):
        # Buy when sma1 crosses above sma2 and sma3 crosses above sma4
if crossover(self.sma1, self.sma2) and crossover(self.sma3, self.sma4):
try:
print("Is buying...")
self.buy()
except:
log.error("Something went wrong in buy() function!")
        # Sell when sma1 crosses below sma2 and sma3 crosses below sma4
elif crossover(self.sma2, self.sma1) and crossover(self.sma4, self.sma3):
try:
self.sell()
except:
log.error("Something went wrong in sell() function!")
bt = Backtest(datatmp, SmaCross, cash=10000, commission=.02)
result = bt.run()
print(result)
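# Optimization sketch (illustrative): backtesting.py can also grid-search the
# SMA windows, which gives a baseline to compare the GA against. The ranges
# below are assumptions, not tuned values.
#
#   stats = bt.optimize(n1=range(2, 20, 2), n2=range(6, 40, 2),
#                       n3=range(10, 60, 5), n4=range(20, 120, 10),
#                       maximize='SQN',
#                       constraint=lambda p: p.n1 < p.n2 and p.n3 < p.n4)
#   print(stats)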
print(np.isnan(result.SQN)) | [
"[email protected]"
] | |
1aeaca94f2d4d9feb9733db3c8cad22d7ff94e80 | cf5b2850dc9794eb0fc11826da4fd3ea6c22e9b1 | /examples/conditional_format.py | 868eec6890126a075a32371064be80ab9628e826 | [
"BSD-2-Clause"
] | permissive | glasah/XlsxWriter | bcf74b43b9c114e45e1a3dd679b5ab49ee20a0ec | 1e8aaeb03000dc2f294ccb89b33806ac40dabc13 | refs/heads/main | 2023-09-05T03:03:53.857387 | 2021-11-01T07:35:46 | 2021-11-01T07:35:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,956 | py | ###############################################################################
#
# Example of how to add conditional formatting to an XlsxWriter file.
#
# Conditional formatting allows you to apply a format to a cell or a
# range of cells based on certain criteria.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright 2013-2021, John McNamara, [email protected]
#
import xlsxwriter
workbook = xlsxwriter.Workbook('conditional_format.xlsx')
worksheet1 = workbook.add_worksheet()
worksheet2 = workbook.add_worksheet()
worksheet3 = workbook.add_worksheet()
worksheet4 = workbook.add_worksheet()
worksheet5 = workbook.add_worksheet()
worksheet6 = workbook.add_worksheet()
worksheet7 = workbook.add_worksheet()
worksheet8 = workbook.add_worksheet()
worksheet9 = workbook.add_worksheet()
# Add a format. Light red fill with dark red text.
format1 = workbook.add_format({'bg_color': '#FFC7CE',
'font_color': '#9C0006'})
# Add a format. Green fill with dark green text.
format2 = workbook.add_format({'bg_color': '#C6EFCE',
'font_color': '#006100'})
# Some sample data to run the conditional formatting against.
data = [
[34, 72, 38, 30, 75, 48, 75, 66, 84, 86],
[6, 24, 1, 84, 54, 62, 60, 3, 26, 59],
[28, 79, 97, 13, 85, 93, 93, 22, 5, 14],
[27, 71, 40, 17, 18, 79, 90, 93, 29, 47],
[88, 25, 33, 23, 67, 1, 59, 79, 47, 36],
[24, 100, 20, 88, 29, 33, 38, 54, 54, 88],
[6, 57, 88, 28, 10, 26, 37, 7, 41, 48],
[52, 78, 1, 96, 26, 45, 47, 33, 96, 36],
[60, 54, 81, 66, 81, 90, 80, 93, 12, 55],
[70, 5, 46, 14, 71, 19, 66, 36, 41, 21],
]
###############################################################################
#
# Example 1.
#
caption = ('Cells with values >= 50 are in light red. '
'Values < 50 are in light green.')
# Write the data.
worksheet1.write('A1', caption)
for row, row_data in enumerate(data):
worksheet1.write_row(row + 2, 1, row_data)
# Write a conditional format over a range.
worksheet1.conditional_format('B3:K12', {'type': 'cell',
'criteria': '>=',
'value': 50,
'format': format1})
# Write another conditional format over the same range.
worksheet1.conditional_format('B3:K12', {'type': 'cell',
'criteria': '<',
'value': 50,
'format': format2})
###############################################################################
#
# Example 2.
#
caption = ('Values between 30 and 70 are in light red. '
'Values outside that range are in light green.')
worksheet2.write('A1', caption)
for row, row_data in enumerate(data):
worksheet2.write_row(row + 2, 1, row_data)
worksheet2.conditional_format('B3:K12', {'type': 'cell',
'criteria': 'between',
'minimum': 30,
'maximum': 70,
'format': format1})
worksheet2.conditional_format('B3:K12', {'type': 'cell',
'criteria': 'not between',
'minimum': 30,
'maximum': 70,
'format': format2})
###############################################################################
#
# Example 3.
#
caption = ('Duplicate values are in light red. '
'Unique values are in light green.')
worksheet3.write('A1', caption)
for row, row_data in enumerate(data):
worksheet3.write_row(row + 2, 1, row_data)
worksheet3.conditional_format('B3:K12', {'type': 'duplicate',
'format': format1})
worksheet3.conditional_format('B3:K12', {'type': 'unique',
'format': format2})
###############################################################################
#
# Example 4.
#
caption = ('Above average values are in light red. '
'Below average values are in light green.')
worksheet4.write('A1', caption)
for row, row_data in enumerate(data):
worksheet4.write_row(row + 2, 1, row_data)
worksheet4.conditional_format('B3:K12', {'type': 'average',
'criteria': 'above',
'format': format1})
worksheet4.conditional_format('B3:K12', {'type': 'average',
'criteria': 'below',
'format': format2})
###############################################################################
#
# Example 5.
#
caption = ('Top 10 values are in light red. '
'Bottom 10 values are in light green.')
worksheet5.write('A1', caption)
for row, row_data in enumerate(data):
worksheet5.write_row(row + 2, 1, row_data)
worksheet5.conditional_format('B3:K12', {'type': 'top',
'value': '10',
'format': format1})
worksheet5.conditional_format('B3:K12', {'type': 'bottom',
'value': '10',
'format': format2})
###############################################################################
#
# Example 6.
#
caption = ('Cells with values >= 50 are in light red. '
'Values < 50 are in light green. Non-contiguous ranges.')
# Write the data.
worksheet6.write('A1', caption)
for row, row_data in enumerate(data):
worksheet6.write_row(row + 2, 1, row_data)
# Write a conditional format over a range.
worksheet6.conditional_format('B3:K6', {'type': 'cell',
'criteria': '>=',
'value': 50,
'format': format1,
'multi_range': 'B3:K6 B9:K12'})
# Write another conditional format over the same range.
worksheet6.conditional_format('B3:K6', {'type': 'cell',
'criteria': '<',
'value': 50,
'format': format2,
'multi_range': 'B3:K6 B9:K12'})
###############################################################################
#
# Example 7.
#
caption = 'Examples of color scales with default and user colors.'
data = range(1, 13)
worksheet7.write('A1', caption)
worksheet7.write('B2', "2 Color Scale")
worksheet7.write('D2', "2 Color Scale + user colors")
worksheet7.write('G2', "3 Color Scale")
worksheet7.write('I2', "3 Color Scale + user colors")
for row, row_data in enumerate(data):
worksheet7.write(row + 2, 1, row_data)
worksheet7.write(row + 2, 3, row_data)
worksheet7.write(row + 2, 6, row_data)
worksheet7.write(row + 2, 8, row_data)
worksheet7.conditional_format('B3:B14', {'type': '2_color_scale'})
worksheet7.conditional_format('D3:D14', {'type': '2_color_scale',
'min_color': "#FF0000",
'max_color': "#00FF00"})
worksheet7.conditional_format('G3:G14', {'type': '3_color_scale'})
worksheet7.conditional_format('I3:I14', {'type': '3_color_scale',
'min_color': "#C5D9F1",
'mid_color': "#8DB4E3",
'max_color': "#538ED5"})
###############################################################################
#
# Example 8.
#
caption = 'Examples of data bars.'
worksheet8.write('A1', caption)
worksheet8.write('B2', "Default data bars")
worksheet8.write('D2', "Bars only")
worksheet8.write('F2', "With user color")
worksheet8.write('H2', "Solid bars")
worksheet8.write('J2', "Right to left")
worksheet8.write('L2', "Excel 2010 style")
worksheet8.write('N2', "Negative same as positive")
data = range(1, 13)
for row, row_data in enumerate(data):
worksheet8.write(row + 2, 1, row_data)
worksheet8.write(row + 2, 3, row_data)
worksheet8.write(row + 2, 5, row_data)
worksheet8.write(row + 2, 7, row_data)
worksheet8.write(row + 2, 9, row_data)
data = [-1, -2, -3, -2, -1, 0, 1, 2, 3, 2, 1, 0]
for row, row_data in enumerate(data):
worksheet8.write(row + 2, 11, row_data)
worksheet8.write(row + 2, 13, row_data)
worksheet8.conditional_format('B3:B14', {'type': 'data_bar'})
worksheet8.conditional_format('D3:D14', {'type': 'data_bar',
'bar_only': True})
worksheet8.conditional_format('F3:F14', {'type': 'data_bar',
'bar_color': '#63C384'})
worksheet8.conditional_format('H3:H14', {'type': 'data_bar',
'bar_solid': True})
worksheet8.conditional_format('J3:J14', {'type': 'data_bar',
'bar_direction': 'right'})
worksheet8.conditional_format('L3:L14', {'type': 'data_bar',
'data_bar_2010': True})
worksheet8.conditional_format('N3:N14', {'type': 'data_bar',
'bar_negative_color_same': True,
'bar_negative_border_color_same': True})
###############################################################################
#
# Example 9.
#
caption = 'Examples of conditional formats with icon sets.'
data = [
[1, 2, 3],
[1, 2, 3],
[1, 2, 3],
[1, 2, 3],
[1, 2, 3, 4],
[1, 2, 3, 4, 5],
[1, 2, 3, 4, 5],
]
worksheet9.write('A1', caption)
for row, row_data in enumerate(data):
worksheet9.write_row(row + 2, 1, row_data)
worksheet9.conditional_format('B3:D3', {'type': 'icon_set',
'icon_style': '3_traffic_lights'})
worksheet9.conditional_format('B4:D4', {'type': 'icon_set',
'icon_style': '3_traffic_lights',
'reverse_icons': True})
worksheet9.conditional_format('B5:D5', {'type': 'icon_set',
'icon_style': '3_traffic_lights',
'icons_only': True})
worksheet9.conditional_format('B6:D6', {'type': 'icon_set',
'icon_style': '3_arrows'})
worksheet9.conditional_format('B7:E7', {'type': 'icon_set',
'icon_style': '4_arrows'})
worksheet9.conditional_format('B8:F8', {'type': 'icon_set',
'icon_style': '5_arrows'})
worksheet9.conditional_format('B9:F9', {'type': 'icon_set',
'icon_style': '5_ratings'})
workbook.close()
| [
"[email protected]"
] | |
47dd3a6058d1b02752e213e67af6ad515280a64c | 4ddc0d9f83bb9f7dc917749f6085ab1881510bce | /preprocess.py | f045d2bea897ade4e29cf706d4fe1d88e9aadca4 | [] | no_license | dannyng95/VTMS-ER | 0c6a839d4167c3cd312ca41476033d02e0c1caf8 | 9a4eedfaeb67d51268ede29fc82e333ab29e49e9 | refs/heads/main | 2023-01-24T13:45:30.647636 | 2020-12-07T07:03:47 | 2020-12-07T07:03:47 | 318,388,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,019 | py | import pickle
import json
from tqdm import tqdm
import glob2
import codecs
import csv
import re
import sys
import random
import string
import re
# https://realpython.com/python-encodings-guide/
# Accented (tone-marked) characters used in Vietnamese
intab_l = "ạảãàáâậầấẩẫăắằặẳẵóòọõỏôộổỗồốơờớợởỡéèẻẹẽêếềệểễúùụủũưựữửừứíìịỉĩýỳỷỵỹđ"
ascii_lowercase = 'abcdefghijklmnopqrstuvwxyz'
digits = '0123456789'
punctuation = r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
whitespace = ' '
accept_strings = intab_l + ascii_lowercase + digits + punctuation + whitespace
r = re.compile('^[' + accept_strings + ']+$')
#Check Vietnamese function :
def _check_tieng_viet(seq):
if re.match(r, seq.lower()):
return True
else:
return False
# _check_tieng_viet('tiếng việt thần thánh cực kỳ')
# Remove tone marks (diacritics) from a string:
def remove_tone_line(utf8_str):
intab_l = "ạảãàáâậầấẩẫăắằặẳẵóòọõỏôộổỗồốơờớợởỡéèẻẹẽêếềệểễúùụủũưựữửừứíìịỉĩýỳỷỵỹđ"
intab_u = "ẠẢÃÀÁÂẬẦẤẨẪĂẮẰẶẲẴÓÒỌÕỎÔỘỔỖỒỐƠỜỚỢỞỠÉÈẺẸẼÊẾỀỆỂỄÚÙỤỦŨƯỰỮỬỪỨÍÌỊỈĨÝỲỶỴỸĐ"
intab = list(intab_l+intab_u)
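    # outtab mirrors intab position-by-position: 17 'a' forms, 17 'o', 11 'e',
    # 11 'u', 5 'i', 5 'y' and 1 'd' (same again for uppercase).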
outtab_l = "a"*17 + "o"*17 + "e"*11 + "u"*11 + "i"*5 + "y"*5 + "d"
outtab_u = "A"*17 + "O"*17 + "E"*11 + "U"*11 + "I"*5 + "Y"*5 + "D"
outtab = outtab_l + outtab_u
    # Build a regex that matches any accented character, e.g. 'ạ|ả|ã|...'
r = re.compile("|".join(intab))
    # Mapping from accented to unaccented characters, e.g. {'â': 'a'}
replaces_dict = dict(zip(intab, outtab))
    # Substitute every accented character via the regex using the mapping above
non_dia_str = r.sub(lambda m: replaces_dict[m.group(0)], utf8_str)
return non_dia_str
# remove_tone_line('Đi một ngày đàng học 1 sàng khôn')
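# Quick demo (illustrative):
#   remove_tone_line('Đi một ngày đàng')   -> 'Di mot ngay dang'
#   _check_tieng_viet('tiếng việt')        -> True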
| [
"[email protected]"
] | |
8cb3d749f4466525d40f270c8a048fd83397d6b0 | e25e7f0d944d302c2fd13b7517d97c5e0b5558ec | /FixTree_TBCNN/pycparser/c_parser.py | 9a9d09657ad6d9acb7465f692d2e3c1c7d25ba04 | [] | no_license | NizhenJenny/FixTree | 06702a0d529d861e34b045aac286434b0ce3d86f | be30a2cdeb6cc0aa13f29d2cd4d4ce325f00f2a0 | refs/heads/master | 2020-05-24T21:33:04.030992 | 2019-08-19T09:52:10 | 2019-08-19T09:52:10 | 187,477,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63,913 | py | #------------------------------------------------------------------------------
# pycparser: c_parser.py
#
# CParser class: Parser and AST builder for the C language
#
# Copyright (C) 2008-2015, Eli Bendersky
# License: BSD
#------------------------------------------------------------------------------
import re
from .ply import yacc
from . import c_ast
from .c_lexer import CLexer
from .plyparser import PLYParser, Coord, ParseError
from .ast_transforms import fix_switch_cases
class CParser(PLYParser):
def __init__(
self,
lex_optimize=True,
lextab='pycparser.lextab',
yacc_optimize=True,
yacctab='pycparser.yacctab',
yacc_debug=False,
taboutputdir=''):
""" Create a new CParser.
Some arguments for controlling the debug/optimization
level of the parser are provided. The defaults are
tuned for release/performance mode.
The simple rules for using them are:
*) When tweaking CParser/CLexer, set these to False
*) When releasing a stable parser, set to True
lex_optimize:
Set to False when you're modifying the lexer.
Otherwise, changes in the lexer won't be used, if
some lextab.py file exists.
When releasing with a stable lexer, set to True
to save the re-generation of the lexer table on
each run.
lextab:
Points to the lex table that's used for optimized
mode. Only if you're modifying the lexer and want
some tests to avoid re-generating the table, make
this point to a local lex table file (that's been
earlier generated with lex_optimize=True)
yacc_optimize:
Set to False when you're modifying the parser.
Otherwise, changes in the parser won't be used, if
some parsetab.py file exists.
When releasing with a stable parser, set to True
to save the re-generation of the parser table on
each run.
yacctab:
Points to the yacc table that's used for optimized
mode. Only if you're modifying the parser, make
this point to a local yacc table file
yacc_debug:
Generate a parser.out file that explains how yacc
built the parsing table from the grammar.
taboutputdir:
Set this parameter to control the location of generated
lextab and yacctab files.
"""
self.clex = CLexer(
error_func=self._lex_error_func,
on_lbrace_func=self._lex_on_lbrace_func,
on_rbrace_func=self._lex_on_rbrace_func,
type_lookup_func=self._lex_type_lookup_func)
self.clex.build(
optimize=lex_optimize,
lextab=lextab,
outputdir=taboutputdir)
self.tokens = self.clex.tokens
rules_with_opt = [
'abstract_declarator',
'assignment_expression',
'declaration_list',
'declaration_specifiers',
'designation',
'expression',
'identifier_list',
'init_declarator_list',
'initializer_list',
'parameter_type_list',
'specifier_qualifier_list',
'block_item_list',
'type_qualifier_list',
'struct_declarator_list'
]
for rule in rules_with_opt:
self._create_opt_rule(rule)
self.cparser = yacc.yacc(
module=self,
start='translation_unit_or_empty',
debug=yacc_debug,
optimize=yacc_optimize,
tabmodule=yacctab,
outputdir=taboutputdir)
# Stack of scopes for keeping track of symbols. _scope_stack[-1] is
# the current (topmost) scope. Each scope is a dictionary that
# specifies whether a name is a type. If _scope_stack[n][name] is
# True, 'name' is currently a type in the scope. If it's False,
# 'name' is used in the scope but not as a type (for instance, if we
# saw: int name;
# If 'name' is not a key in _scope_stack[n] then 'name' was not defined
# in this scope at all.
self._scope_stack = [dict()]
# Keeps track of the last token given to yacc (the lookahead token)
self._last_yielded_token = None
def parse(self, text, filename='', debuglevel=0):
""" Parses C code and returns an AST.
text:
A string containing the C source code
filename:
Name of the file being parsed (for meaningful
error messages)
debuglevel:
Debug level to yacc
"""
self.clex.filename = filename
self.clex.reset_lineno()
self._scope_stack = [dict()]
self._last_yielded_token = None
return self.cparser.parse(
input=text,
lexer=self.clex,
debug=debuglevel)
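    # Typical use (sketch): the parser is normally driven through parse(),
    # e.g.
    #
    #   parser = CParser()
    #   ast = parser.parse(c_source_text, filename='<none>')
    #   ast.show()
    #
    # pycparser's parse_file() helper wraps this with preprocessing.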
######################-- PRIVATE --######################
def _push_scope(self):
self._scope_stack.append(dict())
def _pop_scope(self):
assert len(self._scope_stack) > 1
self._scope_stack.pop()
def _add_typedef_name(self, name, coord):
""" Add a new typedef name (ie a TYPEID) to the current scope
"""
if not self._scope_stack[-1].get(name, True):
self._parse_error(
"Typedef %r previously declared as non-typedef "
"in this scope" % name, coord)
self._scope_stack[-1][name] = True
def _add_identifier(self, name, coord):
""" Add a new object, function, or enum member name (ie an ID) to the
current scope
"""
if self._scope_stack[-1].get(name, False):
self._parse_error(
"Non-typedef %r previously declared as typedef "
"in this scope" % name, coord)
self._scope_stack[-1][name] = False
def _is_type_in_scope(self, name):
""" Is *name* a typedef-name in the current scope?
"""
for scope in reversed(self._scope_stack):
# If name is an identifier in this scope it shadows typedefs in
# higher scopes.
in_scope = scope.get(name)
if in_scope is not None: return in_scope
return False
def _lex_error_func(self, msg, line, column):
self._parse_error(msg, self._coord(line, column))
def _lex_on_lbrace_func(self):
self._push_scope()
def _lex_on_rbrace_func(self):
self._pop_scope()
def _lex_type_lookup_func(self, name):
""" Looks up types that were previously defined with
typedef.
Passed to the lexer for recognizing identifiers that
are types.
"""
is_type = self._is_type_in_scope(name)
return is_type
def _get_yacc_lookahead_token(self):
""" We need access to yacc's lookahead token in certain cases.
This is the last token yacc requested from the lexer, so we
ask the lexer.
"""
return self.clex.last_token
# To understand what's going on here, read sections A.8.5 and
# A.8.6 of K&R2 very carefully.
#
# A C type consists of a basic type declaration, with a list
# of modifiers. For example:
#
# int *c[5];
#
# The basic declaration here is 'int c', and the pointer and
# the array are the modifiers.
#
# Basic declarations are represented by TypeDecl (from module c_ast) and the
# modifiers are FuncDecl, PtrDecl and ArrayDecl.
#
# The standard states that whenever a new modifier is parsed, it should be
# added to the end of the list of modifiers. For example:
#
# K&R2 A.8.6.2: Array Declarators
#
# In a declaration T D where D has the form
# D1 [constant-expression-opt]
# and the type of the identifier in the declaration T D1 is
# "type-modifier T", the type of the
# identifier of D is "type-modifier array of T"
#
# This is what this method does. The declarator it receives
# can be a list of declarators ending with TypeDecl. It
# tacks the modifier to the end of this list, just before
# the TypeDecl.
#
# Additionally, the modifier may be a list itself. This is
# useful for pointers, that can come as a chain from the rule
# p_pointer. In this case, the whole modifier list is spliced
# into the new location.
def _type_modify_decl(self, decl, modifier):
""" Tacks a type modifier on a declarator, and returns
the modified declarator.
Note: the declarator and modifier may be modified
"""
#~ print '****'
#~ decl.show(offset=3)
#~ modifier.show(offset=3)
#~ print '****'
modifier_head = modifier
modifier_tail = modifier
# The modifier may be a nested list. Reach its tail.
#
while modifier_tail.type:
modifier_tail = modifier_tail.type
# If the decl is a basic type, just tack the modifier onto
# it
#
if isinstance(decl, c_ast.TypeDecl):
modifier_tail.type = decl
return modifier
else:
# Otherwise, the decl is a list of modifiers. Reach
# its tail and splice the modifier onto the tail,
# pointing to the underlying basic type.
#
decl_tail = decl
while not isinstance(decl_tail.type, c_ast.TypeDecl):
decl_tail = decl_tail.type
modifier_tail.type = decl_tail.type
decl_tail.type = modifier_head
return decl
# Due to the order in which declarators are constructed,
# they have to be fixed in order to look like a normal AST.
#
# When a declaration arrives from syntax construction, it has
# these problems:
# * The innermost TypeDecl has no type (because the basic
# type is only known at the uppermost declaration level)
# * The declaration has no variable name, since that is saved
# in the innermost TypeDecl
# * The typename of the declaration is a list of type
# specifiers, and not a node. Here, basic identifier types
# should be separated from more complex types like enums
# and structs.
#
# This method fixes these problems.
#
def _fix_decl_name_type(self, decl, typename):
""" Fixes a declaration. Modifies decl.
"""
# Reach the underlying basic type
#
type = decl
while not isinstance(type, c_ast.TypeDecl):
type = type.type
decl.name = type.declname
type.quals = decl.quals
# The typename is a list of types. If any type in this
# list isn't an IdentifierType, it must be the only
# type in the list (it's illegal to declare "int enum ..")
# If all the types are basic, they're collected in the
# IdentifierType holder.
#
for tn in typename:
if not isinstance(tn, c_ast.IdentifierType):
if len(typename) > 1:
self._parse_error(
"Invalid multiple types specified", tn.coord)
else:
type.type = tn
return decl
if not typename:
# Functions default to returning int
#
if not isinstance(decl.type, c_ast.FuncDecl):
self._parse_error(
"Missing type in declaration", decl.coord)
type.type = c_ast.IdentifierType(
['int'],
coord=decl.coord)
else:
# At this point, we know that typename is a list of IdentifierType
# nodes. Concatenate all the names into a single list.
#
type.type = c_ast.IdentifierType(
[name for id in typename for name in id.names],
coord=typename[0].coord)
return decl
def _add_declaration_specifier(self, declspec, newspec, kind):
""" Declaration specifiers are represented by a dictionary
with the entries:
* qual: a list of type qualifiers
* storage: a list of storage type qualifiers
* type: a list of type specifiers
* function: a list of function specifiers
This method is given a declaration specifier, and a
new specifier of a given kind.
Returns the declaration specifier, with the new
specifier incorporated.
"""
spec = declspec or dict(qual=[], storage=[], type=[], function=[])
spec[kind].insert(0, newspec)
return spec
def _build_declarations(self, spec, decls, typedef_namespace=False):
""" Builds a list of declarations all sharing the given specifiers.
If typedef_namespace is true, each declared name is added
to the "typedef namespace", which also includes objects,
functions, and enum constants.
"""
is_typedef = 'typedef' in spec['storage']
declarations = []
# Bit-fields are allowed to be unnamed.
#
if decls[0].get('bitsize') is not None:
pass
# When redeclaring typedef names as identifiers in inner scopes, a
# problem can occur where the identifier gets grouped into
# spec['type'], leaving decl as None. This can only occur for the
# first declarator.
#
elif decls[0]['decl'] is None:
if len(spec['type']) < 2 or len(spec['type'][-1].names) != 1 or \
not self._is_type_in_scope(spec['type'][-1].names[0]):
coord = '?'
for t in spec['type']:
if hasattr(t, 'coord'):
coord = t.coord
break
self._parse_error('Invalid declaration', coord)
# Make this look as if it came from "direct_declarator:ID"
decls[0]['decl'] = c_ast.TypeDecl(
declname=spec['type'][-1].names[0],
type=None,
quals=None,
coord=spec['type'][-1].coord)
# Remove the "new" type's name from the end of spec['type']
del spec['type'][-1]
# A similar problem can occur where the declaration ends up looking
# like an abstract declarator. Give it a name if this is the case.
#
elif not isinstance(decls[0]['decl'],
(c_ast.Struct, c_ast.Union, c_ast.IdentifierType)):
decls_0_tail = decls[0]['decl']
while not isinstance(decls_0_tail, c_ast.TypeDecl):
decls_0_tail = decls_0_tail.type
if decls_0_tail.declname is None:
decls_0_tail.declname = spec['type'][-1].names[0]
del spec['type'][-1]
for decl in decls:
assert decl['decl'] is not None
if is_typedef:
declaration = c_ast.Typedef(
name=None,
quals=spec['qual'],
storage=spec['storage'],
type=decl['decl'],
coord=decl['decl'].coord)
else:
declaration = c_ast.Decl(
name=None,
quals=spec['qual'],
storage=spec['storage'],
funcspec=spec['function'],
type=decl['decl'],
init=decl.get('init'),
bitsize=decl.get('bitsize'),
coord=decl['decl'].coord)
if isinstance(declaration.type,
(c_ast.Struct, c_ast.Union, c_ast.IdentifierType)):
fixed_decl = declaration
else:
fixed_decl = self._fix_decl_name_type(declaration, spec['type'])
# Add the type name defined by typedef to a
# symbol table (for usage in the lexer)
#
if typedef_namespace:
if is_typedef:
self._add_typedef_name(fixed_decl.name, fixed_decl.coord)
else:
self._add_identifier(fixed_decl.name, fixed_decl.coord)
declarations.append(fixed_decl)
return declarations
def _build_function_definition(self, spec, decl, param_decls, body):
""" Builds a function definition.
"""
assert 'typedef' not in spec['storage']
declaration = self._build_declarations(
spec=spec,
decls=[dict(decl=decl, init=None)],
typedef_namespace=True)[0]
return c_ast.FuncDef(
decl=declaration,
param_decls=param_decls,
body=body,
coord=decl.coord)
def _select_struct_union_class(self, token):
""" Given a token (either STRUCT or UNION), selects the
appropriate AST class.
"""
if token == 'struct':
return c_ast.Struct
else:
return c_ast.Union
##
## Precedence and associativity of operators
##
precedence = (
('left', 'LOR'),
('left', 'LAND'),
('left', 'OR'),
('left', 'XOR'),
('left', 'AND'),
('left', 'EQ', 'NE'),
('left', 'GT', 'GE', 'LT', 'LE'),
('left', 'RSHIFT', 'LSHIFT'),
('left', 'PLUS', 'MINUS'),
('left', 'TIMES', 'DIVIDE', 'MOD')
)
##
## Grammar productions
## Implementation of the BNF defined in K&R2 A.13
##
# Wrapper around a translation unit, to allow for empty input.
# Not strictly part of the C99 Grammar, but useful in practice.
#
def p_translation_unit_or_empty(self, p):
""" translation_unit_or_empty : translation_unit
| empty
"""
if p[1] is None:
p[0] = c_ast.FileAST([])
else:
p[0] = c_ast.FileAST(p[1])
def p_translation_unit_1(self, p):
""" translation_unit : external_declaration
"""
# Note: external_declaration is already a list
#
p[0] = p[1]
def p_translation_unit_2(self, p):
""" translation_unit : translation_unit external_declaration
"""
if p[2] is not None:
p[1].extend(p[2])
p[0] = p[1]
# Declarations always come as lists (because they can be
# several in one line), so we wrap the function definition
# into a list as well, to make the return value of
# external_declaration homogenous.
#
def p_external_declaration_1(self, p):
""" external_declaration : function_definition
"""
p[0] = [p[1]]
def p_external_declaration_2(self, p):
""" external_declaration : declaration
"""
p[0] = p[1]
def p_external_declaration_3(self, p):
""" external_declaration : pp_directive
"""
p[0] = p[1]
def p_external_declaration_4(self, p):
""" external_declaration : SEMI
"""
p[0] = None
def p_pp_directive(self, p):
""" pp_directive : PPHASH
"""
self._parse_error('Directives not supported yet',
self._coord(p.lineno(1)))
# In function definitions, the declarator can be followed by
    # a declaration list, for old "K&R style" function definitions.
#
def p_function_definition_1(self, p):
""" function_definition : declarator declaration_list_opt compound_statement
"""
# no declaration specifiers - 'int' becomes the default type
spec = dict(
qual=[],
storage=[],
type=[c_ast.IdentifierType(['int'],
coord=self._coord(p.lineno(1)))],
function=[])
p[0] = self._build_function_definition(
spec=spec,
decl=p[1],
param_decls=p[2],
body=p[3])
def p_function_definition_2(self, p):
""" function_definition : declaration_specifiers declarator declaration_list_opt compound_statement
"""
spec = p[1]
p[0] = self._build_function_definition(
spec=spec,
decl=p[2],
param_decls=p[3],
body=p[4])
def p_statement(self, p):
""" statement : labeled_statement
| expression_statement
| compound_statement
| selection_statement
| iteration_statement
| jump_statement
"""
p[0] = p[1]
# In C, declarations can come several in a line:
# int x, *px, romulo = 5;
#
# However, for the AST, we will split them to separate Decl
# nodes.
#
# This rule splits its declarations and always returns a list
# of Decl nodes, even if it's one element long.
#
def p_decl_body(self, p):
""" decl_body : declaration_specifiers init_declarator_list_opt
"""
spec = p[1]
# p[2] (init_declarator_list_opt) is either a list or None
#
if p[2] is None:
# By the standard, you must have at least one declarator unless
# declaring a structure tag, a union tag, or the members of an
# enumeration.
#
ty = spec['type']
s_u_or_e = (c_ast.Struct, c_ast.Union, c_ast.Enum)
if len(ty) == 1 and isinstance(ty[0], s_u_or_e):
decls = [c_ast.Decl(
name=None,
quals=spec['qual'],
storage=spec['storage'],
funcspec=spec['function'],
type=ty[0],
init=None,
bitsize=None,
coord=ty[0].coord)]
# However, this case can also occur on redeclared identifiers in
# an inner scope. The trouble is that the redeclared type's name
# gets grouped into declaration_specifiers; _build_declarations
# compensates for this.
#
else:
decls = self._build_declarations(
spec=spec,
decls=[dict(decl=None, init=None)],
typedef_namespace=True)
else:
decls = self._build_declarations(
spec=spec,
decls=p[2],
typedef_namespace=True)
p[0] = decls
# The declaration has been split to a decl_body sub-rule and
# SEMI, because having them in a single rule created a problem
# for defining typedefs.
#
# If a typedef line was directly followed by a line using the
# type defined with the typedef, the type would not be
# recognized. This is because to reduce the declaration rule,
# the parser's lookahead asked for the token after SEMI, which
# was the type from the next line, and the lexer had no chance
# to see the updated type symbol table.
#
# Splitting solves this problem, because after seeing SEMI,
# the parser reduces decl_body, which actually adds the new
# type into the table to be seen by the lexer before the next
# line is reached.
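# A minimal illustration of the problematic input (example only):
#
#   typedef int T;
#   T x;
#
# Without the split, reducing the first declaration needs the lookahead token,
# which is already the 'T' of the second line, and the lexer would still return
# it as a plain ID because the typedef name has not been recorded yet.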
def p_declaration(self, p):
""" declaration : decl_body SEMI
"""
p[0] = p[1]
# Since each declaration is a list of declarations, this
# rule will combine all the declarations and return a single
# list
#
def p_declaration_list(self, p):
""" declaration_list : declaration
| declaration_list declaration
"""
p[0] = p[1] if len(p) == 2 else p[1] + p[2]
def p_declaration_specifiers_1(self, p):
""" declaration_specifiers : type_qualifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'qual')
def p_declaration_specifiers_2(self, p):
""" declaration_specifiers : type_specifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'type')
def p_declaration_specifiers_3(self, p):
""" declaration_specifiers : storage_class_specifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'storage')
def p_declaration_specifiers_4(self, p):
""" declaration_specifiers : function_specifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'function')
def p_storage_class_specifier(self, p):
""" storage_class_specifier : AUTO
| REGISTER
| STATIC
| EXTERN
| TYPEDEF
"""
p[0] = p[1]
def p_function_specifier(self, p):
""" function_specifier : INLINE
"""
p[0] = p[1]
def p_type_specifier_1(self, p):
""" type_specifier : VOID
| _BOOL
| CHAR
| SHORT
| INT
| LONG
| FLOAT
| DOUBLE
| _COMPLEX
| SIGNED
| UNSIGNED
"""
p[0] = c_ast.IdentifierType([p[1]], coord=self._coord(p.lineno(1)))
def p_type_specifier_2(self, p):
""" type_specifier : typedef_name
| enum_specifier
| struct_or_union_specifier
"""
p[0] = p[1]
def p_type_qualifier(self, p):
""" type_qualifier : CONST
| RESTRICT
| VOLATILE
"""
p[0] = p[1]
def p_init_declarator_list_1(self, p):
""" init_declarator_list : init_declarator
| init_declarator_list COMMA init_declarator
"""
p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]]
# If the code is declaring a variable that was declared a typedef in an
# outer scope, yacc will think the name is part of declaration_specifiers,
# not init_declarator, and will then get confused by EQUALS. Pass None
# up in place of declarator, and handle this at a higher level.
#
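# For example (illustration only):
#
#   typedef char TT;
#   void foo(void) {
#       int TT = 5;   /* TT is lexed as TYPEID, so the '= 5' arrives via this rule */
#   }
#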
def p_init_declarator_list_2(self, p):
""" init_declarator_list : EQUALS initializer
"""
p[0] = [dict(decl=None, init=p[2])]
# Similarly, if the code contains duplicate typedefs of, for example,
# array types, the array portion will appear as an abstract declarator.
#
def p_init_declarator_list_3(self, p):
""" init_declarator_list : abstract_declarator
"""
p[0] = [dict(decl=p[1], init=None)]
# Returns a {decl=<declarator> : init=<initializer>} dictionary
# If there's no initializer, uses None
#
def p_init_declarator(self, p):
""" init_declarator : declarator
| declarator EQUALS initializer
"""
p[0] = dict(decl=p[1], init=(p[3] if len(p) > 2 else None))
def p_specifier_qualifier_list_1(self, p):
""" specifier_qualifier_list : type_qualifier specifier_qualifier_list_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'qual')
def p_specifier_qualifier_list_2(self, p):
""" specifier_qualifier_list : type_specifier specifier_qualifier_list_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'type')
# TYPEID is allowed here (and in other struct/enum related tag names), because
# struct/enum tags reside in their own namespace and can be named the same as types
#
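# For example (illustration only):
#
#   typedef int Node;
#   struct Node { int payload; };   /* valid: struct tags live in their own namespace */
#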
def p_struct_or_union_specifier_1(self, p):
""" struct_or_union_specifier : struct_or_union ID
| struct_or_union TYPEID
"""
klass = self._select_struct_union_class(p[1])
p[0] = klass(
name=p[2],
decls=None,
coord=self._coord(p.lineno(2)))
def p_struct_or_union_specifier_2(self, p):
""" struct_or_union_specifier : struct_or_union brace_open struct_declaration_list brace_close
"""
klass = self._select_struct_union_class(p[1])
p[0] = klass(
name=None,
decls=p[3],
coord=self._coord(p.lineno(2)))
def p_struct_or_union_specifier_3(self, p):
""" struct_or_union_specifier : struct_or_union ID brace_open struct_declaration_list brace_close
| struct_or_union TYPEID brace_open struct_declaration_list brace_close
"""
klass = self._select_struct_union_class(p[1])
p[0] = klass(
name=p[2],
decls=p[4],
coord=self._coord(p.lineno(2)))
def p_struct_or_union(self, p):
""" struct_or_union : STRUCT
| UNION
"""
p[0] = p[1]
# Combine all declarations into a single list
#
def p_struct_declaration_list(self, p):
""" struct_declaration_list : struct_declaration
| struct_declaration_list struct_declaration
"""
p[0] = p[1] if len(p) == 2 else p[1] + p[2]
def p_struct_declaration_1(self, p):
""" struct_declaration : specifier_qualifier_list struct_declarator_list_opt SEMI
"""
spec = p[1]
assert 'typedef' not in spec['storage']
if p[2] is not None:
decls = self._build_declarations(
spec=spec,
decls=p[2])
elif len(spec['type']) == 1:
# Anonymous struct/union, gcc extension, C1x feature.
# Although the standard only allows structs/unions here, I see no
# reason to disallow other types since some compilers have typedefs
# here, and pycparser isn't about rejecting all invalid code.
#
node = spec['type'][0]
if isinstance(node, c_ast.Node):
decl_type = node
else:
decl_type = c_ast.IdentifierType(node)
decls = self._build_declarations(
spec=spec,
decls=[dict(decl=decl_type)])
else:
# Structure/union members can have the same names as typedefs.
# The trouble is that the member's name gets grouped into
# specifier_qualifier_list; _build_declarations compensates.
#
decls = self._build_declarations(
spec=spec,
decls=[dict(decl=None, init=None)])
p[0] = decls
def p_struct_declaration_2(self, p):
""" struct_declaration : specifier_qualifier_list abstract_declarator SEMI
"""
# "Abstract declarator?!", you ask? Structure members can have the
# same names as typedefs. The trouble is that the member's name gets
# grouped into specifier_qualifier_list, leaving any remainder to
# appear as an abstract declarator, as in:
# typedef int Foo;
# struct { Foo Foo[3]; };
#
p[0] = self._build_declarations(
spec=p[1],
decls=[dict(decl=p[2], init=None)])
def p_struct_declarator_list(self, p):
""" struct_declarator_list : struct_declarator
| struct_declarator_list COMMA struct_declarator
"""
p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]]
# struct_declarator passes up a dict with the keys: decl (for
# the underlying declarator) and bitsize (for the bitsize)
#
def p_struct_declarator_1(self, p):
""" struct_declarator : declarator
"""
p[0] = {'decl': p[1], 'bitsize': None}
def p_struct_declarator_2(self, p):
""" struct_declarator : declarator COLON constant_expression
| COLON constant_expression
"""
if len(p) > 3:
p[0] = {'decl': p[1], 'bitsize': p[3]}
else:
p[0] = {'decl': c_ast.TypeDecl(None, None, None), 'bitsize': p[2]}
def p_enum_specifier_1(self, p):
""" enum_specifier : ENUM ID
| ENUM TYPEID
"""
p[0] = c_ast.Enum(p[2], None, self._coord(p.lineno(1)))
def p_enum_specifier_2(self, p):
""" enum_specifier : ENUM brace_open enumerator_list brace_close
"""
p[0] = c_ast.Enum(None, p[3], self._coord(p.lineno(1)))
def p_enum_specifier_3(self, p):
""" enum_specifier : ENUM ID brace_open enumerator_list brace_close
| ENUM TYPEID brace_open enumerator_list brace_close
"""
p[0] = c_ast.Enum(p[2], p[4], self._coord(p.lineno(1)))
def p_enumerator_list(self, p):
""" enumerator_list : enumerator
| enumerator_list COMMA
| enumerator_list COMMA enumerator
"""
if len(p) == 2:
p[0] = c_ast.EnumeratorList([p[1]], p[1].coord)
elif len(p) == 3:
p[0] = p[1]
else:
p[1].enumerators.append(p[3])
p[0] = p[1]
def p_enumerator(self, p):
""" enumerator : ID
| ID EQUALS constant_expression
"""
if len(p) == 2:
enumerator = c_ast.Enumerator(
p[1], None,
self._coord(p.lineno(1)))
else:
enumerator = c_ast.Enumerator(
p[1], p[3],
self._coord(p.lineno(1)))
self._add_identifier(enumerator.name, enumerator.coord)
p[0] = enumerator
def p_declarator_1(self, p):
""" declarator : direct_declarator
"""
p[0] = p[1]
def p_declarator_2(self, p):
""" declarator : pointer direct_declarator
"""
p[0] = self._type_modify_decl(p[2], p[1])
# Since it's impossible for a type to be specified after a pointer, assume
# it's intended to be the name for this declaration. _add_identifier will
# raise an error if this TYPEID can't be redeclared.
#
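# For example (illustration only):
#
#   typedef char TT;
#   int * TT;   /* TT is re-used here as the declared name, not as a type */
#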
def p_declarator_3(self, p):
""" declarator : pointer TYPEID
"""
decl = c_ast.TypeDecl(
declname=p[2],
type=None,
quals=None,
coord=self._coord(p.lineno(2)))
p[0] = self._type_modify_decl(decl, p[1])
def p_direct_declarator_1(self, p):
""" direct_declarator : ID
"""
p[0] = c_ast.TypeDecl(
declname=p[1],
type=None,
quals=None,
coord=self._coord(p.lineno(1)))
def p_direct_declarator_2(self, p):
""" direct_declarator : LPAREN declarator RPAREN
"""
p[0] = p[2]
def p_direct_declarator_3(self, p):
""" direct_declarator : direct_declarator LBRACKET type_qualifier_list_opt assignment_expression_opt RBRACKET
"""
quals = (p[3] if len(p) > 5 else []) or []
# Accept dimension qualifiers
# Per C99 6.7.5.3 p7
arr = c_ast.ArrayDecl(
type=None,
dim=p[4] if len(p) > 5 else p[3],
dim_quals=quals,
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_declarator_4(self, p):
""" direct_declarator : direct_declarator LBRACKET STATIC type_qualifier_list_opt assignment_expression RBRACKET
| direct_declarator LBRACKET type_qualifier_list STATIC assignment_expression RBRACKET
"""
# Using slice notation for PLY objects doesn't work in Python 3 for the
# version of PLY embedded with pycparser; see PLY Google Code issue 30.
# Work around that here by listing the two elements separately.
listed_quals = [item if isinstance(item, list) else [item]
for item in [p[3],p[4]]]
dim_quals = [qual for sublist in listed_quals for qual in sublist
if qual is not None]
arr = c_ast.ArrayDecl(
type=None,
dim=p[5],
dim_quals=dim_quals,
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
# Special for VLAs
#
def p_direct_declarator_5(self, p):
""" direct_declarator : direct_declarator LBRACKET type_qualifier_list_opt TIMES RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
dim=c_ast.ID(p[4], self._coord(p.lineno(4))),
dim_quals=p[3] if p[3] != None else [],
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_declarator_6(self, p):
""" direct_declarator : direct_declarator LPAREN parameter_type_list RPAREN
| direct_declarator LPAREN identifier_list_opt RPAREN
"""
func = c_ast.FuncDecl(
args=p[3],
type=None,
coord=p[1].coord)
# To see why _get_yacc_lookahead_token is needed, consider:
# typedef char TT;
# void foo(int TT) { TT = 10; }
# Outside the function, TT is a typedef, but inside (starting and
# ending with the braces) it's a parameter. The trouble begins with
# yacc's lookahead token. We don't know if we're declaring or
# defining a function until we see LBRACE, but if we wait for yacc to
# trigger a rule on that token, then TT will have already been read
# and incorrectly interpreted as TYPEID. We need to add the
# parameters to the scope the moment the lexer sees LBRACE.
#
if self._get_yacc_lookahead_token().type == "LBRACE":
if func.args is not None:
for param in func.args.params:
if isinstance(param, c_ast.EllipsisParam): break
self._add_identifier(param.name, param.coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=func)
def p_pointer(self, p):
""" pointer : TIMES type_qualifier_list_opt
| TIMES type_qualifier_list_opt pointer
"""
coord = self._coord(p.lineno(1))
# Pointer decls nest from inside out. This is important when different
# levels have different qualifiers. For example:
#
# char * const * p;
#
# Means "pointer to const pointer to char"
#
# While:
#
# char ** const p;
#
# Means "const pointer to pointer to char"
#
# So when we construct PtrDecl nestings, the leftmost pointer goes in
# as the most nested type.
nested_type = c_ast.PtrDecl(quals=p[2] or [], type=None, coord=coord)
if len(p) > 3:
tail_type = p[3]
while tail_type.type is not None:
tail_type = tail_type.type
tail_type.type = nested_type
p[0] = p[3]
else:
p[0] = nested_type
def p_type_qualifier_list(self, p):
""" type_qualifier_list : type_qualifier
| type_qualifier_list type_qualifier
"""
p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]]
def p_parameter_type_list(self, p):
""" parameter_type_list : parameter_list
| parameter_list COMMA ELLIPSIS
"""
if len(p) > 2:
p[1].params.append(c_ast.EllipsisParam(self._coord(p.lineno(3))))
p[0] = p[1]
def p_parameter_list(self, p):
""" parameter_list : parameter_declaration
| parameter_list COMMA parameter_declaration
"""
if len(p) == 2: # single parameter
p[0] = c_ast.ParamList([p[1]], p[1].coord)
else:
p[1].params.append(p[3])
p[0] = p[1]
def p_parameter_declaration_1(self, p):
""" parameter_declaration : declaration_specifiers declarator
"""
spec = p[1]
if not spec['type']:
spec['type'] = [c_ast.IdentifierType(['int'],
coord=self._coord(p.lineno(1)))]
p[0] = self._build_declarations(
spec=spec,
decls=[dict(decl=p[2])])[0]
def p_parameter_declaration_2(self, p):
""" parameter_declaration : declaration_specifiers abstract_declarator_opt
"""
spec = p[1]
if not spec['type']:
spec['type'] = [c_ast.IdentifierType(['int'],
coord=self._coord(p.lineno(1)))]
# Parameters can have the same names as typedefs. The trouble is that
# the parameter's name gets grouped into declaration_specifiers, making
# it look like an old-style declaration; compensate.
#
if len(spec['type']) > 1 and len(spec['type'][-1].names) == 1 and \
self._is_type_in_scope(spec['type'][-1].names[0]):
decl = self._build_declarations(
spec=spec,
decls=[dict(decl=p[2], init=None)])[0]
# This truly is an old-style parameter declaration
#
else:
decl = c_ast.Typename(
name='',
quals=spec['qual'],
type=p[2] or c_ast.TypeDecl(None, None, None),
coord=self._coord(p.lineno(2)))
typename = spec['type']
decl = self._fix_decl_name_type(decl, typename)
p[0] = decl
def p_identifier_list(self, p):
""" identifier_list : identifier
| identifier_list COMMA identifier
"""
if len(p) == 2: # single parameter
p[0] = c_ast.ParamList([p[1]], p[1].coord)
else:
p[1].params.append(p[3])
p[0] = p[1]
def p_initializer_1(self, p):
""" initializer : assignment_expression
"""
p[0] = p[1]
def p_initializer_2(self, p):
""" initializer : brace_open initializer_list_opt brace_close
| brace_open initializer_list COMMA brace_close
"""
if p[2] is None:
p[0] = c_ast.InitList([], self._coord(p.lineno(1)))
else:
p[0] = p[2]
def p_initializer_list(self, p):
""" initializer_list : designation_opt initializer
| initializer_list COMMA designation_opt initializer
"""
if len(p) == 3: # single initializer
init = p[2] if p[1] is None else c_ast.NamedInitializer(p[1], p[2])
p[0] = c_ast.InitList([init], p[2].coord)
else:
init = p[4] if p[3] is None else c_ast.NamedInitializer(p[3], p[4])
p[1].exprs.append(init)
p[0] = p[1]
def p_designation(self, p):
""" designation : designator_list EQUALS
"""
p[0] = p[1]
# Designators are represented as a list of nodes, in the order in which
# they're written in the code.
#
def p_designator_list(self, p):
""" designator_list : designator
| designator_list designator
"""
p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]]
def p_designator(self, p):
""" designator : LBRACKET constant_expression RBRACKET
| PERIOD identifier
"""
p[0] = p[2]
def p_type_name(self, p):
""" type_name : specifier_qualifier_list abstract_declarator_opt
"""
#~ print '=========='
#~ print p[1]
#~ print p[2]
#~ print p[2].children()
#~ print '=========='
typename = c_ast.Typename(
name='',
quals=p[1]['qual'],
type=p[2] or c_ast.TypeDecl(None, None, None),
coord=self._coord(p.lineno(2)))
p[0] = self._fix_decl_name_type(typename, p[1]['type'])
def p_abstract_declarator_1(self, p):
""" abstract_declarator : pointer
"""
dummytype = c_ast.TypeDecl(None, None, None)
p[0] = self._type_modify_decl(
decl=dummytype,
modifier=p[1])
def p_abstract_declarator_2(self, p):
""" abstract_declarator : pointer direct_abstract_declarator
"""
p[0] = self._type_modify_decl(p[2], p[1])
def p_abstract_declarator_3(self, p):
""" abstract_declarator : direct_abstract_declarator
"""
p[0] = p[1]
# Creating and using direct_abstract_declarator_opt here
# instead of listing both direct_abstract_declarator and the
# lack of it in the beginning of _1 and _2 caused two
# shift/reduce errors.
#
def p_direct_abstract_declarator_1(self, p):
""" direct_abstract_declarator : LPAREN abstract_declarator RPAREN """
p[0] = p[2]
def p_direct_abstract_declarator_2(self, p):
""" direct_abstract_declarator : direct_abstract_declarator LBRACKET assignment_expression_opt RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
dim=p[3],
dim_quals=[],
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_abstract_declarator_3(self, p):
""" direct_abstract_declarator : LBRACKET assignment_expression_opt RBRACKET
"""
p[0] = c_ast.ArrayDecl(
type=c_ast.TypeDecl(None, None, None),
dim=p[2],
dim_quals=[],
coord=self._coord(p.lineno(1)))
def p_direct_abstract_declarator_4(self, p):
""" direct_abstract_declarator : direct_abstract_declarator LBRACKET TIMES RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
dim=c_ast.ID(p[3], self._coord(p.lineno(3))),
dim_quals=[],
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_abstract_declarator_5(self, p):
""" direct_abstract_declarator : LBRACKET TIMES RBRACKET
"""
p[0] = c_ast.ArrayDecl(
type=c_ast.TypeDecl(None, None, None),
dim=c_ast.ID(p[3], self._coord(p.lineno(3))),
dim_quals=[],
coord=self._coord(p.lineno(1)))
def p_direct_abstract_declarator_6(self, p):
""" direct_abstract_declarator : direct_abstract_declarator LPAREN parameter_type_list_opt RPAREN
"""
func = c_ast.FuncDecl(
args=p[3],
type=None,
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=func)
def p_direct_abstract_declarator_7(self, p):
""" direct_abstract_declarator : LPAREN parameter_type_list_opt RPAREN
"""
p[0] = c_ast.FuncDecl(
args=p[2],
type=c_ast.TypeDecl(None, None, None),
coord=self._coord(p.lineno(1)))
# declaration is a list, statement isn't. To make it consistent, block_item
# will always be a list
#
def p_block_item(self, p):
""" block_item : declaration
| statement
"""
p[0] = p[1] if isinstance(p[1], list) else [p[1]]
# Since we made block_item a list, this just combines lists
#
def p_block_item_list(self, p):
""" block_item_list : block_item
| block_item_list block_item
"""
# Empty block items (plain ';') produce [None], so ignore them
p[0] = p[1] if (len(p) == 2 or p[2] == [None]) else p[1] + p[2]
def p_compound_statement_1(self, p):
""" compound_statement : brace_open block_item_list_opt brace_close """
p[0] = c_ast.Compound(
block_items=p[2],
coord=self._coord(p.lineno(1)))
def p_labeled_statement_1(self, p):
""" labeled_statement : ID COLON statement """
p[0] = c_ast.Label(p[1], p[3], self._coord(p.lineno(1)))
def p_labeled_statement_2(self, p):
""" labeled_statement : CASE constant_expression COLON statement """
p[0] = c_ast.Case(p[2], [p[4]], self._coord(p.lineno(1)))
def p_labeled_statement_3(self, p):
""" labeled_statement : DEFAULT COLON statement """
p[0] = c_ast.Default([p[3]], self._coord(p.lineno(1)))
def p_selection_statement_1(self, p):
""" selection_statement : IF LPAREN expression RPAREN statement """
p[0] = c_ast.If(p[3], p[5], None, self._coord(p.lineno(1)))
def p_selection_statement_2(self, p):
""" selection_statement : IF LPAREN expression RPAREN statement ELSE statement """
p[0] = c_ast.If(p[3], p[5], p[7], self._coord(p.lineno(1)))
def p_selection_statement_3(self, p):
""" selection_statement : SWITCH LPAREN expression RPAREN statement """
p[0] = fix_switch_cases(
c_ast.Switch(p[3], p[5], self._coord(p.lineno(1))))
def p_iteration_statement_1(self, p):
""" iteration_statement : WHILE LPAREN expression RPAREN statement """
p[0] = c_ast.While(p[3], p[5], self._coord(p.lineno(1)))
def p_iteration_statement_2(self, p):
""" iteration_statement : DO statement WHILE LPAREN expression RPAREN SEMI """
p[0] = c_ast.DoWhile(p[5], p[2], self._coord(p.lineno(1)))
def p_iteration_statement_3(self, p):
""" iteration_statement : FOR LPAREN expression_opt SEMI expression_opt SEMI expression_opt RPAREN statement """
p[0] = c_ast.For(p[3], p[5], p[7], p[9], self._coord(p.lineno(1)))
def p_iteration_statement_4(self, p):
""" iteration_statement : FOR LPAREN declaration expression_opt SEMI expression_opt RPAREN statement """
p[0] = c_ast.For(c_ast.DeclList(p[3], self._coord(p.lineno(1))),
p[4], p[6], p[8], self._coord(p.lineno(1)))
def p_jump_statement_1(self, p):
""" jump_statement : GOTO ID SEMI """
p[0] = c_ast.Goto(p[2], self._coord(p.lineno(1)))
def p_jump_statement_2(self, p):
""" jump_statement : BREAK SEMI """
p[0] = c_ast.Break(self._coord(p.lineno(1)))
def p_jump_statement_3(self, p):
""" jump_statement : CONTINUE SEMI """
p[0] = c_ast.Continue(self._coord(p.lineno(1)))
def p_jump_statement_4(self, p):
""" jump_statement : RETURN expression SEMI
| RETURN SEMI
"""
p[0] = c_ast.Return(p[2] if len(p) == 4 else None, self._coord(p.lineno(1)))
def p_expression_statement(self, p):
""" expression_statement : expression_opt SEMI """
if p[1] is None:
p[0] = c_ast.EmptyStatement(self._coord(p.lineno(1)))
else:
p[0] = p[1]
def p_expression(self, p):
""" expression : assignment_expression
| expression COMMA assignment_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
if not isinstance(p[1], c_ast.ExprList):
p[1] = c_ast.ExprList([p[1]], p[1].coord)
p[1].exprs.append(p[3])
p[0] = p[1]
def p_typedef_name(self, p):
""" typedef_name : TYPEID """
p[0] = c_ast.IdentifierType([p[1]], coord=self._coord(p.lineno(1)))
def p_assignment_expression(self, p):
""" assignment_expression : conditional_expression
| unary_expression assignment_operator assignment_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = c_ast.Assignment(p[2], p[1], p[3], p[1].coord)
# K&R2 defines these as many separate rules, to encode
# precedence and associativity. Why work hard ? I'll just use
# the built in precedence/associativity specification feature
# of PLY. (see precedence declaration above)
#
def p_assignment_operator(self, p):
""" assignment_operator : EQUALS
| XOREQUAL
| TIMESEQUAL
| DIVEQUAL
| MODEQUAL
| PLUSEQUAL
| MINUSEQUAL
| LSHIFTEQUAL
| RSHIFTEQUAL
| ANDEQUAL
| OREQUAL
"""
p[0] = p[1]
def p_constant_expression(self, p):
""" constant_expression : conditional_expression """
p[0] = p[1]
def p_conditional_expression(self, p):
""" conditional_expression : binary_expression
| binary_expression CONDOP expression COLON conditional_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = c_ast.TernaryOp(p[1], p[3], p[5], p[1].coord)
def p_binary_expression(self, p):
""" binary_expression : cast_expression
| binary_expression TIMES binary_expression
| binary_expression DIVIDE binary_expression
| binary_expression MOD binary_expression
| binary_expression PLUS binary_expression
| binary_expression MINUS binary_expression
| binary_expression RSHIFT binary_expression
| binary_expression LSHIFT binary_expression
| binary_expression LT binary_expression
| binary_expression LE binary_expression
| binary_expression GE binary_expression
| binary_expression GT binary_expression
| binary_expression EQ binary_expression
| binary_expression NE binary_expression
| binary_expression AND binary_expression
| binary_expression OR binary_expression
| binary_expression XOR binary_expression
| binary_expression LAND binary_expression
| binary_expression LOR binary_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = c_ast.BinaryOp(p[2], p[1], p[3], p[1].coord)
def p_cast_expression_1(self, p):
""" cast_expression : unary_expression """
p[0] = p[1]
def p_cast_expression_2(self, p):
""" cast_expression : LPAREN type_name RPAREN cast_expression """
p[0] = c_ast.Cast(p[2], p[4], self._coord(p.lineno(1)))
def p_unary_expression_1(self, p):
""" unary_expression : postfix_expression """
p[0] = p[1]
def p_unary_expression_2(self, p):
""" unary_expression : PLUSPLUS unary_expression
| MINUSMINUS unary_expression
| unary_operator cast_expression
"""
p[0] = c_ast.UnaryOp(p[1], p[2], p[2].coord)
def p_unary_expression_3(self, p):
""" unary_expression : SIZEOF unary_expression
| SIZEOF LPAREN type_name RPAREN
"""
p[0] = c_ast.UnaryOp(
p[1],
p[2] if len(p) == 3 else p[3],
self._coord(p.lineno(1)))
def p_unary_operator(self, p):
""" unary_operator : AND
| TIMES
| PLUS
| MINUS
| NOT
| LNOT
"""
p[0] = p[1]
def p_postfix_expression_1(self, p):
""" postfix_expression : primary_expression """
p[0] = p[1]
def p_postfix_expression_2(self, p):
""" postfix_expression : postfix_expression LBRACKET expression RBRACKET """
p[0] = c_ast.ArrayRef(p[1], p[3], p[1].coord)
def p_postfix_expression_3(self, p):
""" postfix_expression : postfix_expression LPAREN argument_expression_list RPAREN
| postfix_expression LPAREN RPAREN
"""
p[0] = c_ast.FuncCall(p[1], p[3] if len(p) == 5 else None, p[1].coord)
def p_postfix_expression_4(self, p):
""" postfix_expression : postfix_expression PERIOD ID
| postfix_expression PERIOD TYPEID
| postfix_expression ARROW ID
| postfix_expression ARROW TYPEID
"""
field = c_ast.ID(p[3], self._coord(p.lineno(3)))
p[0] = c_ast.StructRef(p[1], p[2], field, p[1].coord)
def p_postfix_expression_5(self, p):
""" postfix_expression : postfix_expression PLUSPLUS
| postfix_expression MINUSMINUS
"""
p[0] = c_ast.UnaryOp('p' + p[2], p[1], p[1].coord)
def p_postfix_expression_6(self, p):
""" postfix_expression : LPAREN type_name RPAREN brace_open initializer_list brace_close
| LPAREN type_name RPAREN brace_open initializer_list COMMA brace_close
"""
p[0] = c_ast.CompoundLiteral(p[2], p[5])
def p_primary_expression_1(self, p):
""" primary_expression : identifier """
p[0] = p[1]
def p_primary_expression_2(self, p):
""" primary_expression : constant """
p[0] = p[1]
def p_primary_expression_3(self, p):
""" primary_expression : unified_string_literal
| unified_wstring_literal
"""
p[0] = p[1]
def p_primary_expression_4(self, p):
""" primary_expression : LPAREN expression RPAREN """
p[0] = p[2]
def p_primary_expression_5(self, p):
""" primary_expression : OFFSETOF LPAREN type_name COMMA identifier RPAREN
"""
coord = self._coord(p.lineno(1))
p[0] = c_ast.FuncCall(c_ast.ID(p[1], coord),
c_ast.ExprList([p[3], p[5]], coord),
coord)
def p_argument_expression_list(self, p):
""" argument_expression_list : assignment_expression
| argument_expression_list COMMA assignment_expression
"""
if len(p) == 2: # single expr
p[0] = c_ast.ExprList([p[1]], p[1].coord)
else:
p[1].exprs.append(p[3])
p[0] = p[1]
def p_identifier(self, p):
""" identifier : ID """
p[0] = c_ast.ID(p[1], self._coord(p.lineno(1)))
def p_constant_1(self, p):
""" constant : INT_CONST_DEC
| INT_CONST_OCT
| INT_CONST_HEX
| INT_CONST_BIN
"""
p[0] = c_ast.Constant(
'int', p[1], self._coord(p.lineno(1)))
def p_constant_2(self, p):
""" constant : FLOAT_CONST
| HEX_FLOAT_CONST
"""
p[0] = c_ast.Constant(
'float', p[1], self._coord(p.lineno(1)))
def p_constant_3(self, p):
""" constant : CHAR_CONST
| WCHAR_CONST
"""
p[0] = c_ast.Constant(
'char', p[1], self._coord(p.lineno(1)))
# The "unified" string and wstring literal rules are for supporting
# concatenation of adjacent string literals.
# I.e. "hello " "world" is seen by the C compiler as a single string literal
# with the value "hello world"
#
def p_unified_string_literal(self, p):
""" unified_string_literal : STRING_LITERAL
| unified_string_literal STRING_LITERAL
"""
if len(p) == 2: # single literal
p[0] = c_ast.Constant(
'string', p[1], self._coord(p.lineno(1)))
else:
p[1].value = p[1].value[:-1] + p[2][1:]
p[0] = p[1]
def p_unified_wstring_literal(self, p):
""" unified_wstring_literal : WSTRING_LITERAL
| unified_wstring_literal WSTRING_LITERAL
"""
if len(p) == 2: # single literal
p[0] = c_ast.Constant(
'string', p[1], self._coord(p.lineno(1)))
else:
p[1].value = p[1].value.rstrip()[:-1] + p[2][2:]
p[0] = p[1]
def p_brace_open(self, p):
""" brace_open : LBRACE
"""
p[0] = p[1]
def p_brace_close(self, p):
""" brace_close : RBRACE
"""
p[0] = p[1]
def p_empty(self, p):
'empty : '
p[0] = None
def p_error(self, p):
# If error recovery is added here in the future, make sure
# _get_yacc_lookahead_token still works!
#
if p:
self._parse_error(
'before: %s' % p.value,
self._coord(lineno=p.lineno,
column=self.clex.find_tok_column(p)))
else:
self._parse_error('At end of input', '')
#------------------------------------------------------------------------------
if __name__ == "__main__":
import pprint
import time, sys
#t1 = time.time()
#parser = CParser(lex_optimize=True, yacc_debug=True, yacc_optimize=False)
#sys.write(time.time() - t1)
#buf = '''
#int (*k)(int);
#'''
## set debuglevel to 2 for debugging
#t = parser.parse(buf, 'x.c', debuglevel=0)
#t.show(showcoord=True)
| [
"[email protected]"
] | |
96f12a1ab1eb7f33d8ce8497a6de454ae5054716 | 12fe05ebba89ea0f11d6f5d2fd8f047ee6369ff6 | minmax3.py | c6f28978343f73c011e14f3c2fb0c7170c66fa0b | [] | no_license | daniilvarlamov/domzad | d467c4b9f51a1a640b0b001216849131c2463500 | 69e1b4c6fa27dc4d17499cfc6817c97d90f8391a | refs/heads/main | 2023-01-20T21:58:33.078060 | 2020-11-26T09:18:16 | 2020-11-26T09:18:16 | 303,324,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | N = int(input("Enter the number of rectangles: "))
for i in range(N):
    a = int(input("Enter the sides of the rectangle: "))
    b = int(input())
    P = 2 * (a + b)
    if i == 0:  # the first rectangle initialises the maximum
        Max = P
    if P > Max:
        Max = P
print(Max)
| [
"[email protected]"
] | |
d0bd583a39afd75f5fe496a88755cae09a18845d | b3309601462404e22f230fd836f05b8ae2570282 | /03_Generate_question_v4.py | c2af3ee62de1029d3824b6c8f33b19f88ebd6a48 | [] | no_license | Wardl1/Math-Quiz | cf441c6213a1cd5c239acc7b71611af1a10f5dfa | 377050ee8a15f8b03994b2ed8b97602a61a2a6c4 | refs/heads/main | 2023-07-24T23:32:45.188970 | 2021-09-05T07:56:37 | 2021-09-05T07:56:37 | 392,872,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,944 | py | """Component 3 Generate_Questions version 2
this version fixes the negative answers for the subtraction questions and
fixes the question heading so that it keeps up to date with the question number
"""
from tkinter import *
from functools import partial # To prevent unwanted additional windows
import random
class MathQuiz:
def __init__(self):
# Formatting variables
background_color = "#66FFFF" # light blue
# Main menu GUI frame
self.main_menu_frame = Frame(width=300, height=300,
bg=background_color, pady=10)
self.main_menu_frame.grid()
# Math Quiz heading (row 0)
self.MathQuiz_label = Label(self.main_menu_frame,
text="Math Quiz",
font=("Arial", "16", "bold"),
bg=background_color,
padx=10, pady=10)
self.MathQuiz_label.grid(row=0)
# Simple instructions given
self.intstruction_label = Label(self.main_menu_frame,
text="Pick one area of math"
" to work on \n and answer "
"the 10 questions given.",
font=("Arial", "12", "italic"),
bg=background_color,
padx=10, pady=10)
self.intstruction_label.grid(row=1)
# Addition button (row 2)
self.addition_button = Button(self.main_menu_frame, text="Addition",
font=("Arial", "14"),
padx=10, pady=10,
width=10,
bg="#008CFF", # darker blue
fg="white",
command=self.math_addition)
self.addition_button.grid(row=2)
# Subtraction button (row 3)
self.subtraction_button = Button(self.main_menu_frame,
text="Subtraction",
font=("Arial", "14"),
padx=10, pady=10,
width=10,
bg="#008CFF", # darker blue
fg="white",
command=self.math_subtraction)
self.subtraction_button.grid(row=3)
# All combined button (row 4)
self.combined_button = Button(self.main_menu_frame,
text="All Combined",
font=("Arial", "14"),
padx=10, pady=10,
width=10,
bg="#008CFF", # darker blue
fg="white",
command=self.all_combined)
self.combined_button.grid(row=4)
# math_addition function for when the addition_button is pressed
def math_addition(self):
print("1 + 1 = ") # print statement to check function works
# opens question GUI
QuestionGUI(self, quest_type="add").generate_question()
# math_subtraction function for when the subtraction_button is pressed
def math_subtraction(self):
print("1 - 1 = ") # print statement to check function works
# opens question GUI
QuestionGUI(self, quest_type="sub").generate_question()
# all_combined function for when the combined_button is pressed
def all_combined(self):
print("1 + / - 1 = ") # print statement to check function works
# opens question GUI
QuestionGUI(self, quest_type="both").generate_question()
class QuestionGUI:
def __init__(self, partner, quest_type):
# Formatting variables
background_color = "#3399FF" # darker blue
# disable Main menu buttons
partner.addition_button.config(state=DISABLED)
partner.subtraction_button.config(state=DISABLED)
partner.combined_button.config(state=DISABLED)
# sets up question type to determine if its an add,
# sub or both question
self.question_type = quest_type
# sets up question answer which will be needed to evaluate
# if the user is correct
self.question_answer = ""
# sets up question number so that the question heading updates
# when next button is pressed
self.question_number = 0
# sets up child window (ie: help box)
self.question_box = Toplevel()
# if users press at top, closes help and 'releases' help button
self.question_box.protocol('WM_DELETE_WINDOW',
partial(self.close_question, partner))
# Question Frame
self.question_frame = Frame(self.question_box, width=300,
bg=background_color)
self.question_frame.grid()
# Question Heading (row 0)
self.question_heading_label = Label(self.question_frame,
text="Question 1/10",
font="Arial 16 bold",
bg=background_color,
padx=10, pady=10)
self.question_heading_label.grid(row=0)
# User question to answer (row 1)
self.question_label = Label(self.question_frame,
font="Arial 12 bold", wrap=250,
justify=CENTER, bg=background_color,
padx=10, pady=10)
self.question_label.grid(row=1)
# Answer entry box (row 2)
self.answer_entry = Entry(self.question_frame, width=20,
font="Arial 14 bold",
bg="white")
self.answer_entry.grid(row=2)
# Incorrect or correct statement (row 3)
self.evaluator_label = Label(self.question_frame,
font="Arial 14 bold",
fg="green",
bg=background_color,
pady=10, text="Correct")
self.evaluator_label.grid(row=3)
# Sets up new frame for buttons to get a nice layout
self.button_frame = Frame(self.question_box, width=300,
bg=background_color)
self.button_frame.grid(row=1)
# Close button (row 0, column 0)
self.close_button = Button(self.button_frame, text="Close",
width=8, bg="light grey",
font="arial 10 bold",
command=partial(self.close_question,
partner))
self.close_button.grid(row=0, column=0)
# Enter button (row 0, column 1)
self.enter_button = Button(self.button_frame, text="Enter",
width=8, bg="light grey",
font="arial 10 bold",
command=partial(self.enter_question))
self.enter_button.grid(row=0, column=1)
# Next button (row 0, column 2)
self.next_button = Button(self.button_frame, text="Next",
width=8, bg="light grey",
font="arial 10 bold",
command=partial(self.generate_question))
self.next_button.grid(row=0, column=2)
def generate_question(self):
self.question_number += 1
# all combined variable to switch between add and sub
all_combined = ""
num_1 = random.randint(0, 10) # generates random number
num_2 = random.randint(0, 10)
# sets up question variable which is the text for the question_label
question = ""
if self.question_type == "both":
# chooses between add and sub to generate both questions
all_combined = random.choice(["add", "sub"])
if self.question_type == "add" or all_combined == "add":
question = ("{} + {} = ".format(num_1, num_2)) # creates question
self.question_answer = num_1 + num_2 # works out answer
elif self.question_type == "sub" or all_combined == "sub":
if num_1 > num_2:
# creates question
question = ("{} - {} = ".format(num_1, num_2))
self.question_answer = num_1 - num_2 # works out answer
else:
# creates question
question = ("{} - {} = ".format(num_2, num_1))
self.question_answer = num_2 - num_1 # works out answer
# changes the question label so that it shows the newly generated question
self.question_label.config(text=question)
self.question_heading_label.config(text="Question {}/10".
format(self.question_number))
if self.question_number == 10:
self.next_button.config(state=DISABLED)
def close_question(self, partner):
# Put main menu button's back to normal...
partner.addition_button.config(state=NORMAL)
partner.subtraction_button.config(state=NORMAL)
partner.combined_button.config(state=NORMAL)
self.question_box.destroy() # closes question GUI
def enter_question(self):
print("Wrong answer") # prints to test button
# main routine
if __name__ == "__main__":
root = Tk()
root.title("Math Quiz")
something = MathQuiz()
root.mainloop()
| [
"[email protected]"
] | |
cb2811ebb7323dde07db3204b7cbb018b4aa24df | b5aef1178c9153ca0c4dd9823e5fa2a2bc64649f | /sqlalchemy_to_ormar/maps.py | 1a9e860b78fc123c5831dcea9f9bd6c03d9d63d5 | [
"MIT"
] | permissive | collerek/sqlalchemy-to-ormar | 970a56c69ff03b7e32b11e4b1ebcb00c3b8d903c | 07c1595297221b31db86b3d34b3aad54fa3967da | refs/heads/main | 2023-04-23T10:41:04.426391 | 2021-05-16T14:10:38 | 2021-05-16T14:10:38 | 355,256,537 | 10 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,602 | py | from typing import Dict, Set, Type
import ormar
from ormar import Model
FIELD_MAP = {
"integer": ormar.Integer,
"tinyint": ormar.Integer,
"smallint": ormar.Integer,
"bigint": ormar.Integer,
"small_integer": ormar.Integer,
"big_integer": ormar.BigInteger,
"string": ormar.String,
"char": ormar.String,
"varchar": ormar.String,
"text": ormar.Text,
"mediumtext": ormar.Text,
"longtext": ormar.Text,
"float": ormar.Float,
"decimal": ormar.Decimal,
"date": ormar.Date,
"datetime": ormar.DateTime,
"timestamp": ormar.DateTime,
"time": ormar.Time,
"boolean": ormar.Boolean,
"bit": ormar.Boolean,
}
TYPE_SPECIFIC_PARAMETERS: Dict[str, Dict] = {
"string": {"max_length": {"key": "length", "default": 255}},
"varchar": {"max_length": {"key": "length", "default": 255}},
"char": {"max_length": {"key": "length", "default": 255}},
"decimal": {
"max_digits": {"key": "precision", "default": 18},
"decimal_places": {"key": "scale", "default": 6},
},
}
COMMON_PARAMETERS: Dict[str, Dict] = dict(
name={"key": "name", "default": None},
primary_key={"key": "primary_key", "default": False},
autoincrement={"key": "autoincrement", "default": False},
index={"key": "index", "default": False},
unique={"key": "unique", "default": False},
nullable={"key": "nullable", "default": None},
default={"key": "default", "default": None},
server_default={"key": "server_default", "default": None},
)
PARSED_MODELS: Dict[Type, Type[Model]] = dict()
CURRENTLY_PROCESSED: Set = set()
| [
"[email protected]"
] | |
c751ca648f50a29345bf726fcde090faab5448ec | 7019f0dd96c69fb5c66a0840ed989e86e489496a | /Exercises Python Brasil/01 - Estrutura Sequencial/18.py | d487f416b6ee013961afddee3aaf5529929b9ba4 | [] | no_license | leonardokiyota/Python-Training | e1d8343156d96fd085e1dbae8c48770ae0725347 | 38a9ce8cb5558f2a73060243d458ea92d91bf945 | refs/heads/master | 2021-01-10T08:40:30.100115 | 2016-01-12T01:36:30 | 2016-01-12T01:36:30 | 49,223,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | # -*- coding: utf-8 -*-
"""
Write a program that asks for the size of a file to download (in MB) and the
speed of the Internet link (in Mbps), then calculates and reports the approximate
time needed to download the file over that link (in minutes).
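A minimal sketch of one possible solution (illustrative only; the prompts and
variable names below are assumptions, since the original file holds just the
problem statement):
    size_mb = float(input("File size (MB): "))
    speed_mbps = float(input("Link speed (Mbps): "))
    minutes = (size_mb * 8) / speed_mbps / 60  # MB to megabits, then seconds to minutes
    print("Approximate download time: %.1f minutes" % minutes)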
""" | [
"[email protected]"
] | |
3d1e771da9ec0f32bfd297a1b19794e9054adce4 | 1825283527f5a479204708feeaf55f4ab6d1290b | /leetcode/python/45/sol.py | 3db6f97188dd189aef4c4caf07b43524d9f7f299 | [] | no_license | frankieliu/problems | b82c61d3328ffcc1da2cbc95712563355f5d44b5 | 911c6622448a4be041834bcab25051dd0f9209b2 | refs/heads/master | 2023-01-06T14:41:58.044871 | 2019-11-24T03:47:22 | 2019-11-24T03:47:22 | 115,065,956 | 1 | 0 | null | 2023-01-04T07:25:52 | 2017-12-22T02:06:57 | HTML | UTF-8 | Python | false | false | 2,156 | py |
10-lines C++ (16ms) / Python BFS Solutions with Explanations
https://leetcode.com/problems/jump-game-ii/discuss/18019
* Lang: python3
* Author: jianchao-li
* Votes: 71
This problem has a nice BFS structure. Let's illustrate it using the example `nums = [2, 3, 1, 1, 4]` in the problem statement. We are initially at position `0`. Then we can move at most `nums[0]` steps from it. So, after one move, we may reach `nums[1] = 3` or `nums[2] = 1`. So these nodes are reachable in `1` move. From these nodes, we can further move to `nums[3] = 1` and `nums[4] = 4`. Now you can see that the target `nums[4] = 4` is reachable in `2` moves.
Putting this into code, we keep two pointers `start` and `end` that record the current range of the starting nodes. Each time after we make a move, update `start` to be `end + 1` and `end` to be the farthest index that can be reached in `1` move from the current `[start, end]`.
To get an accepted solution, it is important to handle all the edge cases, and the following code handles all of them in a unified way without resorting to unclean `if` statements :-)
----------
**C++**
class Solution {
public:
int jump(vector<int>& nums) {
int n = nums.size(), step = 0, start = 0, end = 0;
while (end < n - 1) {
step++;
int maxend = end + 1;
for (int i = start; i <= end; i++) {
if (i + nums[i] >= n - 1) return step;
maxend = max(maxend, i + nums[i]);
}
start = end + 1;
end = maxend;
}
return step;
}
};
----------
**Python**
class Solution:
# @param {integer[]} nums
# @return {integer}
def jump(self, nums):
n, start, end, step = len(nums), 0, 0, 0
while end < n - 1:
step += 1
maxend = end + 1
for i in range(start, end + 1):
if i + nums[i] >= n - 1:
return step
maxend = max(maxend, i + nums[i])
start, end = end + 1, maxend
return step
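A quick way to sanity-check the Python version against the example from the problem statement:

    print(Solution().jump([2, 3, 1, 1, 4]))  # prints 2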
| [
"[email protected]"
] | |
08a65bb7db851c3827f50ea795ce9e58ad45c818 | 7eebbfaee45fdc57c4fc6ba32c87c35be1e62b14 | /airbyte-integrations/connectors/source-facebook-pages/source_facebook_pages/streams.py | 717fb1c76800fc295cff19b40b475069c0e2914a | [
"MIT",
"Elastic-2.0"
] | permissive | Velocity-Engineering/airbyte | b6e1fcead5b9fd7c74d50b9f27118654604dc8e0 | 802a8184cdd11c1eb905a54ed07c8732b0c0b807 | refs/heads/master | 2023-07-31T15:16:27.644737 | 2021-09-28T08:43:51 | 2021-09-28T08:43:51 | 370,730,633 | 0 | 1 | MIT | 2021-06-08T05:58:44 | 2021-05-25T14:55:43 | Java | UTF-8 | Python | false | false | 4,651 | py | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from abc import ABC
from typing import Any, Iterable, Mapping, MutableMapping, Optional
import requests
from airbyte_cdk.sources.streams.http import HttpStream
from source_facebook_pages.metrics import PAGE_FIELDS, PAGE_METRICS, POST_FIELDS, POST_METRICS
class FacebookPagesStream(HttpStream, ABC):
url_base = "https://graph.facebook.com/v11.0/"
primary_key = "id"
data_field = "data"
def __init__(
self,
access_token: str = None,
page_id: str = None,
**kwargs,
):
super().__init__(**kwargs)
self._access_token = access_token
self._page_id = page_id
@property
def path_param(self):
return self.name[:-1]
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
data = response.json()
if not data.get("data") or not data.get("paging"):
return {}
return {
"limit": 100,
"after": data.get("paging", {}).get("cursors", {}).get("after"),
}
def request_params(
self,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, any] = None,
next_page_token: Mapping[str, Any] = None,
) -> MutableMapping[str, Any]:
next_page_token = next_page_token or {}
params = {"access_token": self._access_token, **next_page_token}
return params
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
if not self.data_field:
yield response.json()
records = response.json().get(self.data_field, [])
for record in records:
yield record
class Page(FacebookPagesStream):
"""
API docs: https://developers.facebook.com/docs/graph-api/reference/page/,
"""
data_field = ""
def path(self, **kwargs) -> str:
return self._page_id
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
return None
def request_params(self, **kwargs) -> MutableMapping[str, Any]:
params = super().request_params(**kwargs)
# We have to spell out which fields to request from the Facebook API,
# because the API does not offer a way to fetch all fields dynamically without delays,
# so PAGE_FIELDS defines the fields a user can get from the API.
params["fields"] = PAGE_FIELDS
return params
class Post(FacebookPagesStream):
"""
https://developers.facebook.com/docs/graph-api/reference/v11.0/page/feed,
"""
def path(self, **kwargs) -> str:
return f"{self._page_id}/posts"
def request_params(self, **kwargs) -> MutableMapping[str, Any]:
params = super().request_params(**kwargs)
params["fields"] = POST_FIELDS
return params
class PageInsights(FacebookPagesStream):
"""
API docs: https://developers.facebook.com/docs/graph-api/reference/page/insights/,
"""
def path(self, **kwargs) -> str:
return f"{self._page_id}/insights"
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
return None
def request_params(
self,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, any] = None,
next_page_token: Mapping[str, Any] = None,
) -> MutableMapping[str, Any]:
params = super().request_params(stream_state, stream_slice, next_page_token)
params["metric"] = ",".join(PAGE_METRICS)
return params
class PostInsights(FacebookPagesStream):
"""
API docs: https://developers.facebook.com/docs/graph-api/reference/post/insights/,
"""
def path(self, **kwargs) -> str:
return f"{self._page_id}/posts"
def request_params(
self,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, any] = None,
next_page_token: Mapping[str, Any] = None,
) -> MutableMapping[str, Any]:
params = super().request_params(stream_state, stream_slice, next_page_token)
params["fields"] = f'insights.metric({",".join(POST_METRICS)})'
return params
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
# unique case so we override this method
records = response.json().get(self.data_field) or []
for insights in records:
if insights.get("insights"):
data = insights.get("insights").get("data")
for insight in data:
yield insight
else:
yield insights
| [
"[email protected]"
] |